macOS: Remove unnecessary checks now that minimum version is macOS 11.2

The macOS minimum version is now 11.2, so we no longer need to check for lower API versions.

Pull Request: https://projects.blender.org/blender/blender/pulls/118388
This commit is contained in:
Raul Fernandez
2024-02-16 19:03:23 +01:00
committed by Raul Fernandez Hernandez
parent f909e279e7
commit 324ff4ddef
10 changed files with 389 additions and 490 deletions

View File

@@ -401,4 +401,3 @@ int sequencer_clipboard_paste_exec(bContext *C, wmOperator *op)
return OPERATOR_FINISHED;
}

View File

@@ -306,66 +306,50 @@ bool MTLBackend::metal_is_supported()
return false;
}
if (@available(macOS 10.15, *)) {
id<MTLDevice> device = MTLCreateSystemDefaultDevice();
id<MTLDevice> device = MTLCreateSystemDefaultDevice();
/* Debug: Enable low power GPU with Environment Var: METAL_FORCE_INTEL. */
static const char *forceIntelStr = getenv("METAL_FORCE_INTEL");
bool forceIntel = forceIntelStr ? (atoi(forceIntelStr) != 0) : false;
/* Debug: Enable low power GPU with Environment Var: METAL_FORCE_INTEL. */
static const char *forceIntelStr = getenv("METAL_FORCE_INTEL");
bool forceIntel = forceIntelStr ? (atoi(forceIntelStr) != 0) : false;
if (forceIntel) {
NSArray<id<MTLDevice>> *allDevices = MTLCopyAllDevices();
for (id<MTLDevice> _device in allDevices) {
if (_device.lowPower) {
device = _device;
}
if (forceIntel) {
NSArray<id<MTLDevice>> *allDevices = MTLCopyAllDevices();
for (id<MTLDevice> _device in allDevices) {
if (_device.lowPower) {
device = _device;
}
}
/* If Intel, we must be on macOS 11.2+ for full Metal backend support. */
NSString *gpu_name = [device name];
const char *vendor = [gpu_name UTF8String];
if ((strstr(vendor, "Intel") || strstr(vendor, "INTEL"))) {
if (@available(macOS 11.2, *)) {
/* Intel device supported -- Carry on.
* NOTE: @available syntax cannot be negated. */
}
else {
return false;
}
}
/* Metal Viewport requires argument buffer tier-2 support and Barycentric Coordinates.
* These are available on most hardware configurations supporting Metal 2.2. */
bool supports_argument_buffers_tier2 = ([device argumentBuffersSupport] ==
MTLArgumentBuffersTier2);
bool supports_barycentrics = [device supportsShaderBarycentricCoordinates] ||
supports_barycentric_whitelist(device);
bool supported_metal_version = [device supportsFamily:MTLGPUFamilyMac2];
bool result = supports_argument_buffers_tier2 && supports_barycentrics &&
supported_os_version && supported_metal_version;
if (G.debug & G_DEBUG_GPU) {
if (!supports_argument_buffers_tier2) {
printf("[Metal] Device does not support argument buffers tier 2\n");
}
if (!supports_barycentrics) {
printf("[Metal] Device does not support barycentrics coordinates\n");
}
if (!supported_metal_version) {
printf("[Metal] Device does not support metal 2.2 or higher\n");
}
if (result) {
printf("Device with name %s supports metal minimum requirements\n",
[[device name] UTF8String]);
}
}
return result;
}
return false;
/* Metal Viewport requires argument buffer tier-2 support and Barycentric Coordinates.
* These are available on most hardware configurations supporting Metal 2.2. */
bool supports_argument_buffers_tier2 = ([device argumentBuffersSupport] ==
MTLArgumentBuffersTier2);
bool supports_barycentrics = [device supportsShaderBarycentricCoordinates] ||
supports_barycentric_whitelist(device);
bool supported_metal_version = [device supportsFamily:MTLGPUFamilyMac2];
bool result = supports_argument_buffers_tier2 && supports_barycentrics && supported_os_version &&
supported_metal_version;
if (G.debug & G_DEBUG_GPU) {
if (!supports_argument_buffers_tier2) {
printf("[Metal] Device does not support argument buffers tier 2\n");
}
if (!supports_barycentrics) {
printf("[Metal] Device does not support barycentrics coordinates\n");
}
if (!supported_metal_version) {
printf("[Metal] Device does not support metal 2.2 or higher\n");
}
if (result) {
printf("Device with name %s supports metal minimum requirements\n",
[[device name] UTF8String]);
}
}
return result;
}
void MTLBackend::capabilities_init(MTLContext *ctx)

View File

@@ -53,15 +53,13 @@ id<MTLCommandBuffer> MTLCommandBufferManager::ensure_begin()
BLI_assert(MTLCommandBufferManager::num_active_cmd_bufs <
GHOST_ContextCGL::max_command_buffer_count);
if (@available(macos 11.0, *)) {
if (G.debug & G_DEBUG_GPU) {
/* Debug: Enable Advanced Errors for GPU work execution. */
MTLCommandBufferDescriptor *desc = [[MTLCommandBufferDescriptor alloc] init];
desc.errorOptions = MTLCommandBufferErrorOptionEncoderExecutionStatus;
desc.retainedReferences = YES;
BLI_assert(context_.queue != nil);
active_command_buffer_ = [context_.queue commandBufferWithDescriptor:desc];
}
if (G.debug & G_DEBUG_GPU) {
/* Debug: Enable Advanced Errors for GPU work execution. */
MTLCommandBufferDescriptor *desc = [[MTLCommandBufferDescriptor alloc] init];
desc.errorOptions = MTLCommandBufferErrorOptionEncoderExecutionStatus;
desc.retainedReferences = YES;
BLI_assert(context_.queue != nil);
active_command_buffer_ = [context_.queue commandBufferWithDescriptor:desc];
}
/* Ensure command buffer is created if debug command buffer unavailable. */
@@ -151,14 +149,12 @@ bool MTLCommandBufferManager::submit(bool wait)
/* Command buffer execution debugging can return an error message if
* execution has failed or encountered GPU-side errors. */
if (@available(macos 11.0, *)) {
if (G.debug & G_DEBUG_GPU) {
if (G.debug & G_DEBUG_GPU) {
NSError *error = [active_command_buffer_ error];
if (error != nil) {
NSLog(@"%@", error);
BLI_assert(false);
}
NSError *error = [active_command_buffer_ error];
if (error != nil) {
NSLog(@"%@", error);
BLI_assert(false);
}
}
}
@@ -578,86 +574,80 @@ bool MTLCommandBufferManager::insert_memory_barrier(eGPUBarrier barrier_bits,
eGPUStageBarrierBits before_stages,
eGPUStageBarrierBits after_stages)
{
/* Only supporting Metal on 10.14 onward anyway - Check required for warnings. */
if (@available(macOS 10.14, *)) {
/* Apple Silicon does not support memory barriers for RenderCommandEncoder's.
* We do not currently need these due to implicit API guarantees. However, render->render
* resource dependencies are only evaluated at RenderCommandEncoder boundaries due to work
* execution on TBDR architecture.
*
* NOTE: Render barriers are therefore inherently expensive. Where possible, opt for local
* synchronization using raster order groups, or, prefer compute to avoid subsequent passes
* re-loading pass attachments which are not needed. */
const bool is_tile_based_arch = (GPU_platform_architecture() == GPU_ARCHITECTURE_TBDR);
if (is_tile_based_arch && (active_command_encoder_type_ != MTL_COMPUTE_COMMAND_ENCODER)) {
if (active_command_encoder_type_ == MTL_RENDER_COMMAND_ENCODER) {
end_active_command_encoder();
return true;
}
return false;
}
/* Apple Silicon does not support memory barriers for RenderCommandEncoder's.
* We do not currently need these due to implicit API guarantees. However, render->render
* resource dependencies are only evaluated at RenderCommandEncoder boundaries due to work
* execution on TBDR architecture.
*
* NOTE: Render barriers are therefore inherently expensive. Where possible, opt for local
* synchronization using raster order groups, or, prefer compute to avoid subsequent passes
* re-loading pass attachments which are not needed. */
const bool is_tile_based_arch = (GPU_platform_architecture() == GPU_ARCHITECTURE_TBDR);
if (is_tile_based_arch && (active_command_encoder_type_ != MTL_COMPUTE_COMMAND_ENCODER)) {
if (active_command_encoder_type_ == MTL_RENDER_COMMAND_ENCODER) {
end_active_command_encoder();
/* Resolve scope. */
MTLBarrierScope scope = 0;
if (barrier_bits & GPU_BARRIER_SHADER_IMAGE_ACCESS || barrier_bits & GPU_BARRIER_TEXTURE_FETCH) {
bool is_compute = (active_command_encoder_type_ != MTL_RENDER_COMMAND_ENCODER);
scope |= (is_compute ? 0 : MTLBarrierScopeRenderTargets) | MTLBarrierScopeTextures;
}
if (barrier_bits & GPU_BARRIER_SHADER_STORAGE ||
barrier_bits & GPU_BARRIER_VERTEX_ATTRIB_ARRAY || barrier_bits & GPU_BARRIER_ELEMENT_ARRAY ||
barrier_bits & GPU_BARRIER_UNIFORM || barrier_bits & GPU_BARRIER_BUFFER_UPDATE)
{
scope = scope | MTLBarrierScopeBuffers;
}
if (scope != 0) {
/* Issue barrier based on encoder. */
switch (active_command_encoder_type_) {
case MTL_NO_COMMAND_ENCODER:
case MTL_BLIT_COMMAND_ENCODER: {
/* No barrier to be inserted. */
return false;
}
/* Rendering. */
case MTL_RENDER_COMMAND_ENCODER: {
/* Currently flagging both stages -- can use bits above to filter on stage type --
* though full barrier is safe for now. */
MTLRenderStages before_stage_flags = 0;
MTLRenderStages after_stage_flags = 0;
if (before_stages & GPU_BARRIER_STAGE_VERTEX &&
!(before_stages & GPU_BARRIER_STAGE_FRAGMENT))
{
before_stage_flags = before_stage_flags | MTLRenderStageVertex;
}
if (before_stages & GPU_BARRIER_STAGE_FRAGMENT) {
before_stage_flags = before_stage_flags | MTLRenderStageFragment;
}
if (after_stages & GPU_BARRIER_STAGE_VERTEX) {
after_stage_flags = after_stage_flags | MTLRenderStageVertex;
}
if (after_stages & GPU_BARRIER_STAGE_FRAGMENT) {
after_stage_flags = MTLRenderStageFragment;
}
id<MTLRenderCommandEncoder> rec = this->get_active_render_command_encoder();
BLI_assert(rec != nil);
[rec memoryBarrierWithScope:scope
afterStages:after_stage_flags
beforeStages:before_stage_flags];
return true;
}
return false;
}
/* Resolve scope. */
MTLBarrierScope scope = 0;
if (barrier_bits & GPU_BARRIER_SHADER_IMAGE_ACCESS || barrier_bits & GPU_BARRIER_TEXTURE_FETCH)
{
bool is_compute = (active_command_encoder_type_ != MTL_RENDER_COMMAND_ENCODER);
scope |= (is_compute ? 0 : MTLBarrierScopeRenderTargets) | MTLBarrierScopeTextures;
}
if (barrier_bits & GPU_BARRIER_SHADER_STORAGE ||
barrier_bits & GPU_BARRIER_VERTEX_ATTRIB_ARRAY ||
barrier_bits & GPU_BARRIER_ELEMENT_ARRAY || barrier_bits & GPU_BARRIER_UNIFORM ||
barrier_bits & GPU_BARRIER_BUFFER_UPDATE)
{
scope = scope | MTLBarrierScopeBuffers;
}
if (scope != 0) {
/* Issue barrier based on encoder. */
switch (active_command_encoder_type_) {
case MTL_NO_COMMAND_ENCODER:
case MTL_BLIT_COMMAND_ENCODER: {
/* No barrier to be inserted. */
return false;
}
/* Rendering. */
case MTL_RENDER_COMMAND_ENCODER: {
/* Currently flagging both stages -- can use bits above to filter on stage type --
* though full barrier is safe for now. */
MTLRenderStages before_stage_flags = 0;
MTLRenderStages after_stage_flags = 0;
if (before_stages & GPU_BARRIER_STAGE_VERTEX &&
!(before_stages & GPU_BARRIER_STAGE_FRAGMENT))
{
before_stage_flags = before_stage_flags | MTLRenderStageVertex;
}
if (before_stages & GPU_BARRIER_STAGE_FRAGMENT) {
before_stage_flags = before_stage_flags | MTLRenderStageFragment;
}
if (after_stages & GPU_BARRIER_STAGE_VERTEX) {
after_stage_flags = after_stage_flags | MTLRenderStageVertex;
}
if (after_stages & GPU_BARRIER_STAGE_FRAGMENT) {
after_stage_flags = MTLRenderStageFragment;
}
id<MTLRenderCommandEncoder> rec = this->get_active_render_command_encoder();
BLI_assert(rec != nil);
[rec memoryBarrierWithScope:scope
afterStages:after_stage_flags
beforeStages:before_stage_flags];
return true;
}
/* Compute. */
case MTL_COMPUTE_COMMAND_ENCODER: {
id<MTLComputeCommandEncoder> rec = this->get_active_compute_command_encoder();
BLI_assert(rec != nil);
[rec memoryBarrierWithScope:scope];
return true;
}
/* Compute. */
case MTL_COMPUTE_COMMAND_ENCODER: {
id<MTLComputeCommandEncoder> rec = this->get_active_compute_command_encoder();
BLI_assert(rec != nil);
[rec memoryBarrierWithScope:scope];
return true;
}
}
}

View File

@@ -314,16 +314,13 @@ bool MTLShader::finalize(const shader::ShaderCreateInfo *info)
MTLCompileOptions *options = [[[MTLCompileOptions alloc] init] autorelease];
options.languageVersion = MTLLanguageVersion2_2;
options.fastMathEnabled = YES;
options.preserveInvariance = YES;
if (@available(macOS 11.00, *)) {
options.preserveInvariance = YES;
/* Raster order groups for tile data in struct require Metal 2.3.
* Retaining Metal 2.2. for old shaders to maintain backwards
* compatibility for existing features. */
if (info->subpass_inputs_.size() > 0) {
options.languageVersion = MTLLanguageVersion2_3;
}
/* Raster order groups for tile data in struct require Metal 2.3.
* Retaining Metal 2.2. for old shaders to maintain backwards
* compatibility for existing features. */
if (info->subpass_inputs_.size() > 0) {
options.languageVersion = MTLLanguageVersion2_3;
}
#if defined(MAC_OS_VERSION_14_0)
if (@available(macOS 14.00, *)) {
@@ -364,14 +361,6 @@ bool MTLShader::finalize(const shader::ShaderCreateInfo *info)
/* Inject unique context ID to avoid cross-context shader cache collisions.
* Required on macOS 11.0. */
NSString *source_with_header = source_with_header_a;
if (@available(macos 11.0, *)) {
/* Pass-through. Availability syntax requirement, expression cannot be negated. */
}
else {
source_with_header = [source_with_header_a
stringByAppendingString:[NSString stringWithFormat:@"\n\n#define MTL_CONTEXT_IND %d\n",
context_->context_id]];
}
[source_with_header retain];
/* Prepare Shader Library. */

View File

@@ -2793,9 +2793,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_out_struct(ShaderStage sh
* by ensuring that vertex position is consistently calculated between subsequent passes
* with maximum precision. */
out << "\tfloat4 _default_position_ [[position]]";
if (@available(macos 11.0, *)) {
out << " [[invariant]]";
}
out << " [[invariant]]";
out << ";" << std::endl;
}
else {
@@ -2806,9 +2804,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_out_struct(ShaderStage sh
/* Use invariance if available. See above for detail. */
out << "\tfloat4 " << this->vertex_output_varyings[0].name << " [[position]];";
if (@available(macos 11.0, *)) {
out << " [[invariant]]";
}
out << " [[invariant]]";
out << ";" << std::endl;
first_attr_is_position = true;
}

View File

@@ -2345,15 +2345,9 @@ void gpu::MTLTexture::ensure_baked()
/* Override storage mode if memoryless attachments are being used.
* NOTE: Memoryless textures can only be supported on TBDR GPUs. */
if (gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MEMORYLESS) {
if (@available(macOS 11.00, *)) {
const bool is_tile_based_arch = (GPU_platform_architecture() == GPU_ARCHITECTURE_TBDR);
if (is_tile_based_arch) {
texture_descriptor_.storageMode = MTLStorageModeMemoryless;
}
}
else {
MTL_LOG_WARNING(
"GPU_TEXTURE_USAGE_MEMORYLESS is not available on macOS versions prior to 11.0");
const bool is_tile_based_arch = (GPU_platform_architecture() == GPU_ARCHITECTURE_TBDR);
if (is_tile_based_arch) {
texture_descriptor_.storageMode = MTLStorageModeMemoryless;
}
}