diff --git a/intern/cycles/device/metal/device_impl.mm b/intern/cycles/device/metal/device_impl.mm
index 3bcec510742..359d204bb61 100644
--- a/intern/cycles/device/metal/device_impl.mm
+++ b/intern/cycles/device/metal/device_impl.mm
@@ -709,7 +709,7 @@ MetalDevice::MetalMem *MetalDevice::generic_alloc(device_memory &mem)
   mem.device_size = metal_buffer.allocatedSize;
   stats.mem_alloc(mem.device_size);
 
-  metal_buffer.label = [[NSString alloc] initWithFormat:@"%s", mem.name];
+  metal_buffer.label = [NSString stringWithFormat:@"%s", mem.name];
 
   std::lock_guard lock(metal_mem_map_mutex);
diff --git a/intern/cycles/device/metal/queue.mm b/intern/cycles/device/metal/queue.mm
index bb529c23a49..27c54bc42b9 100644
--- a/intern/cycles/device/metal/queue.mm
+++ b/intern/cycles/device/metal/queue.mm
@@ -437,10 +437,10 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
 
   if (label_command_encoders_) {
     /* Add human-readable labels if we're doing any form of debugging / profiling. */
-    mtlComputeCommandEncoder.label = [[NSString alloc]
-        initWithFormat:@"Metal queue launch %s, work_size %d",
-                       device_kernel_as_string(kernel),
-                       work_size];
+    mtlComputeCommandEncoder.label = [NSString
+        stringWithFormat:@"Metal queue launch %s, work_size %d",
+                         device_kernel_as_string(kernel),
+                         work_size];
   }
 
   /* this relies on IntegratorStateGPU layout being contiguous device_ptrs. */
diff --git a/intern/ghost/intern/GHOST_ContextCGL.mm b/intern/ghost/intern/GHOST_ContextCGL.mm
index 742a62c9960..ea96442c0d1 100644
--- a/intern/ghost/intern/GHOST_ContextCGL.mm
+++ b/intern/ghost/intern/GHOST_ContextCGL.mm
@@ -124,6 +124,13 @@ GHOST_ContextCGL::~GHOST_ContextCGL()
       m_metalLayer = nil;
     }
   }
+  assert(s_sharedCount);
+
+  s_sharedCount--;
+  [s_sharedMetalCommandQueue release];
+  if (s_sharedCount == 0) {
+    s_sharedMetalCommandQueue = nil;
+  }
 }
 
 GHOST_TSuccess GHOST_ContextCGL::swapBuffers()
@@ -230,6 +237,7 @@ void GHOST_ContextCGL::metalInit()
   }
   /* Ensure active GHOSTContext retains a reference to the shared context. */
   [s_sharedMetalCommandQueue retain];
+  s_sharedCount++;
 
   /* Create shaders for blit operation. */
   NSString *source = @R"msl(
@@ -308,6 +316,9 @@ void GHOST_ContextCGL::metalInit()
         "GHOST_ContextCGL::metalInit: newRenderPipelineStateWithDescriptor:error: failed (when "
         "creating the Metal overlay pipeline)!");
     }
+
+    [desc.fragmentFunction release];
+    [desc.vertexFunction release];
   }
 }
@@ -315,11 +326,13 @@ void GHOST_ContextCGL::metalFree()
 {
   if (m_metalRenderPipeline) {
     [m_metalRenderPipeline release];
+    m_metalRenderPipeline = nil;
   }
 
   for (int i = 0; i < METAL_SWAPCHAIN_SIZE; i++) {
     if (m_defaultFramebufferMetalTexture[i].texture) {
       [m_defaultFramebufferMetalTexture[i].texture release];
+      m_defaultFramebufferMetalTexture[i].texture = nil;
     }
   }
 }
diff --git a/source/blender/blenlib/intern/storage_apple.mm b/source/blender/blenlib/intern/storage_apple.mm
index 9feabb776f0..2a3998f682b 100644
--- a/source/blender/blenlib/intern/storage_apple.mm
+++ b/source/blender/blenlib/intern/storage_apple.mm
@@ -129,9 +129,9 @@ eFileAttributes BLI_file_attributes(const char *path)
   /* clang-format off */
   @autoreleasepool {
     /* clang-format on */
-    NSURL *fileURL = [[NSURL alloc] initFileURLWithFileSystemRepresentation:path
-                                                                isDirectory:NO
-                                                              relativeToURL:nil];
+    NSURL *fileURL = [[[NSURL alloc] initFileURLWithFileSystemRepresentation:path
+                                                                  isDirectory:NO
+                                                                relativeToURL:nil] autorelease];
 
     /* Querying NSURLIsReadableKey and NSURLIsWritableKey keys for OneDrive placeholder files
      * triggers their unwanted download. */
diff --git a/source/blender/gpu/metal/mtl_context.mm b/source/blender/gpu/metal/mtl_context.mm
index caacee8c9ba..47ad45d733a 100644
--- a/source/blender/gpu/metal/mtl_context.mm
+++ b/source/blender/gpu/metal/mtl_context.mm
@@ -2723,9 +2723,6 @@ void present(MTLRenderPassDescriptor *blit_descriptor,
       MTLContext::get_global_memory_manager()->get_current_safe_list();
   BLI_assert(cmd_free_buffer_list);
 
-  id<MTLCommandBuffer> cmd_buffer_ref = cmdbuf;
-  [cmd_buffer_ref retain];
-
   /* Increment drawables in flight limiter. */
   MTLContext::max_drawables_in_flight++;
   std::chrono::time_point submission_time = std::chrono::high_resolution_clock::now();
@@ -2735,7 +2732,6 @@ void present(MTLRenderPassDescriptor *blit_descriptor,
   [cmdbuf addCompletedHandler:^(id<MTLCommandBuffer> /*cb*/) {
     /* Flag freed buffers associated with this CMD buffer as ready to be freed. */
     cmd_free_buffer_list->decrement_reference();
-    [cmd_buffer_ref release];
 
     /* Decrement count */
     ctx->main_command_buffer.dec_active_command_buffer_count();
diff --git a/source/blender/gpu/metal/mtl_shader.hh b/source/blender/gpu/metal/mtl_shader.hh
index 5abdb095f9f..72e66ce77b2 100644
--- a/source/blender/gpu/metal/mtl_shader.hh
+++ b/source/blender/gpu/metal/mtl_shader.hh
@@ -448,7 +448,7 @@ class MTLParallelShaderCompiler {
   void add_parallel_item_to_queue(ParallelWork *add_parallel_item_to_queuework_item,
                                   BatchHandle batch_handle);
 
-  std::atomic<int> ref_count;
+  std::atomic<int> ref_count = 1;
 
  public:
   MTLParallelShaderCompiler();
@@ -469,6 +469,7 @@ class MTLParallelShaderCompiler {
   }
   void decrement_ref_count()
   {
+    BLI_assert(ref_count > 0);
     ref_count--;
   }
   int get_ref_count()
diff --git a/source/blender/gpu/metal/mtl_shader.mm b/source/blender/gpu/metal/mtl_shader.mm
index df6fa4ecb8b..4cca98b939d 100644
--- a/source/blender/gpu/metal/mtl_shader.mm
+++ b/source/blender/gpu/metal/mtl_shader.mm
@@ -119,16 +119,12 @@ MTLShader::~MTLShader()
       push_constant_data_ = nullptr;
     }
 
-    /* Free Metal resources. */
-    if (shader_library_vert_ != nil) {
-      [shader_library_vert_ release];
-      shader_library_vert_ = nil;
-    }
-    if (shader_library_frag_ != nil) {
-      [shader_library_frag_ release];
-      shader_library_frag_ = nil;
-    }
-
+    /* Free Metal resources.
+     * This is done in the order of:
+     * 1. Pipelinestate objects
+     * 2. MTLFunctions
+     * 3. MTLLibraries
+     * So that each object releases it's references to the one following it. */
     if (pso_descriptor_ != nil) {
       [pso_descriptor_ release];
       pso_descriptor_ = nil;
@@ -137,31 +133,50 @@ MTLShader::~MTLShader()
 
     /* Free Pipeline Cache. */
     pso_cache_lock_.lock();
     for (const MTLRenderPipelineStateInstance *pso_inst : pso_cache_.values()) {
+      /* Free pipeline state object. */
+      if (pso_inst->pso) {
+        [pso_inst->pso release];
+      }
+      /* Free vertex function. */
       if (pso_inst->vert) {
         [pso_inst->vert release];
       }
+      /* Free fragment function. */
       if (pso_inst->frag) {
         [pso_inst->frag release];
       }
-      if (pso_inst->pso) {
-        [pso_inst->pso release];
-      }
       delete pso_inst;
     }
     pso_cache_.clear();
 
     /* Free Compute pipeline cache. */
     for (const MTLComputePipelineStateInstance *pso_inst : compute_pso_cache_.values()) {
-      if (pso_inst->compute) {
-        [pso_inst->compute release];
-      }
+      /* Free pipeline state object. */
       if (pso_inst->pso) {
         [pso_inst->pso release];
       }
+      /* Free compute function. */
+      if (pso_inst->compute) {
+        [pso_inst->compute release];
+      }
     }
     compute_pso_cache_.clear();
     pso_cache_lock_.unlock();
+    /* Free shader libraries. */
+    if (shader_library_vert_ != nil) {
+      [shader_library_vert_ release];
+      shader_library_vert_ = nil;
+    }
+    if (shader_library_frag_ != nil) {
+      [shader_library_frag_ release];
+      shader_library_frag_ = nil;
+    }
+    if (shader_library_compute_ != nil) {
+      [shader_library_compute_ release];
+      shader_library_compute_ = nil;
+    }
+
     /* NOTE(Metal): #ShaderInterface deletion is handled in the super destructor `~Shader()`. */
   }
   valid_ = false;
@@ -394,21 +409,18 @@ bool MTLShader::finalize(const shader::ShaderCreateInfo *info)
 
     switch (src_stage) {
       case ShaderStage::VERTEX: {
-        /* Retain generated library and assign debug name. */
+        /* Store generated library and assign debug name. */
         shader_library_vert_ = library;
-        [shader_library_vert_ retain];
         shader_library_vert_.label = [NSString stringWithUTF8String:this->name];
       } break;
       case ShaderStage::FRAGMENT: {
-        /* Retain generated library for fragment shader and assign debug name. */
+        /* Store generated library for fragment shader and assign debug name. */
        shader_library_frag_ = library;
-        [shader_library_frag_ retain];
         shader_library_frag_.label = [NSString stringWithUTF8String:this->name];
       } break;
       case ShaderStage::COMPUTE: {
-        /* Retain generated library for fragment shader and assign debug name. */
+        /* Store generated library for fragment shader and assign debug name. */
         shader_library_compute_ = library;
-        [shader_library_compute_ retain];
         shader_library_compute_.label = [NSString stringWithUTF8String:this->name];
       } break;
       case ShaderStage::ANY: {
@@ -433,7 +445,6 @@ bool MTLShader::finalize(const shader::ShaderCreateInfo *info)
   if (!is_compute) {
     /* Prepare Render pipeline descriptor. */
     pso_descriptor_ = [[MTLRenderPipelineDescriptor alloc] init];
-    [pso_descriptor_ retain];
     pso_descriptor_.label = [NSString stringWithUTF8String:this->name];
   }
 
@@ -1429,10 +1440,6 @@ MTLRenderPipelineStateInstance *MTLShader::bake_pipeline_state(
     }
   }
 
-  [pso_inst->vert retain];
-  [pso_inst->frag retain];
-  [pso_inst->pso retain];
-
   /* Insert into pso cache. */
   pso_cache_lock_.lock();
   pso_inst->shader_pso_index = pso_cache_.size();
@@ -1601,13 +1608,14 @@ MTLComputePipelineStateInstance *MTLShader::bake_compute_pipeline_state(
 #endif
   }
 
+  [desc release];
+
   /* Gather reflection data and create MTLComputePipelineStateInstance to store results. */
   MTLComputePipelineStateInstance *compute_pso_instance = new MTLComputePipelineStateInstance();
-  compute_pso_instance->compute = [compute_function retain];
-  compute_pso_instance->pso = [pso retain];
+  compute_pso_instance->compute = compute_function;
+  compute_pso_instance->pso = pso;
   compute_pso_instance->base_uniform_buffer_index = MTL_uniform_buffer_base_index;
   compute_pso_instance->base_storage_buffer_index = MTL_storage_buffer_base_index;
-
   pso_cache_lock_.lock();
   compute_pso_instance->shader_pso_index = compute_pso_cache_.size();
   compute_pso_cache_.add(compute_pipeline_descriptor, compute_pso_instance);
diff --git a/source/blender/gpu/metal/mtl_texture_util.mm b/source/blender/gpu/metal/mtl_texture_util.mm
index c4109d47bae..86755e9cc28 100644
--- a/source/blender/gpu/metal/mtl_texture_util.mm
+++ b/source/blender/gpu/metal/mtl_texture_util.mm
@@ -468,7 +468,6 @@ id gpu::MTLTexture::mtl_texture_update_impl(
     }
 
     /* Store PSO. */
-    [compute_pso retain];
     specialization_cache.add_new(specialization_params, compute_pso);
     return_pso = compute_pso;
   }
@@ -790,7 +789,6 @@ id gpu::MTLTexture::mtl_texture_read_impl(
     }
 
     /* Store PSO. */
-    [compute_pso retain];
     specialization_cache.add_new(specialization_params, compute_pso);
     return_pso = compute_pso;
   }
diff --git a/source/blender/makesrna/intern/rna_ui.cc b/source/blender/makesrna/intern/rna_ui.cc
index 9c682436f3d..2081cd5b4a6 100644
--- a/source/blender/makesrna/intern/rna_ui.cc
+++ b/source/blender/makesrna/intern/rna_ui.cc
@@ -211,31 +211,25 @@ static bool rna_Panel_unregister(Main *bmain, StructRNA *type)
     child_pt->parent = nullptr;
   }
 
-  const char space_type = pt->space_type;
-  BLI_freelistN(&pt->children);
-  BLI_freelinkN(&art->paneltypes, pt);
-
   LISTBASE_FOREACH (bScreen *, screen, &bmain->screens) {
     LISTBASE_FOREACH (ScrArea *, area, &screen->areabase) {
       LISTBASE_FOREACH (SpaceLink *, sl, &area->spacedata) {
-        if (sl->spacetype == space_type) {
-          ListBase *regionbase = (sl == area->spacedata.first) ? &area->regionbase :
-                                                                 &sl->regionbase;
-          LISTBASE_FOREACH (ARegion *, region, regionbase) {
-            if (region->type == art) {
-              LISTBASE_FOREACH (Panel *, panel, &region->panels) {
-                panel_type_clear_recursive(panel, pt);
-              }
-            }
-            /* The unregistered panel might have had a template that added instanced panels,
-             * so remove them just in case. They can be re-added on redraw anyway. */
-            UI_panels_free_instanced(nullptr, region);
+        ListBase *regionbase = (sl == area->spacedata.first) ? &area->regionbase : &sl->regionbase;
+        LISTBASE_FOREACH (ARegion *, region, regionbase) {
+          LISTBASE_FOREACH (Panel *, panel, &region->panels) {
+            panel_type_clear_recursive(panel, pt);
           }
+          /* The unregistered panel might have had a template that added instanced panels,
+           * so remove them just in case. They can be re-added on redraw anyway. */
+          UI_panels_free_instanced(nullptr, region);
         }
       }
     }
   }
+
+  BLI_freelistN(&pt->children);
+  BLI_freelinkN(&art->paneltypes, pt);
+
   /* update while blender is running */
   WM_main_add_notifier(NC_WINDOW, nullptr);
   return true;