From 53cb09357e74a936f25d66b751fc5ea1c0431f14 Mon Sep 17 00:00:00 2001 From: Jason Fielder Date: Mon, 19 Jun 2023 20:28:51 +0200 Subject: [PATCH 1/3] Fix #108792: Ensure Metal buffers correctly freed on exit Replaces vector of allocations with dynamic linked list. Bug caused by previously freed buffers still having been in the list. Linked list enables fast removal of already-released buffers. Also ensured that the memory manager classes are included in memory tracking. Authored by Apple: Michael Parkin-White Pull Request: https://projects.blender.org/blender/blender/pulls/108940 --- source/blender/gpu/metal/mtl_memory.hh | 23 ++++- source/blender/gpu/metal/mtl_memory.mm | 125 ++++++++++++++++++++++--- 2 files changed, 131 insertions(+), 17 deletions(-) diff --git a/source/blender/gpu/metal/mtl_memory.hh b/source/blender/gpu/metal/mtl_memory.hh index 0142f5d248a..22cb3616ec2 100644 --- a/source/blender/gpu/metal/mtl_memory.hh +++ b/source/blender/gpu/metal/mtl_memory.hh @@ -107,6 +107,11 @@ class MTLUniformBuf; /* MTLBuffer allocation wrapper. */ class MTLBuffer { + public: + /* NOTE: ListBase API is not used due to custom destructor operation required to release + * Metal objective C buffer resource. */ + gpu::MTLBuffer *next, *prev; + private: /* Metal resource. */ id metal_buffer_; @@ -177,6 +182,8 @@ class MTLBuffer { /* Safety check to ensure buffers are not used after free. */ void debug_ensure_used(); + + MEM_CXX_CLASS_ALLOC_FUNCS("MTLBuffer"); }; /* View into part of an MTLBuffer. */ @@ -333,6 +340,8 @@ class MTLSafeFreeList { } } } + + MEM_CXX_CLASS_ALLOC_FUNCS("MTLSafeFreeList"); }; /* MTLBuffer pools. */ @@ -362,7 +371,7 @@ class MTLBufferPool { #endif /* Metal resources. */ - bool ensure_initialised_ = false; + bool initialized_ = false; id device_ = nil; /* The buffer selection aims to pick a buffer which meets the minimum size requirements. 
@@ -389,7 +398,10 @@ class MTLBufferPool { std::mutex buffer_pool_lock_; blender::Map buffer_pools_; - blender::Vector allocations_; + + /* Linked list to track all existing allocations. Prioritizing fast insert/deletion. */ + gpu::MTLBuffer *allocations_list_base_; + uint allocations_list_size_; /* Maintain a queue of all MTLSafeFreeList's that have been released * by the GPU and are ready to have their buffers re-inserted into the @@ -432,6 +444,11 @@ class MTLBufferPool { void ensure_buffer_pool(MTLResourceOptions options); void insert_buffer_into_pool(MTLResourceOptions options, gpu::MTLBuffer *buffer); void free(); + + /* Allocations list. */ + void allocations_list_insert(gpu::MTLBuffer *buffer); + void allocations_list_delete(gpu::MTLBuffer *buffer); + void allocations_list_delete_all(); }; /* Scratch buffers are circular-buffers used for temporary data within the current frame. @@ -492,6 +509,8 @@ class MTLScratchBufferManager { * This call will perform a partial flush of the buffer starting from * the last offset the data was flushed from, to the current offset. */ void flush_active_scratch_buffer(); + + MEM_CXX_CLASS_ALLOC_FUNCS("MTLScratchBufferManager"); }; /** \} */ diff --git a/source/blender/gpu/metal/mtl_memory.mm b/source/blender/gpu/metal/mtl_memory.mm index 73b167a9ce3..9219d80a7b2 100644 --- a/source/blender/gpu/metal/mtl_memory.mm +++ b/source/blender/gpu/metal/mtl_memory.mm @@ -25,9 +25,9 @@ namespace blender::gpu { void MTLBufferPool::init(id mtl_device) { - if (!ensure_initialised_) { + if (!initialized_) { BLI_assert(mtl_device); - ensure_initialised_ = true; + initialized_ = true; device_ = mtl_device; #if MTL_DEBUG_MEMORY_STATISTICS == 1 @@ -39,6 +39,10 @@ void MTLBufferPool::init(id mtl_device) /* Track pool allocation size. */ allocations_in_pool_ = 0; + /* Live allocations list. 
*/ + allocations_list_base_ = nullptr; + allocations_list_size_ = 0; + /* Free pools -- Create initial safe free pool */ BLI_assert(current_free_list_ == nullptr); this->begin_new_safe_list(); @@ -53,17 +57,29 @@ MTLBufferPool::~MTLBufferPool() void MTLBufferPool::free() { buffer_pool_lock_.lock(); - for (auto buffer : allocations_) { - BLI_assert(buffer); - delete buffer; - } - allocations_.clear(); + /* Delete all existing allocations. */ + allocations_list_delete_all(); + + /* Release safe free lists. */ + for (int safe_pool_free_index = 0; safe_pool_free_index < completed_safelist_queue_.size(); + safe_pool_free_index++) + { + delete completed_safelist_queue_[safe_pool_free_index]; + } + completed_safelist_queue_.clear(); + if (current_free_list_ != nullptr) { + delete current_free_list_; + current_free_list_ = nullptr; + } + + /* Clear and release memory pools. */ for (std::multiset *buffer_pool : buffer_pools_.values()) { delete buffer_pool; } + buffer_pools_.clear(); buffer_pool_lock_.unlock(); } @@ -155,10 +171,7 @@ gpu::MTLBuffer *MTLBufferPool::allocate_aligned(uint64_t size, new_buffer = new gpu::MTLBuffer(device_, size, options, alignment); /* Track allocation in context. */ - allocations_.append(new_buffer); -#if MTL_DEBUG_MEMORY_STATISTICS == 1 - total_allocation_bytes_ += aligned_alloc_size; -#endif + allocations_list_insert(new_buffer); } else { /* Re-use suitable buffer. */ @@ -289,6 +302,7 @@ void MTLBufferPool::update_memory_pools() * animation. * If memory is continually used, then we do not want to free this memory as it will be * re-allocated during a short time period. */ + const time_t time_now = std::time(nullptr); for (auto buffer_pool_list : buffer_pools_.items()) { MTLBufferPoolOrderedList *pool_allocations = buffer_pool_list.value; @@ -323,12 +337,13 @@ void MTLBufferPool::update_memory_pools() if (time_passed > deletion_time_threshold_s) { - /* Delete allocation. 
*/ - delete handle.buffer; + /* Remove buffer from global allocations list and release resource. */ + allocations_list_delete(handle.buffer); + + /* Remove buffer from pool and update pool statistics. */ pool_iterator = pool_allocations->erase(pool_iterator); allocations_in_pool_ -= handle.buffer_size; #if MTL_DEBUG_MEMORY_STATISTICS == 1 - total_allocation_bytes_ -= handle.buffer_size; buffers_in_pool_--; #endif continue; @@ -343,7 +358,7 @@ void MTLBufferPool::update_memory_pools() uint framealloc = (uint)per_frame_allocation_count_; printf(" Allocations in frame: %u\n", framealloc); - printf(" Total Buffers allocated: %u\n", (uint)allocations_.size()); + printf(" Total Buffers allocated: %u\n", allocations_list_size_); printf(" Total Memory allocated: %u MB\n", (uint)total_allocation_bytes_ / (1024 * 1024)); uint allocs = (uint)(allocations_in_pool_) / 1024 / 2024; @@ -453,6 +468,80 @@ void MTLBufferPool::insert_buffer_into_pool(MTLResourceOptions options, gpu::MTL #endif } +void MTLBufferPool::allocations_list_insert(gpu::MTLBuffer *buffer) +{ + /* NOTE: Function should only be called while buffer_pool_lock_ is acquired. */ + BLI_assert(initialized_); + BLI_assert(buffer != nullptr); + + /* Insert buffer at base of allocations list. */ + gpu::MTLBuffer *current_head = allocations_list_base_; + buffer->next = current_head; + buffer->prev = nullptr; + + if (current_head != nullptr) { + current_head->prev = buffer; + } + + allocations_list_base_ = buffer; + allocations_list_size_++; + +#if MTL_DEBUG_MEMORY_STATISTICS == 1 + total_allocation_bytes_ += buffer->get_size(); +#endif +} + +void MTLBufferPool::allocations_list_delete(gpu::MTLBuffer *buffer) +{ + /* NOTE: Function should only be called while buffer_pool_lock_ is acquired. */ + /* Remove a buffer link in the allocations chain. 
*/ + BLI_assert(initialized_); + BLI_assert(buffer != nullptr); + BLI_assert(allocations_list_size_ >= 1); + + gpu::MTLBuffer *next = buffer->next; + gpu::MTLBuffer *prev = buffer->prev; + + if (prev != nullptr) { + BLI_assert(prev->next == buffer); + prev->next = next; + } + + if (next != nullptr) { + BLI_assert(next->prev == buffer); + next->prev = prev; + } + + if (allocations_list_base_ == buffer) { + allocations_list_base_ = next; + BLI_assert(prev == nullptr); + } + allocations_list_size_--; + +#if MTL_DEBUG_MEMORY_STATISTICS == 1 + total_allocation_bytes_ -= buffer->get_size(); +#endif + + /* Delete buffer. */ + delete buffer; +} + +void MTLBufferPool::allocations_list_delete_all() +{ + gpu::MTLBuffer *current = allocations_list_base_; + while (current != nullptr) { + gpu::MTLBuffer *next = current->next; + delete current; + current = next; + } + allocations_list_size_ = 0; + allocations_list_base_ = nullptr; + +#if MTL_DEBUG_MEMORY_STATISTICS == 1 + total_allocation_bytes_ = 0; +#endif +} + MTLSafeFreeList::MTLSafeFreeList() { reference_count_ = 1; @@ -565,6 +654,9 @@ MTLBuffer::MTLBuffer(id mtl_device, else { data_ = nullptr; } + + /* Linked resources. */ + next = prev = nullptr; } MTLBuffer::MTLBuffer(id external_buffer) @@ -584,6 +676,9 @@ MTLBuffer::MTLBuffer(id external_buffer) this->set_usage_size(size_); data_ = [metal_buffer_ contents]; in_use_ = true; + + /* Linked resources. */ + next = prev = nullptr; } gpu::MTLBuffer::~MTLBuffer() From c3a12704de112696c9d273cff3a5e6b0040b85bd Mon Sep 17 00:00:00 2001 From: Hans Goudey Date: Mon, 19 Jun 2023 21:33:14 -0400 Subject: [PATCH 2/3] Fix: Torus primitive shaded smooth after mesh refactor See #109070 (full fix will contain a more general change). 
--- scripts/startup/bl_operators/add_mesh_torus.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/startup/bl_operators/add_mesh_torus.py b/scripts/startup/bl_operators/add_mesh_torus.py index de23917b2a0..1fa7428983a 100644 --- a/scripts/startup/bl_operators/add_mesh_torus.py +++ b/scripts/startup/bl_operators/add_mesh_torus.py @@ -243,6 +243,7 @@ class AddTorus(Operator, object_utils.AddObjectHelper): mesh.vertices.foreach_set("co", verts_loc) mesh.polygons.foreach_set("loop_start", range(0, nbr_loops, 4)) mesh.loops.foreach_set("vertex_index", faces) + mesh.shade_flat() if self.generate_uvs: add_uvs(mesh, self.minor_segments, self.major_segments) From ee352c968fd165fafd17adcc696e98bb0efa844a Mon Sep 17 00:00:00 2001 From: Hans Goudey Date: Mon, 19 Jun 2023 21:40:59 -0400 Subject: [PATCH 3/3] Fix #109070: Creating mesh from Python skips setting faces sharp The default value of the "sharp_face" attribute is False like other boolean attributes. That mistakenly changed behavior in addons that created meshes with `Mesh.from_pydata`. To fix, add an argument with a default value that maintains the behavior from before, and add convenience, `shade_smooth` and `shade_flat` methods. --- scripts/modules/bpy_types.py | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/scripts/modules/bpy_types.py b/scripts/modules/bpy_types.py index 9d67805a481..9fb0e390b56 100644 --- a/scripts/modules/bpy_types.py +++ b/scripts/modules/bpy_types.py @@ -534,7 +534,7 @@ def ord_ind(i1, i2): class Mesh(bpy_types.ID): __slots__ = () - def from_pydata(self, vertices, edges, faces): + def from_pydata(self, vertices, edges, faces, shade_flat=True): """ Make a mesh from a list of vertices/edges/faces Until we have a nicer way to make geometry, use this. 
@@ -591,6 +591,9 @@ class Mesh(bpy_types.ID): self.polygons.foreach_set("loop_start", loop_starts) self.polygons.foreach_set("vertices", vertex_indices) + if shade_flat: + self.shade_flat() + if edges_len or faces_len: self.update( # Needed to either: @@ -605,6 +608,26 @@ class Mesh(bpy_types.ID): def edge_keys(self): return [ed.key for ed in self.edges] + def shade_flat(self): + """ + Render and display faces uniform, using face normals, + setting the "sharp_face" attribute true for every face + """ + sharp_faces = self.attributes.new("sharp_face", 'BOOLEAN', 'FACE') + for value in sharp_faces.data: + value.value = True + + def shade_smooth(self): + """ + Render and display faces smooth, using interpolated vertex normals, + removing the "sharp_face" attribute + """ + try: + attribute = self.attributes["sharp_face"] + self.attributes.remove(attribute) + except KeyError: + pass + class MeshEdge(StructRNA): __slots__ = ()