Merge branch 'blender-v3.6-release'

Hans Goudey
2023-06-19 21:45:36 -04:00
4 changed files with 152 additions and 18 deletions

View File

@@ -566,7 +566,7 @@ def _name_convention_attribute_remove(attributes, name):
class Mesh(bpy_types.ID):
__slots__ = ()
def from_pydata(self, vertices, edges, faces):
def from_pydata(self, vertices, edges, faces, shade_flat=True):
"""
Make a mesh from a list of vertices/edges/faces
Until we have a nicer way to make geometry, use this.
@@ -623,6 +623,9 @@ class Mesh(bpy_types.ID):
self.polygons.foreach_set("loop_start", loop_starts)
self.polygons.foreach_set("vertices", vertex_indices)
if shade_flat:
self.shade_flat()
if edges_len or faces_len:
self.update(
# Needed to either:
@@ -663,6 +666,22 @@ class Mesh(bpy_types.ID):
def edge_creases_remove(self):
_name_convention_attribute_remove(self.attributes, "crease_edge")
def shade_flat(self):
"""
Render and display faces uniform, using face normals,
setting the "sharp_face" attribute true for every face
"""
sharp_faces = _name_convention_attribute_ensure(self.attributes, "sharp_face", 'FACE', 'BOOLEAN')
for value in sharp_faces.data:
value.value = True
def shade_smooth(self):
"""
Render and display faces smooth, using interpolated vertex normals,
removing the "sharp_face" attribute
"""
_name_convention_attribute_remove(self.attributes, "sharp_face")
class MeshEdge(StructRNA):
__slots__ = ()
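For context, a minimal bpy sketch of the new API surface: from_pydata() now accepts a shade_flat keyword (defaulting to True, which preserves the previous flat-shaded result), and shade_flat()/shade_smooth() can be called directly on a mesh. The object and collection boilerplate below is ordinary bpy usage, not part of this commit.

import bpy

# Build a single triangle; shade_flat=True (the default) marks every face
# sharp via the "sharp_face" attribute, matching the old behavior.
mesh = bpy.data.meshes.new("Triangle")
mesh.from_pydata(
    vertices=[(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)],
    edges=[],
    faces=[(0, 1, 2)],
    shade_flat=True,
)

# The helpers can also be toggled afterwards:
mesh.shade_smooth()  # removes the "sharp_face" attribute
mesh.shade_flat()    # sets "sharp_face" to True for every face

obj = bpy.data.objects.new("Triangle", mesh)
bpy.context.collection.objects.link(obj)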

View File

@@ -246,6 +246,7 @@ class AddTorus(Operator, object_utils.AddObjectHelper):
mesh.vertices.foreach_set("co", verts_loc)
mesh.polygons.foreach_set("loop_start", range(0, nbr_loops, 4))
mesh.loops.foreach_set("vertex_index", faces)
mesh.shade_flat()
if self.generate_uvs:
add_uvs(mesh, self.minor_segments, self.major_segments)
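AddTorus builds its geometry with foreach_set() rather than from_pydata(), so it now calls mesh.shade_flat() explicitly to keep the torus flat-shaded under the new "sharp_face" convention. Per the bpy_types.py change above, that helper just ensures a boolean face attribute and sets it; a rough equivalent using the generic attribute API (a sketch, not code from this commit):

# What mesh.shade_flat() amounts to, per the implementation above:
attr = mesh.attributes.get("sharp_face")
if attr is None:
    attr = mesh.attributes.new(name="sharp_face", type='BOOLEAN', domain='FACE')
for item in attr.data:
    item.value = True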

View File

@@ -109,6 +109,11 @@ class MTLUniformBuf;
/* MTLBuffer allocation wrapper. */
class MTLBuffer {
public:
/* NOTE: ListBase API is not used because a custom destructor is required to release
* the Metal Objective-C buffer resource. */
gpu::MTLBuffer *next, *prev;
private:
/* Metal resource. */
id<MTLBuffer> metal_buffer_;
@@ -179,6 +184,8 @@ class MTLBuffer {
/* Safety check to ensure buffers are not used after free. */
void debug_ensure_used();
MEM_CXX_CLASS_ALLOC_FUNCS("MTLBuffer");
};
/* View into part of an MTLBuffer. */
@@ -335,6 +342,8 @@ class MTLSafeFreeList {
}
}
}
MEM_CXX_CLASS_ALLOC_FUNCS("MTLSafeFreeList");
};
/* MTLBuffer pools. */
@@ -364,7 +373,7 @@ class MTLBufferPool {
#endif
/* Metal resources. */
bool ensure_initialised_ = false;
bool initialized_ = false;
id<MTLDevice> device_ = nil;
/* The buffer selection aims to pick a buffer which meets the minimum size requirements.
@@ -391,7 +400,10 @@ class MTLBufferPool {
std::mutex buffer_pool_lock_;
blender::Map<MTLBufferResourceOptions, MTLBufferPoolOrderedList *> buffer_pools_;
blender::Vector<gpu::MTLBuffer *> allocations_;
/* Linked list to track all existing allocations. Prioritizing fast insert/deletion. */
gpu::MTLBuffer *allocations_list_base_;
uint allocations_list_size_;
/* Maintain a queue of all MTLSafeFreeList's that have been released
* by the GPU and are ready to have their buffers re-inserted into the
@@ -434,6 +446,11 @@ class MTLBufferPool {
void ensure_buffer_pool(MTLResourceOptions options);
void insert_buffer_into_pool(MTLResourceOptions options, gpu::MTLBuffer *buffer);
void free();
/* Allocations list. */
void allocations_list_insert(gpu::MTLBuffer *buffer);
void allocations_list_delete(gpu::MTLBuffer *buffer);
void allocations_list_delete_all();
};
/* Scratch buffers are circular-buffers used for temporary data within the current frame.
@@ -494,6 +511,8 @@ class MTLScratchBufferManager {
* This call will perform a partial flush of the buffer starting from
* the last offset the data was flushed from, to the current offset. */
void flush_active_scratch_buffer();
MEM_CXX_CLASS_ALLOC_FUNCS("MTLBufferPool");
};
/** \} */
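The header changes above replace the blender::Vector allocation tracking with an intrusive doubly-linked list: each gpu::MTLBuffer now carries its own next/prev links, and the pool keeps only a head pointer (allocations_list_base_) and a count (allocations_list_size_), so removing an arbitrary live buffer needs no search and no reallocation. A minimal Python sketch of that bookkeeping (hypothetical Node/Pool names, not Blender API):

class Node:
    """Stand-in for gpu::MTLBuffer: the links live on the allocation itself."""
    def __init__(self):
        self.next = None
        self.prev = None

class Pool:
    """Stand-in for MTLBufferPool's allocations list."""
    def __init__(self):
        self.base = None  # allocations_list_base_
        self.size = 0     # allocations_list_size_

    def insert(self, node):
        # Push at the head, as in allocations_list_insert().
        node.next = self.base
        node.prev = None
        if self.base is not None:
            self.base.prev = node
        self.base = node
        self.size += 1

    def delete(self, node):
        # Unlink in O(1) via the node's own links, as in allocations_list_delete().
        if node.prev is not None:
            node.prev.next = node.next
        if node.next is not None:
            node.next.prev = node.prev
        if self.base is node:
            self.base = node.next
        self.size -= 1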

View File

@@ -27,9 +27,9 @@ namespace blender::gpu {
void MTLBufferPool::init(id<MTLDevice> mtl_device)
{
if (!ensure_initialised_) {
if (!initialized_) {
BLI_assert(mtl_device);
ensure_initialised_ = true;
initialized_ = true;
device_ = mtl_device;
#if MTL_DEBUG_MEMORY_STATISTICS == 1
@@ -41,6 +41,10 @@ void MTLBufferPool::init(id<MTLDevice> mtl_device)
/* Track pool allocation size. */
allocations_in_pool_ = 0;
/* Live allocations list. */
allocations_list_base_ = nullptr;
allocations_list_size_ = 0;
/* Free pools -- Create initial safe free pool */
BLI_assert(current_free_list_ == nullptr);
this->begin_new_safe_list();
@@ -55,17 +59,29 @@ MTLBufferPool::~MTLBufferPool()
void MTLBufferPool::free()
{
buffer_pool_lock_.lock();
for (auto buffer : allocations_) {
BLI_assert(buffer);
delete buffer;
}
allocations_.clear();
/* Delete all existing allocations. */
allocations_list_delete_all();
/* Release safe free lists. */
for (int safe_pool_free_index = 0; safe_pool_free_index < completed_safelist_queue_.size();
safe_pool_free_index++)
{
delete completed_safelist_queue_[safe_pool_free_index];
}
completed_safelist_queue_.clear();
if (current_free_list_ != nullptr) {
delete current_free_list_;
current_free_list_ = nullptr;
}
/* Clear and release memory pools. */
for (std::multiset<blender::gpu::MTLBufferHandle, blender::gpu::CompareMTLBuffer> *buffer_pool :
buffer_pools_.values())
{
delete buffer_pool;
}
buffer_pools_.clear();
buffer_pool_lock_.unlock();
}
@@ -156,10 +172,7 @@ gpu::MTLBuffer *MTLBufferPool::allocate_aligned(uint64_t size,
new_buffer = new gpu::MTLBuffer(device_, size, options, alignment);
/* Track allocation in context. */
allocations_.append(new_buffer);
#if MTL_DEBUG_MEMORY_STATISTICS == 1
total_allocation_bytes_ += aligned_alloc_size;
#endif
allocations_list_insert(new_buffer);
}
else {
/* Re-use suitable buffer. */
@@ -290,6 +303,7 @@ void MTLBufferPool::update_memory_pools()
* animation.
* If memory is continually used, then we do not want to free this memory as it will be
* re-allocated during a short time period. */
const time_t time_now = std::time(nullptr);
for (auto buffer_pool_list : buffer_pools_.items()) {
MTLBufferPoolOrderedList *pool_allocations = buffer_pool_list.value;
@@ -324,12 +338,13 @@ void MTLBufferPool::update_memory_pools()
if (time_passed > deletion_time_threshold_s) {
/* Delete allocation. */
delete handle.buffer;
/* Remove buffer from global allocations list and release resource. */
allocations_list_delete(handle.buffer);
/* Remove buffer from pool and update pool statistics. */
pool_iterator = pool_allocations->erase(pool_iterator);
allocations_in_pool_ -= handle.buffer_size;
#if MTL_DEBUG_MEMORY_STATISTICS == 1
total_allocation_bytes_ -= handle.buffer_size;
buffers_in_pool_--;
#endif
continue;
@@ -344,7 +359,7 @@ void MTLBufferPool::update_memory_pools()
uint framealloc = (uint)per_frame_allocation_count_;
printf(" Allocations in frame: %u\n", framealloc);
printf(" Total Buffers allocated: %u\n", (uint)allocations_.size());
printf(" Total Buffers allocated: %u\n", allocations_list_size_);
printf(" Total Memory allocated: %u MB\n", (uint)total_allocation_bytes_ / (1024 * 1024));
uint allocs = (uint)(allocations_in_pool_) / 1024 / 1024;
@@ -454,6 +469,80 @@ void MTLBufferPool::insert_buffer_into_pool(MTLResourceOptions options, gpu::MTL
#endif
}
void MTLBufferPool::allocations_list_insert(gpu::MTLBuffer *buffer)
{
/* NOTE: Function should only be called while buffer_pool_lock_ is acquired. */
BLI_assert(initialized_);
BLI_assert(buffer != nullptr);
/* Insert buffer at base of allocations list. */
gpu::MTLBuffer *current_head = allocations_list_base_;
buffer->next = current_head;
buffer->prev = nullptr;
if (current_head != nullptr) {
current_head->prev = buffer;
}
allocations_list_base_ = buffer;
allocations_list_size_++;
#if MTL_DEBUG_MEMORY_STATISTICS == 1
total_allocation_bytes_ += buffer->get_size();
#endif
}
void MTLBufferPool::allocations_list_delete(gpu::MTLBuffer *buffer)
{
/* NOTE: Function should only be called while buffer_pool_lock_ is acquired. */
/* Remove a buffer link in the allocations chain. */
BLI_assert(initialized_);
BLI_assert(buffer != nullptr);
BLI_assert(allocations_list_size_ >= 1);
gpu::MTLBuffer *next = buffer->next;
gpu::MTLBuffer *prev = buffer->prev;
if (prev != nullptr) {
BLI_assert(prev->next == buffer);
prev->next = next;
}
if (next != nullptr) {
BLI_assert(next->prev == buffer);
next->prev = prev;
}
if (allocations_list_base_ == buffer) {
allocations_list_base_ = next;
BLI_assert(prev == nullptr);
}
allocations_list_size_--;
#if MTL_DEBUG_MEMORY_STATISTICS == 1
total_allocation_bytes_ -= buffer->get_size();
#endif
/* Delete buffer. */
delete buffer;
}
void MTLBufferPool::allocations_list_delete_all()
{
gpu::MTLBuffer *current = allocations_list_base_;
while (current != nullptr) {
gpu::MTLBuffer *next = current->next;
delete current;
current = next;
}
allocations_list_size_ = 0;
allocations_list_base_ = nullptr;
#if MTL_DEBUG_MEMORY_STATISTICS == 1
total_allocation_bytes_ = 0;
#endif
}
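allocations_list_delete_all() has to cache the next link before deleting the current buffer, because the link itself is stored inside the object being freed. The same walk, continuing the Python sketch shown after the header diff above:

def delete_all(pool):
    current = pool.base
    while current is not None:
        next_node = current.next  # grab the link before the node is released
        # (in the C++ version, `delete current` frees the MTLBuffer here)
        current = next_node
    pool.base = None
    pool.size = 0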
MTLSafeFreeList::MTLSafeFreeList()
{
reference_count_ = 1;
@@ -566,6 +655,9 @@ MTLBuffer::MTLBuffer(id<MTLDevice> mtl_device,
else {
data_ = nullptr;
}
/* Linked resources. */
next = prev = nullptr;
}
MTLBuffer::MTLBuffer(id<MTLBuffer> external_buffer)
@@ -585,6 +677,9 @@ MTLBuffer::MTLBuffer(id<MTLBuffer> external_buffer)
this->set_usage_size(size_);
data_ = [metal_buffer_ contents];
in_use_ = true;
/* Linked resources. */
next = prev = nullptr;
}
gpu::MTLBuffer::~MTLBuffer()