GPU: Batch: Remove unused instance attributes
Instance attributes have been completely replaced by SSBOs over time. Removing them reduces the API surface and avoids untested/legacy paths in drivers that are known to break (see #134509).

Pull Request: https://projects.blender.org/blender/blender/pulls/145242
@@ -31,7 +31,6 @@ class Shader;
 } // namespace blender::gpu
 
 constexpr static int GPU_BATCH_VBO_MAX_LEN = 16;
-constexpr static int GPU_BATCH_INST_VBO_MAX_LEN = 2;
 constexpr static int GPU_BATCH_VAO_STATIC_LEN = 3;
 constexpr static int GPU_BATCH_VAO_DYN_ALLOC_COUNT = 16;
 
@@ -43,13 +42,8 @@ enum eGPUBatchFlag {
   GPU_BATCH_OWNS_VBO = (1 << 0),
   GPU_BATCH_OWNS_VBO_MAX = (GPU_BATCH_OWNS_VBO << (GPU_BATCH_VBO_MAX_LEN - 1)),
   GPU_BATCH_OWNS_VBO_ANY = ((GPU_BATCH_OWNS_VBO << GPU_BATCH_VBO_MAX_LEN) - 1),
-  /** Instance blender::gpu::VertBuf ownership. (One bit per vbo) */
-  GPU_BATCH_OWNS_INST_VBO = (GPU_BATCH_OWNS_VBO_MAX << 1),
-  GPU_BATCH_OWNS_INST_VBO_MAX = (GPU_BATCH_OWNS_INST_VBO << (GPU_BATCH_INST_VBO_MAX_LEN - 1)),
-  GPU_BATCH_OWNS_INST_VBO_ANY = ((GPU_BATCH_OWNS_INST_VBO << GPU_BATCH_INST_VBO_MAX_LEN) - 1) &
-                                ~GPU_BATCH_OWNS_VBO_ANY,
   /** blender::gpu::IndexBuf ownership. */
-  GPU_BATCH_OWNS_INDEX = (GPU_BATCH_OWNS_INST_VBO_MAX << 1),
+  GPU_BATCH_OWNS_INDEX = (GPU_BATCH_OWNS_VBO_MAX << 1),
 
   /** Has been initialized. At least one VBO is set. */
   GPU_BATCH_INIT = (1 << 26),
@@ -80,8 +74,6 @@ class Batch {
  public:
   /** verts[0] is required, others can be nullptr */
   blender::gpu::VertBuf *verts[GPU_BATCH_VBO_MAX_LEN];
-  /** Instance attributes. */
-  blender::gpu::VertBuf *inst[GPU_BATCH_INST_VBO_MAX_LEN];
   /** nullptr if element list not needed */
   blender::gpu::IndexBuf *elem;
   /** Number of vertices to draw for procedural drawcalls. -1 otherwise. */
@@ -119,10 +111,6 @@ class Batch {
   {
     return verts[index];
   }
-  VertBuf *inst_(const int index) const
-  {
-    return inst[index];
-  }
 };
 
 } // namespace blender::gpu
@@ -228,22 +216,6 @@ int GPU_batch_vertbuf_add(blender::gpu::Batch *batch,
                           blender::gpu::VertBuf *vertex_buf,
                           bool own_vbo);
 
-/**
- * Add the given \a vertex_buf as instanced vertex buffer to a #blender::gpu::Batch.
- * \return the index of verts in the batch.
- */
-int GPU_batch_instbuf_add(blender::gpu::Batch *batch,
-                          blender::gpu::VertBuf *vertex_buf,
-                          bool own_vbo);
-
-/**
- * Set the first instanced vertex buffer of a #blender::gpu::Batch.
- * \note Override ONLY the first instance VBO (and free them if owned).
- */
-void GPU_batch_instbuf_set(blender::gpu::Batch *batch,
-                           blender::gpu::VertBuf *vertex_buf,
-                           bool own_vbo);
-
 /**
  * Set the index buffer of a #blender::gpu::Batch.
  * \note Override any previously assigned index buffer (and free it if owned).
@@ -34,7 +34,6 @@ using namespace blender::gpu;
 void GPU_batch_zero(Batch *batch)
 {
   std::fill_n(batch->verts, ARRAY_SIZE(batch->verts), nullptr);
-  std::fill_n(batch->inst, ARRAY_SIZE(batch->inst), nullptr);
   batch->elem = nullptr;
   batch->flag = eGPUBatchFlag(0);
   batch->prim_type = GPUPrimType(0);
@@ -74,9 +73,6 @@ void GPU_batch_init_ex(Batch *batch,
   for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; v++) {
     batch->verts[v] = nullptr;
   }
-  for (auto &v : batch->inst) {
-    v = nullptr;
-  }
   batch->elem = index_buf;
   batch->prim_type = primitive_type;
   batch->flag = owns_flag | GPU_BATCH_INIT | GPU_BATCH_DIRTY;
@@ -90,9 +86,6 @@ Batch *GPU_batch_create_procedural(GPUPrimType primitive_type, int32_t vertex_co
   for (auto &v : batch->verts) {
     v = nullptr;
   }
-  for (auto &v : batch->inst) {
-    v = nullptr;
-  }
   batch->elem = nullptr;
   batch->prim_type = primitive_type;
   batch->flag = GPU_BATCH_INIT | GPU_BATCH_DIRTY;
@@ -126,13 +119,6 @@ void GPU_batch_clear(Batch *batch)
       }
     }
   }
-  if (batch->flag & GPU_BATCH_OWNS_INST_VBO_ANY) {
-    for (int v = 0; (v < GPU_BATCH_INST_VBO_MAX_LEN) && batch->inst[v]; v++) {
-      if (batch->flag & (GPU_BATCH_OWNS_INST_VBO << v)) {
-        GPU_VERTBUF_DISCARD_SAFE(batch->inst[v]);
-      }
-    }
-  }
   batch->flag = GPU_BATCH_INVALID;
   batch->procedural_vertices = -1;
 }
@@ -149,19 +135,6 @@ void GPU_batch_discard(Batch *batch)
 /** \name Buffers Management
  * \{ */
 
-void GPU_batch_instbuf_set(Batch *batch, VertBuf *vertex_buf, bool own_vbo)
-{
-  BLI_assert(vertex_buf);
-  batch->flag |= GPU_BATCH_DIRTY;
-
-  if (batch->inst[0] && (batch->flag & GPU_BATCH_OWNS_INST_VBO)) {
-    GPU_vertbuf_discard(batch->inst[0]);
-  }
-  batch->inst[0] = vertex_buf;
-
-  SET_FLAG_FROM_TEST(batch->flag, own_vbo, GPU_BATCH_OWNS_INST_VBO);
-}
-
 void GPU_batch_elembuf_set(Batch *batch, blender::gpu::IndexBuf *index_buf, bool own_ibo)
 {
   BLI_assert(index_buf);
@@ -175,29 +148,6 @@ void GPU_batch_elembuf_set(Batch *batch, blender::gpu::IndexBuf *index_buf, bool
   SET_FLAG_FROM_TEST(batch->flag, own_ibo, GPU_BATCH_OWNS_INDEX);
 }
 
-int GPU_batch_instbuf_add(Batch *batch, VertBuf *vertex_buf, bool own_vbo)
-{
-  BLI_assert(vertex_buf);
-  batch->flag |= GPU_BATCH_DIRTY;
-
-  for (uint v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
-    if (batch->inst[v] == nullptr) {
-      /* for now all VertexBuffers must have same vertex_len */
-      if (batch->inst[0]) {
-        /* Allow for different size of vertex buffer (will choose the smallest number of verts). */
-        // BLI_assert(insts->vertex_len == batch->inst[0]->vertex_len);
-      }
-
-      batch->inst[v] = vertex_buf;
-      SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_INST_VBO << v));
-      return v;
-    }
-  }
-  /* we only make it this far if there is no room for another VertBuf */
-  BLI_assert_msg(0, "Not enough Instance VBO slot in batch");
-  return -1;
-}
-
 int GPU_batch_vertbuf_add(Batch *batch, VertBuf *vertex_buf, bool own_vbo)
 {
   BLI_assert(vertex_buf);
@@ -366,12 +316,7 @@ void GPU_batch_draw_parameter_get(Batch *batch,
     *r_base_index = -1;
   }
 
-  int i_count = (batch->inst[0]) ? batch->inst_(0)->vertex_len : 1;
-  /* Meh. This is to be able to use different numbers of verts in instance VBO's. */
-  if (batch->inst[1] != nullptr) {
-    i_count = min_ii(i_count, batch->inst_(1)->vertex_len);
-  }
-  *r_instance_count = i_count;
+  *r_instance_count = 1;
 }
 
 blender::IndexRange GPU_batch_draw_expanded_parameter_get(GPUPrimType input_prim_type,
@@ -464,7 +409,6 @@ void GPU_batch_draw_range(Batch *batch, int vertex_first, int vertex_count)
 void GPU_batch_draw_instance_range(Batch *batch, int instance_first, int instance_count)
 {
   BLI_assert(batch != nullptr);
-  BLI_assert(batch->inst[0] == nullptr);
   /* Not polyline shaders support instancing. */
   BLI_assert(batch->shader->is_polyline == false);
 
@@ -491,11 +435,7 @@ void GPU_batch_draw_advanced(
     }
   }
   if (instance_count == 0) {
-    instance_count = (batch->inst[0]) ? batch->inst_(0)->vertex_len : 1;
-    /* Meh. This is to be able to use different numbers of verts in instance VBO's. */
-    if (batch->inst[1] != nullptr) {
-      instance_count = min_ii(instance_count, batch->inst_(1)->vertex_len);
-    }
+    instance_count = 1;
   }
 
   if (vertex_count == 0 || instance_count == 0) {
@@ -27,7 +27,6 @@ class MTLShaderInterface;
 
 struct VertexBufferID {
   uint32_t id : 16;
-  uint32_t is_instance : 15;
   uint32_t used : 1;
 };
 
@@ -104,10 +103,6 @@ class MTLBatch : public Batch {
   {
     return static_cast<MTLVertBuf *>(verts[index]);
   }
-  MTLVertBuf *inst_(const int index) const
-  {
-    return static_cast<MTLVertBuf *>(inst[index]);
-  }
   MTLShader *active_shader_get() const
   {
     return active_shader_;
@@ -410,11 +410,9 @@ void MTLBatch::prepare_vertex_descriptor_and_bindings(MTLVertBuf **buffers, int
   /* Reset vertex descriptor to default state. */
   desc.reset_vertex_descriptor();
 
-  /* Fetch Vertex and Instance Buffers. */
+  /* Fetch Vertex Buffers. */
   Span<MTLVertBuf *> mtl_verts(reinterpret_cast<MTLVertBuf **>(this->verts),
                                GPU_BATCH_VBO_MAX_LEN);
-  Span<MTLVertBuf *> mtl_inst(reinterpret_cast<MTLVertBuf **>(this->inst),
-                              GPU_BATCH_INST_VBO_MAX_LEN);
 
   /* Resolve Metal vertex buffer bindings. */
   /* Vertex Descriptors
@@ -436,14 +434,8 @@ void MTLBatch::prepare_vertex_descriptor_and_bindings(MTLVertBuf **buffers, int
 
   for (int bid = 0; bid < GPU_BATCH_VBO_MAX_LEN; ++bid) {
     if (descriptor->bufferIds[bid].used) {
-      if (descriptor->bufferIds[bid].is_instance) {
-        buffers[bid] = mtl_inst[descriptor->bufferIds[bid].id];
-        buffer_is_instanced[bid] = true;
-      }
-      else {
-        buffers[bid] = mtl_verts[descriptor->bufferIds[bid].id];
-        buffer_is_instanced[bid] = false;
-      }
+      buffers[bid] = mtl_verts[descriptor->bufferIds[bid].id];
+      buffer_is_instanced[bid] = false;
     }
   }
 }
@@ -453,28 +445,10 @@ void MTLBatch::prepare_vertex_descriptor_and_bindings(MTLVertBuf **buffers, int
 
   for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN; ++i) {
     pair.bufferIds[i].id = -1;
-    pair.bufferIds[i].is_instance = 0;
     pair.bufferIds[i].used = 0;
   }
   /* NOTE: Attribute extraction order from buffer is the reverse of the OpenGL as we flag once an
    * attribute is found, rather than pre-setting the mask. */
-  /* Extract Instance attributes (These take highest priority). */
-  for (int v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
-    if (mtl_inst[v]) {
-      MTL_LOG_DEBUG(" -- [Batch] Checking bindings for bound instance buffer %p", mtl_inst[v]);
-      int buffer_ind = this->prepare_vertex_binding(
-          mtl_inst[v], desc, interface, attr_mask, true);
-      if (buffer_ind >= 0) {
-        buffers[buffer_ind] = mtl_inst[v];
-        buffer_is_instanced[buffer_ind] = true;
-
-        pair.bufferIds[buffer_ind].id = v;
-        pair.bufferIds[buffer_ind].used = 1;
-        pair.bufferIds[buffer_ind].is_instance = 1;
-        num_buffers = ((buffer_ind + 1) > num_buffers) ? (buffer_ind + 1) : num_buffers;
-      }
-    }
-  }
 
   /* Extract Vertex attributes (First-bound vertex buffer takes priority). */
   for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
@@ -488,7 +462,6 @@ void MTLBatch::prepare_vertex_descriptor_and_bindings(MTLVertBuf **buffers, int
 
       pair.bufferIds[buffer_ind].id = v;
       pair.bufferIds[buffer_ind].used = 1;
-      pair.bufferIds[buffer_ind].is_instance = 0;
       num_buffers = ((buffer_ind + 1) > num_buffers) ? (buffer_ind + 1) : num_buffers;
     }
   }
@@ -121,8 +121,7 @@ struct MTLVertexDescriptor {
 
   /* Core Vertex Attributes. */
   MTLVertexAttributeDescriptorPSO attributes[GPU_VERT_ATTR_MAX_LEN];
-  MTLVertexBufferLayoutDescriptorPSO
-      buffer_layouts[GPU_BATCH_VBO_MAX_LEN + GPU_BATCH_INST_VBO_MAX_LEN];
+  MTLVertexBufferLayoutDescriptorPSO buffer_layouts[GPU_BATCH_VBO_MAX_LEN];
   int max_attribute_value;
   int total_attributes;
   int num_vert_buffers;
@@ -221,7 +221,7 @@ GLuint GLVaoCache::vao_get(Batch *batch)
     /* Cache miss, create a new VAO. */
     glGenVertexArrays(1, &vao_id_);
     this->insert(interface_, vao_id_);
-    GLVertArray::update_bindings(vao_id_, batch, interface_, 0);
+    GLVertArray::update_bindings(vao_id_, batch, interface_);
   }
 }
 
@@ -108,10 +108,6 @@ class GLBatch : public Batch {
   {
     return static_cast<GLVertBuf *>(verts[index]);
   }
-  GLVertBuf *inst_(const int index) const
-  {
-    return static_cast<GLVertBuf *>(inst[index]);
-  }
 
   MEM_CXX_CLASS_ALLOC_FUNCS("GLBatch");
 };
@@ -81,8 +81,7 @@ static uint16_t vbo_bind(const ShaderInterface *interface,
 
 void GLVertArray::update_bindings(const GLuint vao,
                                   const Batch *batch_, /* Should be GLBatch. */
-                                  const ShaderInterface *interface,
-                                  const int base_instance)
+                                  const ShaderInterface *interface)
 {
   const GLBatch *batch = static_cast<const GLBatch *>(batch_);
   uint16_t attr_mask = interface->enabled_attr_mask_;
@@ -98,14 +97,6 @@ void GLVertArray::update_bindings(const GLuint vao,
     }
   }
 
-  for (int v = GPU_BATCH_INST_VBO_MAX_LEN - 1; v > -1; v--) {
-    GLVertBuf *vbo = batch->inst_(v);
-    if (vbo) {
-      vbo->bind();
-      attr_mask &= ~vbo_bind(interface, &vbo->format, base_instance, vbo->vertex_len, true);
-    }
-  }
-
   if (attr_mask != 0) {
     for (uint16_t mask = 1, a = 0; a < 16; a++, mask <<= 1) {
       if (attr_mask & mask) {
@@ -19,10 +19,7 @@ namespace GLVertArray {
 /**
  * Update the Attribute Binding of the currently bound VAO.
  */
-void update_bindings(const GLuint vao,
-                     const Batch *batch,
-                     const ShaderInterface *interface,
-                     int base_instance);
+void update_bindings(const GLuint vao, const Batch *batch, const ShaderInterface *interface);
 
 /**
  * Another version of update_bindings for Immediate mode.
@@ -25,7 +25,6 @@ class VKBatch : public Batch {
   void multi_draw_indirect(VkBuffer indirect_buf, int count, intptr_t offset, intptr_t stride);
 
   VKVertexBuffer *vertex_buffer_get(int index);
-  VKVertexBuffer *instance_buffer_get(int index);
   VKIndexBuffer *index_buffer_get();
 };
 
@@ -39,11 +38,6 @@ inline VKVertexBuffer *VKBatch::vertex_buffer_get(int index)
   return unwrap(verts_(index));
 }
 
-inline VKVertexBuffer *VKBatch::instance_buffer_get(int index)
-{
-  return unwrap(inst_(index));
-}
-
 inline VKIndexBuffer *VKBatch::index_buffer_get()
 {
   return unwrap(unwrap(elem));
@@ -92,13 +92,6 @@ void VKVertexAttributeObject::update_bindings(const VKContext &context, VKBatch
   const VKShaderInterface &interface = unwrap(context.shader)->interface_get();
   AttributeMask occupied_attributes = 0;
 
-  for (int v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
-    VKVertexBuffer *vbo = batch.instance_buffer_get(v);
-    if (vbo) {
-      update_bindings(
-          vbo->format, vbo, nullptr, vbo->vertex_len, interface, occupied_attributes, true);
-    }
-  }
   for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
     VKVertexBuffer *vbo = batch.vertex_buffer_get(v);
     if (vbo) {