Vulkan: Render graph direct compute

This PR adds support for compute shaders to the render graph. Only direct dispatch
is supported; indirect dispatch will be added in a future PR.

This change enables the following test cases to pass when using render graphs:

- `GPUVulkanTest.push_constants*`
- `GPUVulkanTest.shader_compute_*`
- `GPUVulkanTest.buffer_texture`
- `GPUVulkanTest.specialization_constants_compute`
- `GPUVulkanTest.compute_direct`

```
[==========] 95 tests from 2 test suites ran. (24059 ms total)
[  PASSED  ] 95 tests.
```

Specialization constants are supported when using the render graph. This should conclude
the conversion of the render graph prototype.

Pull Request: https://projects.blender.org/blender/blender/pulls/120963
This commit is contained in:
Jeroen Bakker
2024-04-23 15:43:32 +02:00
parent 2a1b4bf219
commit 164fb68386
7 changed files with 130 additions and 31 deletions

View File

@@ -140,11 +140,20 @@ void VKBackend::samplers_update()
void VKBackend::compute_dispatch(int groups_x_len, int groups_y_len, int groups_z_len)
{
  /* Record a direct compute dispatch.
   *
   * When the render graph is enabled a dispatch node is built from the current context state
   * and appended to the graph; otherwise the dispatch is recorded immediately into the
   * command buffers (legacy path). */
  VKContext &context = *VKContext::get();
  if (use_render_graph) {
    /* `update_and_get_dispatch_info` flushes push constants, applies bindings and fills in the
     * pipeline data; only the group counts still need to be set here.
     * NOTE: the returned create-info is a reused member of the context
     * (`VKDispatchNode::CreateInfo`, see `VKContext::update_and_get_dispatch_info`). */
    render_graph::VKDispatchNode::CreateInfo &dispatch_info =
        context.update_and_get_dispatch_info();
    dispatch_info.dispatch_node.group_count_x = groups_x_len;
    dispatch_info.dispatch_node.group_count_y = groups_y_len;
    dispatch_info.dispatch_node.group_count_z = groups_z_len;
    context.render_graph.add_node(dispatch_info);
  }
  else {
    /* Legacy path: bind resources and record the dispatch right away. */
    render_graph::VKResourceAccessInfo resource_access_info = {};
    context.state_manager_get().apply_bindings(context, resource_access_info);
    context.bind_compute_pipeline();
    VKCommandBuffers &command_buffers = context.command_buffers_get();
    command_buffers.dispatch(groups_x_len, groups_y_len, groups_z_len);
  }
}
void VKBackend::compute_dispatch_indirect(StorageBuf *indirect_buf)

View File

@@ -213,6 +213,49 @@ void VKContext::bind_compute_pipeline()
}
}
void VKContext::update_pipeline_data(render_graph::VKPipelineData &pipeline_data)
{
  /* Fill `pipeline_data` from the currently bound shader. The structure is reused between
   * render graph nodes, so every field is (re)written here. */
  VKShader &shader_vk = unwrap(*shader);
  pipeline_data.vk_pipeline_layout = shader_vk.vk_pipeline_layout_get();

  /* Descriptor set: only filled in when the shader actually declares one. */
  pipeline_data.vk_descriptor_set = VK_NULL_HANDLE;
  if (shader_vk.has_descriptor_set()) {
    descriptor_set_.update(*this);
    pipeline_data.vk_descriptor_set = descriptor_set_get().active_descriptor_set()->vk_handle();
  }

  /* Push constants: only when they are stored as actual Vulkan push constants (other storage
   * types are handled via buffers/bindings). */
  pipeline_data.push_constants_data = nullptr;
  pipeline_data.push_constants_size = 0;
  const VKPushConstants::Layout &constants_layout =
      shader_vk.interface_get().push_constants_layout_get();
  if (constants_layout.storage_type_get() == VKPushConstants::StorageType::PUSH_CONSTANTS) {
    pipeline_data.push_constants_size = constants_layout.size_in_bytes();
    pipeline_data.push_constants_data = shader_vk.push_constants.data();
  }
}
void VKContext::update_dispatch_info()
{
dispatch_info_.dispatch_node = {};
dispatch_info_.resources.reset();
state_manager_get().apply_bindings(*this, dispatch_info_.resources);
update_pipeline_data(dispatch_info_.dispatch_node.pipeline_data);
VKShader &vk_shader = unwrap(*shader);
VkPipeline vk_pipeline = vk_shader.ensure_and_get_compute_pipeline();
dispatch_info_.dispatch_node.pipeline_data.vk_pipeline = vk_pipeline;
}
render_graph::VKDispatchNode::CreateInfo &VKContext::update_and_get_dispatch_info()
{
  /* Push constants must be flushed before the dispatch node data is (re)built, as
   * `update_dispatch_info` reads them via the pipeline data.
   * NOTE: the returned reference points at a member that is reused between nodes. */
  VKShader &shader_vk = unwrap(*this->shader);
  shader_vk.push_constants.update(*this);
  update_dispatch_info();
  return dispatch_info_;
}
/** \} */
/* -------------------------------------------------------------------- */

View File

@@ -35,6 +35,8 @@ class VKContext : public Context, NonCopyable {
GPUTexture *surface_texture_ = nullptr;
void *ghost_context_;
render_graph::VKDispatchNode::CreateInfo dispatch_info_ = {};
public:
render_graph::VKRenderGraph render_graph;
@@ -70,6 +72,9 @@ class VKContext : public Context, NonCopyable {
VKFrameBuffer *active_framebuffer_get() const;
void bind_compute_pipeline();
void update_dispatch_info();
render_graph::VKDispatchNode::CreateInfo &update_and_get_dispatch_info();
void bind_graphics_pipeline(const GPUPrimType prim_type,
const VKVertexAttributeObject &vertex_attribute_object);
void sync_backbuffer();
@@ -102,6 +107,13 @@ class VKContext : public Context, NonCopyable {
private:
void swap_buffers_pre_handler(const GHOST_VulkanSwapChainData &data);
void swap_buffers_post_handler();
/**
* Update the give shader data with the current state of the context.
*
* NOTE: Shader data structure is reused between render graph nodes.
*/
void update_pipeline_data(render_graph::VKPipelineData &pipeline_data);
};
BLI_INLINE bool operator==(const VKContext &a, const VKContext &b)

View File

@@ -182,6 +182,11 @@ class VKDescriptorSetTracker : protected VKResourceTracker<VKDescriptorSet> {
VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint vk_pipeline_bind_point);
/**
* Update the descriptor set on the device.
*/
void update(VKContext &context);
void debug_print() const;
protected:
@@ -189,11 +194,6 @@ class VKDescriptorSetTracker : protected VKResourceTracker<VKDescriptorSet> {
private:
Binding &ensure_location(VKDescriptorSet::Location location);
/**
* Update the descriptor set on the device.
*/
void update(VKContext &context);
};
} // namespace blender::gpu

View File

@@ -95,7 +95,6 @@ class VKPipelinePool : public NonCopyable {
public:
VKPipelinePool();
/**
* Get an existing or create a new compute pipeline based on the provided ComputeInfo.
*

View File

@@ -664,23 +664,26 @@ bool VKShader::finalize(const shader::ShaderCreateInfo *info)
push_constants = VKPushConstants(&vk_interface->push_constants_layout_get());
/* TODO we might need to move the actual pipeline construction to a later stage as the graphics
* pipeline requires more data before it can be constructed. */
bool result;
if (is_graphics_shader()) {
BLI_assert((fragment_module_ != VK_NULL_HANDLE && info->tf_type_ == GPU_SHADER_TFB_NONE) ||
(fragment_module_ == VK_NULL_HANDLE && info->tf_type_ != GPU_SHADER_TFB_NONE));
BLI_assert(compute_module_ == VK_NULL_HANDLE);
pipeline_ = VKPipeline::create_graphics_pipeline();
if (use_render_graph) {
result = true;
}
else {
BLI_assert(vertex_module_ == VK_NULL_HANDLE);
BLI_assert(geometry_module_ == VK_NULL_HANDLE);
BLI_assert(fragment_module_ == VK_NULL_HANDLE);
BLI_assert(compute_module_ != VK_NULL_HANDLE);
pipeline_ = VKPipeline::create_compute_pipeline(compute_module_, vk_pipeline_layout_);
result = pipeline_.is_valid();
if (is_graphics_shader()) {
BLI_assert((fragment_module_ != VK_NULL_HANDLE && info->tf_type_ == GPU_SHADER_TFB_NONE) ||
(fragment_module_ == VK_NULL_HANDLE && info->tf_type_ != GPU_SHADER_TFB_NONE));
BLI_assert(compute_module_ == VK_NULL_HANDLE);
pipeline_ = VKPipeline::create_graphics_pipeline();
result = true;
}
else {
BLI_assert(vertex_module_ == VK_NULL_HANDLE);
BLI_assert(geometry_module_ == VK_NULL_HANDLE);
BLI_assert(fragment_module_ == VK_NULL_HANDLE);
BLI_assert(compute_module_ != VK_NULL_HANDLE);
pipeline_ = VKPipeline::create_compute_pipeline(compute_module_, vk_pipeline_layout_);
result = pipeline_.is_valid();
}
}
if (result) {
@@ -799,24 +802,25 @@ std::string VKShader::resources_declare(const shader::ShaderCreateInfo &info) co
interface.init(info);
std::stringstream ss;
/* TODO: Add support for specialization constants at compile time. */
ss << "\n/* Specialization Constants (pass-through). */\n";
uint constant_id = 0;
for (const ShaderCreateInfo::SpecializationConstant &sc : info.specialization_constants_) {
ss << "layout (constant_id=" << constant_id++ << ") const ";
switch (sc.type) {
case Type::INT:
ss << "const int " << sc.name << "=" << std::to_string(sc.default_value.i) << ";\n";
ss << "int " << sc.name << "=" << std::to_string(sc.default_value.i) << ";\n";
break;
case Type::UINT:
ss << "const uint " << sc.name << "=" << std::to_string(sc.default_value.u) << "u;\n";
ss << "uint " << sc.name << "=" << std::to_string(sc.default_value.u) << "u;\n";
break;
case Type::BOOL:
ss << "const bool " << sc.name << "=" << (sc.default_value.u ? "true" : "false") << ";\n";
ss << "bool " << sc.name << "=" << (sc.default_value.u ? "true" : "false") << ";\n";
break;
case Type::FLOAT:
/* Use uint representation to allow exact same bit pattern even if NaN. uintBitsToFloat
* isn't supported during global const initialization. */
ss << "#define " << sc.name << " uintBitsToFloat(" << std::to_string(sc.default_value.u)
<< "u)\n";
* isn't supported during global const initialization. */
ss << "uint " << sc.name << "_uint=" << std::to_string(sc.default_value.u) << "u;\n";
ss << "#define " << sc.name << " uintBitsToFloat(" << sc.name << "_uint)\n";
break;
default:
BLI_assert_unreachable();
@@ -1184,6 +1188,30 @@ bool VKShader::do_geometry_shader_injection(const shader::ShaderCreateInfo *info
/** \} */
VkPipeline VKShader::ensure_and_get_compute_pipeline()
{
  /* Return the compute pipeline for the current specialization constants, creating it on
   * first use. */
  BLI_assert(compute_module_ != VK_NULL_HANDLE);
  BLI_assert(vk_pipeline_layout_ != VK_NULL_HANDLE);

  /* Fast path: without specialization constants there is exactly one variation, so a
   * previously built pipeline can be returned directly. This covers most cases. */
  const bool has_specialization_constants = !constants.values.is_empty();
  if (!has_specialization_constants && vk_pipeline_ != VK_NULL_HANDLE) {
    return vk_pipeline_;
  }

  VKComputeInfo compute_info = {};
  compute_info.specialization_constants.extend(constants.values);
  compute_info.vk_shader_module = compute_module_;
  compute_info.vk_pipeline_layout = vk_pipeline_layout_;

  VKDevice &device = VKBackend::get().device_get();
  /* Keep the result in a local variable first to ensure thread safety: the member is then
   * written in a single store. */
  VkPipeline pipeline = device.pipelines.get_or_create_compute_pipeline(compute_info,
                                                                        vk_pipeline_);
  vk_pipeline_ = pipeline;
  return pipeline;
}
int VKShader::program_handle_get() const
{
return -1;

View File

@@ -34,7 +34,14 @@ class VKShader : public Shader {
*/
VkDescriptorSetLayout vk_descriptor_set_layout_ = VK_NULL_HANDLE;
VkPipelineLayout vk_pipeline_layout_ = VK_NULL_HANDLE;
/* deprecated `when use_render_graph=true`. In that case use vk_pipeline_ */
VKPipeline pipeline_;
/**
* Last created VkPipeline handle. This handle is used as template when building a variation of
* the shader. In case for compute shaders without specialization constants this handle is also
* used as an early exit. In this case there is only 1 variation.
*/
VkPipeline vk_pipeline_ = VK_NULL_HANDLE;
public:
VKPushConstants push_constants;
@@ -81,6 +88,7 @@ class VKShader : public Shader {
/* DEPRECATED: Kept only because of BGL API. */
int program_handle_get() const override;
VkPipeline ensure_and_get_compute_pipeline();
VKPipeline &pipeline_get();
VkPipelineLayout vk_pipeline_layout_get() const