Files
test/source/blender/gpu/vulkan/render_graph/vk_scheduler.cc
2024-10-08 09:03:49 +11:00

226 lines
7.7 KiB
C++

/* SPDX-FileCopyrightText: 2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup gpu
*/
#include <sstream>
#include "vk_render_graph.hh"
#include "vk_scheduler.hh"
#include "BLI_index_range.hh"
#include "BLI_set.hh"
namespace blender::gpu::render_graph {
/**
 * Select the nodes that need to be executed so the given image contains its up-to-date content.
 *
 * Currently the image is ignored and the whole render graph is scheduled; the returned span
 * points into `result_` and stays valid until the next `select_nodes*` call.
 */
Span<NodeHandle> VKScheduler::select_nodes_for_image(const VKRenderGraph &render_graph,
                                                     VkImage vk_image)
{
  /* TODO: Only select the subgraph that contributes to `vk_image`. Until subgraph extraction is
   * implemented, fall back to scheduling all nodes. */
  UNUSED_VARS(vk_image);
  /* Delegate to the common path so the select/reorder sequence lives in one place. */
  return select_nodes(render_graph);
}
/**
 * Select the nodes that need to be executed so the given buffer contains its up-to-date content.
 *
 * Currently the buffer is ignored and the whole render graph is scheduled; the returned span
 * points into `result_` and stays valid until the next `select_nodes*` call.
 */
Span<NodeHandle> VKScheduler::select_nodes_for_buffer(const VKRenderGraph &render_graph,
                                                      VkBuffer vk_buffer)
{
  /* TODO: Only select the subgraph that contributes to `vk_buffer`. Until subgraph extraction is
   * implemented, fall back to scheduling all nodes. */
  UNUSED_VARS(vk_buffer);
  /* Delegate to the common path so the select/reorder sequence lives in one place. */
  return select_nodes(render_graph);
}
/**
 * Select all nodes of the render graph and reorder them for command-buffer recording.
 *
 * The returned span points into the member `result_` and stays valid until the next
 * `select_nodes*` call on this scheduler.
 */
Span<NodeHandle> VKScheduler::select_nodes(const VKRenderGraph &render_graph)
{
  select_all_nodes(render_graph);
  reorder_nodes(render_graph);
  return result_;
}
void VKScheduler::select_all_nodes(const VKRenderGraph &render_graph)
{
/* TODO: This will not work when we extract subgraphs. When subgraphs are removed the order in
* the render graph may not follow the order the nodes were added. */
result_.clear();
for (NodeHandle node_handle : render_graph.nodes_.index_range()) {
result_.append(node_handle);
}
}
/* -------------------------------------------------------------------- */
/** \name Reorder - entry point and rendering-scope detection
* \{ */
/**
 * Reorder the previously selected nodes in `result_` to improve command-buffer recording.
 *
 * Currently the only reordering step is moving data-transfer/dispatch work outside rendering
 * scopes; additional passes can be chained here later.
 */
void VKScheduler::reorder_nodes(const VKRenderGraph &render_graph)
{
  move_transfer_and_dispatch_outside_rendering_scope(render_graph);
}
/**
 * Locate the first rendering scope inside `search_range`.
 *
 * \return the pair (index of BEGIN_RENDERING, index of the matching END_RENDERING) into
 * `result_`, or `std::nullopt` when the range contains no complete scope.
 */
std::optional<std::pair<int64_t, int64_t>> VKScheduler::find_rendering_scope(
    const VKRenderGraph &render_graph, IndexRange search_range) const
{
  /* Index of the most recently seen BEGIN_RENDERING node; -1 when none has been seen yet. */
  int64_t begin_index = -1;
  for (const int64_t index : search_range) {
    const VKRenderGraphNode &node = render_graph.nodes_[result_[index]];
    switch (node.type) {
      case VKNodeType::BEGIN_RENDERING:
        begin_index = index;
        break;
      case VKNodeType::END_RENDERING:
        if (begin_index != -1) {
          return std::make_pair(begin_index, index);
        }
        break;
      default:
        break;
    }
  }
  /* A BEGIN_RENDERING without a matching END_RENDERING inside the range isn't expected. */
  BLI_assert(begin_index == -1);
  return std::nullopt;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Reorder - move data transfer and dispatches outside rendering scope
* \{ */
/**
 * For every rendering scope in `result_`: tighten the BEGIN/END_RENDERING boundaries around the
 * actual graphics nodes, then hoist buffer-update nodes whose destination buffer isn't used by
 * the scope to just before it. All reordering happens in place inside `result_`.
 */
void VKScheduler::move_transfer_and_dispatch_outside_rendering_scope(
    const VKRenderGraph &render_graph)
{
  /* Scratch storage reused across scopes so only the first scope pays for allocations. */
  Vector<NodeHandle> pre_rendering_scope;
  Vector<NodeHandle> rendering_scope;
  Set<ResourceHandle> used_buffers;
  foreach_rendering_scope(render_graph, [&](int64_t start_index, int64_t end_index) {
    /* Move end_rendering right after the last graphics node by bubbling it down one position
     * per iteration. NOTE(review): `index` is `int` while the bounds are `int64_t`; assumes a
     * scope never holds more than INT_MAX nodes — confirm. */
    for (int index = end_index - 1; index >= start_index; index--) {
      NodeHandle node_handle = result_[index];
      const VKRenderGraphNode &node = render_graph.nodes_[node_handle];
      if (node_type_is_rendering(node.type)) {
        break;
      }
      std::swap(result_[end_index], result_[index]);
      end_index -= 1;
    }
    /* Move begin_rendering right before the first graphics node, bubbling it up. */
    for (int index = start_index + 1; index < end_index; index++) {
      NodeHandle node_handle = result_[index];
      const VKRenderGraphNode &node = render_graph.nodes_[node_handle];
      if (node_type_is_rendering(node.type)) {
        break;
      }
      std::swap(result_[start_index], result_[index]);
      start_index += 1;
    }
    /* Move buffer-update commands to before the rendering scope, unless the buffer is already
     * being used by a draw command. Image modifications could also be moved outside the
     * rendering scope, but that is more tricky as images could also be attached to the
     * frame-buffer. */
    pre_rendering_scope.clear();
    rendering_scope.clear();
    used_buffers.clear();
    for (int index = start_index + 1; index < end_index; index++) {
      NodeHandle node_handle = result_[index];
      const VKRenderGraphNode &node = render_graph.nodes_[node_handle];
      /* Should we add this node to the rendering scope? This is only done when we need to
       * reorder nodes. In that case `rendering_scope` already has an item and this node goes
       * either into the rendering scope or before it. Adding nodes before the rendering scope
       * is done in the VKNodeType::UPDATE_BUFFER branch. */
      bool add_to_rendering_scope = !rendering_scope.is_empty();
      if (node.type == VKNodeType::UPDATE_BUFFER) {
        if (!used_buffers.contains(
                render_graph.resources_.buffer_resources_.lookup(node.update_buffer.dst_buffer)))
        {
          /* Buffer isn't used by this rendering scope so we can safely move it before the
           * rendering scope begins. */
          pre_rendering_scope.append(node_handle);
          add_to_rendering_scope = false;
          /* The first time we move a node before the rendering scope we also start building up
           * the rendering scope itself. This is postponed so we save some cycles when no nodes
           * need to be moved at all. */
          if (rendering_scope.is_empty()) {
            rendering_scope.extend(Span<NodeHandle>(&result_[start_index], index - start_index));
          }
        }
      }
      if (add_to_rendering_scope) {
        /* When rendering scope has an item we are rewriting the execution order and need to
         * track what should stay inside the rendering scope. */
        rendering_scope.append(node_handle);
      }
      /* Any read/write of a buffer resource is recorded in `used_buffers` so later
       * UPDATE_BUFFER nodes can detect whether moving before the scope is safe. */
      const VKRenderGraphNodeLinks &links = render_graph.links_[node_handle];
      for (const VKRenderGraphLink &input : links.inputs) {
        if (render_graph.resources_.resource_type_get(input.resource.handle) ==
            VKResourceType::BUFFER)
        {
          used_buffers.add(input.resource.handle);
        }
      }
      for (const VKRenderGraphLink &output : links.outputs) {
        if (render_graph.resources_.resource_type_get(output.resource.handle) ==
            VKResourceType::BUFFER)
        {
          used_buffers.add(output.resource.handle);
        }
      }
    }
    /* When `pre_rendering_scope` has an item we want to rewrite the order.
     * The number of nodes is not changed, so we can do this inline. */
    if (!pre_rendering_scope.is_empty()) {
      MutableSpan<NodeHandle> store_none_rendering = result_.as_mutable_span().slice(
          start_index, pre_rendering_scope.size());
      MutableSpan<NodeHandle> store_rendering = result_.as_mutable_span().slice(
          start_index + pre_rendering_scope.size(), rendering_scope.size());
      store_none_rendering.copy_from(pre_rendering_scope);
      store_rendering.copy_from(rendering_scope);
      start_index += pre_rendering_scope.size();
    }
  });
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Debug
* \{ */
void VKScheduler::debug_print(const VKRenderGraph &render_graph) const
{
std::stringstream ss;
int indent = 0;
for (int index : result_.index_range()) {
const NodeHandle node_handle = result_[index];
const VKRenderGraphNode &node = render_graph.nodes_[node_handle];
if (node.type == VKNodeType::END_RENDERING) {
indent--;
}
for (int i = 0; i < indent; i++) {
ss << " ";
}
ss << node.type << "\n";
#if 0
render_graph.debug_print(node_handle);
#endif
if (node.type == VKNodeType::BEGIN_RENDERING) {
indent++;
}
}
ss << "\n";
std::cout << ss.str();
}
/** \} */
} // namespace blender::gpu::render_graph