The OpenGL specs require the storage image qualifier in shaders (e.g., "rgba32f") to be compatible with the format of a bound image (see https://registry.khronos.org/OpenGL/specs/gl/glspec46.core.pdf#page=318). We know that Blender currently does not handle this correctly in multiple places. AMD and NVIDIA seem to silently ignore a mismatch and simply use the format of the bound image. However, for the Intel Windows drivers, this seems to lead to visual corruption (#141436, #141173). While more graceful handling of a mismatch may be nice, failing on a mismatch is in line with the OpenGL specs. This PR adds code for validating image formats for bindings. Pull Request: https://projects.blender.org/blender/blender/pulls/143791
/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup gpu
 */

#include "vk_state_manager.hh"
|
|
#include "vk_context.hh"
|
|
#include "vk_index_buffer.hh"
|
|
#include "vk_shader.hh"
|
|
#include "vk_storage_buffer.hh"
|
|
#include "vk_texture.hh"
|
|
#include "vk_vertex_buffer.hh"
|
|
|
|
#include "GPU_capabilities.hh"
|
|
|
|
namespace blender::gpu {
|
|
|
|
void VKStateManager::apply_state()
|
|
{
|
|
/* Intentionally empty. State is polled during pipeline creation and doesn't need to be applied.
|
|
* If this leads to issues we should have an active state. */
|
|
}
|
|
|
|
void VKStateManager::force_state()
|
|
{
|
|
/* Intentionally empty. State is polled during pipeline creation and is always forced. */
|
|
}
|
|
|
|
void VKStateManager::issue_barrier(eGPUBarrier barrier_bits)
|
|
{
|
|
/**
|
|
* Workaround for EEVEE ThicknessFromShadow shader.
|
|
*
|
|
* EEVEE light evaluation uses layered sub-pass tracking. Currently, the tracking supports
|
|
* transitioning a layer to a different layout once per rendering scope. When using the thickness
|
|
* from shadow, the layers need to be transitioned twice: once to image load/store for the
|
|
* thickness from shadow shader and then to a sampler for the light evaluation shader. We work
|
|
* around this limitation by suspending the rendering.
|
|
*
|
|
* The reason we need to suspend the rendering is that Vulkan, by default, doesn't support layout
|
|
* transitions between the begin and end of rendering. By suspending the render, the graph will
|
|
* create a new node group that allows the necessary image layout transition.
|
|
*
|
|
* This limitation could also be addressed in the render graph scheduler, but that would be quite
|
|
* a hassle to track and might not be worth the effort.
|
|
*/
|
|
if (bool(barrier_bits & GPU_BARRIER_SHADER_IMAGE_ACCESS)) {
|
|
VKContext &context = *VKContext::get();
|
|
context.rendering_end();
|
|
}
|
|
}
|
|
|
|
void VKStateManager::texture_bind(Texture *texture, GPUSamplerState sampler, int binding)
|
|
{
|
|
textures_.bind(BindSpaceTextures::Type::Texture, texture, sampler, binding);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::texture_unbind(Texture *texture)
|
|
{
|
|
textures_.unbind(texture);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::texture_unbind_all()
|
|
{
|
|
textures_.unbind_all();
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::image_bind(Texture *tex, int binding)
|
|
{
|
|
VKTexture *texture = unwrap(tex);
|
|
images_.bind(texture, binding, TextureWriteFormat(tex->format_get()), this);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::image_unbind(Texture *tex)
|
|
{
|
|
VKTexture *texture = unwrap(tex);
|
|
images_.unbind(texture, this);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::image_unbind_all()
|
|
{
|
|
images_.unbind_all();
|
|
image_formats.fill(TextureWriteFormat::Invalid);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::uniform_buffer_bind(VKUniformBuffer *uniform_buffer, int binding)
|
|
{
|
|
uniform_buffers_.bind(uniform_buffer, binding);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::uniform_buffer_unbind(VKUniformBuffer *uniform_buffer)
|
|
{
|
|
uniform_buffers_.unbind(uniform_buffer);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::uniform_buffer_unbind_all()
|
|
{
|
|
uniform_buffers_.unbind_all();
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::unbind_from_all_namespaces(void *resource)
|
|
{
|
|
uniform_buffers_.unbind(resource);
|
|
storage_buffers_.unbind(resource);
|
|
images_.unbind(resource, this);
|
|
textures_.unbind(resource);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::texel_buffer_bind(VKVertexBuffer &vertex_buffer, int binding)
|
|
{
|
|
textures_.bind(BindSpaceTextures::Type::VertexBuffer,
|
|
&vertex_buffer,
|
|
GPUSamplerState::default_sampler(),
|
|
binding);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::texel_buffer_unbind(VKVertexBuffer &vertex_buffer)
|
|
{
|
|
textures_.unbind(&vertex_buffer);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::storage_buffer_bind(BindSpaceStorageBuffers::Type resource_type,
|
|
void *resource,
|
|
int binding,
|
|
VkDeviceSize offset)
|
|
{
|
|
storage_buffers_.bind(resource_type, resource, binding, offset);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::storage_buffer_unbind(void *resource)
|
|
{
|
|
storage_buffers_.unbind(resource);
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::storage_buffer_unbind_all()
|
|
{
|
|
storage_buffers_.unbind_all();
|
|
is_dirty = true;
|
|
}
|
|
|
|
void VKStateManager::texture_unpack_row_length_set(uint len)
|
|
{
|
|
texture_unpack_row_length_ = len;
|
|
}
|
|
|
|
uint VKStateManager::texture_unpack_row_length_get() const
|
|
{
|
|
return texture_unpack_row_length_;
|
|
}
|
|
|
|
} // namespace blender::gpu
|