GPU: Remove GPU_shader_storage_buffer_objects_support

Blender 4.0 requires OpenGL 4.3, which always supports SSBOs.
Platforms that don't expose enough SSBO bind points will be marked
as unsupported.
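
Concretely, "enough bind points" means at least 8 SSBO binding locations in each of the vertex, fragment and compute stages. A minimal sketch of that check (the function name is illustrative only; it assumes an active OpenGL context and the epoxy loader, and mirrors the `GLBackend::platform_init()` change further down in this diff):

```cpp
#include <algorithm>
#include <epoxy/gl.h>

/* Illustrative only: the real check lives in GLBackend::platform_init() and
 * downgrades the support level to GPU_SUPPORT_LEVEL_UNSUPPORTED on failure. */
static bool meets_min_ssbo_bind_points()
{
  GLint vert = 0, frag = 0, comp = 0;
  glGetIntegerv(GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, &vert);
  glGetIntegerv(GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS, &frag);
  glGetIntegerv(GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS, &comp);
  /* Every stage must expose at least 8 SSBO binding locations. */
  return std::min({vert, frag, comp}) >= 8;
}
```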

Users who start Blender on such a platform will be informed via a
dialog. This PR also updates `--debug-gpu-force-workarounds` to match
our minimum requirements (usage example below the list). Note that
some known bugs remain and should be solved in other PRs:

* Workbench only renders objects with a unit matrix; this is caused by a
  bug in the workaround for shader_draw_parameters.
* Navigating with the middle mouse button does not work. The cause is
  unclear, but it might be a missing feature check in the OpenGL backend.
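
The `--debug-gpu-force-workarounds` flag mentioned above can be used on capable hardware to emulate a minimum-requirements GPU; per the `detect_workarounds()` change further down, it now only disables features beyond the OpenGL 4.3 baseline:

```
blender --debug-gpu-force-workarounds
```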

Related to #112224

Pull Request: https://projects.blender.org/blender/blender/pulls/112572
Jeroen Bakker
2023-09-21 12:55:51 +02:00
parent 25031f8a86
commit 6d91d36161
24 changed files with 63 additions and 229 deletions

View File

@@ -212,8 +212,6 @@ def write_sysinfo(filepath):
output.write("\nFeatures:\n")
output.write("Compute Shader Support: \t%d\n" %
gpu.capabilities.compute_shader_support_get())
output.write("Shader Storage Buffer Objects Support:\t%d\n" %
gpu.capabilities.shader_storage_buffer_objects_support_get())
output.write("Image Load/Store Support: \t%d\n" %
gpu.capabilities.shader_image_load_store_support_get())

View File

@@ -102,7 +102,7 @@ static bool is_subdivision_evaluation_possible_on_gpu()
return false;
}
if (!(GPU_compute_shader_support() && GPU_shader_storage_buffer_objects_support())) {
if (!(GPU_compute_shader_support())) {
return false;
}

View File

@@ -33,11 +33,6 @@ struct EEVEE_Data {
static void eevee_engine_init(void *vedata)
{
/* TODO(fclem): Remove once it is minimum required. */
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
EEVEE_Data *ved = reinterpret_cast<EEVEE_Data *>(vedata);
if (ved->instance == nullptr) {
ved->instance = new eevee::Instance();
@@ -94,10 +89,6 @@ static void eevee_engine_init(void *vedata)
static void eevee_draw_scene(void *vedata)
{
EEVEE_Data *ved = reinterpret_cast<EEVEE_Data *>(vedata);
if (!GPU_shader_storage_buffer_objects_support()) {
STRNCPY(ved->info, "Error: No shader storage buffer support");
return;
}
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
ved->instance->draw_viewport(dfbl);
STRNCPY(ved->info, ved->instance->info.c_str());
@@ -107,25 +98,16 @@ static void eevee_draw_scene(void *vedata)
static void eevee_cache_init(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<EEVEE_Data *>(vedata)->instance->begin_sync();
}
static void eevee_cache_populate(void *vedata, Object *object)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<EEVEE_Data *>(vedata)->instance->object_sync(object);
}
static void eevee_cache_finish(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<EEVEE_Data *>(vedata)->instance->end_sync();
}
@@ -136,9 +118,6 @@ static void eevee_engine_free()
static void eevee_instance_free(void *instance)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
delete reinterpret_cast<eevee::Instance *>(instance);
}
@@ -147,10 +126,6 @@ static void eevee_render_to_image(void *vedata,
RenderLayer *layer,
const rcti * /*rect*/)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
eevee::Instance *instance = new eevee::Instance();
Render *render = engine->re;
@@ -173,9 +148,6 @@ static void eevee_render_to_image(void *vedata,
static void eevee_store_metadata(void *vedata, RenderResult *render_result)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
EEVEE_Data *ved = static_cast<EEVEE_Data *>(vedata);
eevee::Instance *instance = ved->instance;
instance->store_metadata(render_result);
@@ -185,9 +157,6 @@ static void eevee_store_metadata(void *vedata, RenderResult *render_result)
static void eevee_render_update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
eevee::Instance::update_passes(engine, scene, view_layer);
}

View File

@@ -214,11 +214,6 @@ struct GPENCIL_NEXT_Data {
static void gpencil_engine_init(void *vedata)
{
/* TODO(fclem): Remove once it is minimum required. */
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
GPENCIL_NEXT_Data *ved = reinterpret_cast<GPENCIL_NEXT_Data *>(vedata);
if (ved->instance == nullptr) {
ved->instance = new draw::greasepencil::Instance();
@@ -232,10 +227,6 @@ static void gpencil_engine_init(void *vedata)
static void gpencil_draw_scene(void *vedata)
{
GPENCIL_NEXT_Data *ved = reinterpret_cast<GPENCIL_NEXT_Data *>(vedata);
if (!GPU_shader_storage_buffer_objects_support()) {
STRNCPY(ved->info, "Error: No shader storage buffer support");
return;
}
if (DRW_state_is_select() || DRW_state_is_depth()) {
return;
}
@@ -248,18 +239,12 @@ static void gpencil_draw_scene(void *vedata)
static void gpencil_cache_init(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
draw::Manager *manager = DRW_manager_get();
reinterpret_cast<GPENCIL_NEXT_Data *>(vedata)->instance->begin_sync(*manager);
}
static void gpencil_cache_populate(void *vedata, Object *object)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
draw::Manager *manager = DRW_manager_get();
draw::ObjectRef ref;
@@ -272,18 +257,12 @@ static void gpencil_cache_populate(void *vedata, Object *object)
static void gpencil_cache_finish(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
draw::Manager *manager = DRW_manager_get();
reinterpret_cast<GPENCIL_NEXT_Data *>(vedata)->instance->end_sync(*manager);
}
static void gpencil_instance_free(void *instance)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
delete reinterpret_cast<draw::greasepencil::Instance *>(instance);
}

View File

@@ -41,10 +41,6 @@ using Instance = blender::draw::overlay::Instance;
static void OVERLAY_next_engine_init(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
OVERLAY_Data *ved = reinterpret_cast<OVERLAY_Data *>(vedata);
if (ved->instance == nullptr) {
@@ -56,17 +52,11 @@ static void OVERLAY_next_engine_init(void *vedata)
static void OVERLAY_next_cache_init(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<Instance *>(reinterpret_cast<OVERLAY_Data *>(vedata)->instance)->begin_sync();
}
static void OVERLAY_next_cache_populate(void *vedata, Object *object)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
ObjectRef ref;
ref.object = object;
ref.dupli_object = DRW_object_get_dupli(object);
@@ -78,18 +68,11 @@ static void OVERLAY_next_cache_populate(void *vedata, Object *object)
static void OVERLAY_next_cache_finish(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<Instance *>(reinterpret_cast<OVERLAY_Data *>(vedata)->instance)->end_sync();
}
static void OVERLAY_next_draw_scene(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<Instance *>(reinterpret_cast<OVERLAY_Data *>(vedata)->instance)
->draw(*DRW_manager_get());
}

View File

@@ -35,10 +35,6 @@ struct SELECT_NextData {
static void SELECT_next_engine_init(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
OVERLAY_Data *ved = reinterpret_cast<OVERLAY_Data *>(vedata);
if (ved->instance == nullptr) {
@@ -50,17 +46,11 @@ static void SELECT_next_engine_init(void *vedata)
static void SELECT_next_cache_init(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<Instance *>(reinterpret_cast<OVERLAY_Data *>(vedata)->instance)->begin_sync();
}
static void SELECT_next_cache_populate(void *vedata, Object *object)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
ObjectRef ref;
ref.object = object;
ref.dupli_object = DRW_object_get_dupli(object);
@@ -72,18 +62,11 @@ static void SELECT_next_cache_populate(void *vedata, Object *object)
static void SELECT_next_cache_finish(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<Instance *>(reinterpret_cast<OVERLAY_Data *>(vedata)->instance)->end_sync();
}
static void SELECT_next_draw_scene(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<Instance *>(reinterpret_cast<OVERLAY_Data *>(vedata)->instance)
->draw(*DRW_manager_get());
}

View File

@@ -541,11 +541,6 @@ struct WORKBENCH_Data {
static void workbench_engine_init(void *vedata)
{
/* TODO(fclem): Remove once it is minimum required. */
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
WORKBENCH_Data *ved = reinterpret_cast<WORKBENCH_Data *>(vedata);
if (ved->instance == nullptr) {
ved->instance = new workbench::Instance();
@@ -556,17 +551,11 @@ static void workbench_engine_init(void *vedata)
static void workbench_cache_init(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<WORKBENCH_Data *>(vedata)->instance->begin_sync();
}
static void workbench_cache_populate(void *vedata, Object *object)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
draw::Manager *manager = DRW_manager_get();
draw::ObjectRef ref;
@@ -579,19 +568,12 @@ static void workbench_cache_populate(void *vedata, Object *object)
static void workbench_cache_finish(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<WORKBENCH_Data *>(vedata)->instance->end_sync();
}
static void workbench_draw_scene(void *vedata)
{
WORKBENCH_Data *ved = reinterpret_cast<WORKBENCH_Data *>(vedata);
if (!GPU_shader_storage_buffer_objects_support()) {
STRNCPY(ved->info, "Error: No shader storage buffer support");
return;
}
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
draw::Manager *manager = DRW_manager_get();
if (DRW_state_is_viewport_image_render()) {
@@ -605,9 +587,6 @@ static void workbench_draw_scene(void *vedata)
static void workbench_instance_free(void *instance)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
delete reinterpret_cast<workbench::Instance *>(instance);
}
@@ -747,11 +726,6 @@ static void workbench_render_to_image(void *vedata,
RenderLayer *layer,
const rcti *rect)
{
/* TODO(fclem): Remove once it is minimum required. */
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
if (!workbench_render_framebuffers_init()) {
RE_engine_report(engine, RPT_ERROR, "Failed to allocate GPU buffers");
return;

View File

@@ -38,9 +38,7 @@ BLI_INLINE eParticleRefineShaderType drw_curves_shader_type_get()
* Metal and Apple Silicon GPUs. This is also because vertex work can more easily be executed in
* parallel with fragment work, whereas compute inserts an explicit dependency,
* due to switching of command encoder types. */
if (GPU_compute_shader_support() && GPU_shader_storage_buffer_objects_support() &&
(GPU_backend_get_type() != GPU_BACKEND_METAL))
{
if (GPU_compute_shader_support() && (GPU_backend_get_type() != GPU_BACKEND_METAL)) {
return PART_REFINE_SHADER_COMPUTE;
}
if (GPU_transform_feedback_support()) {
@@ -505,7 +503,7 @@ void DRW_curves_update()
GPUFrameBuffer *temp_fb = nullptr;
GPUFrameBuffer *prev_fb = nullptr;
if (GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_MAC, GPU_DRIVER_ANY, GPU_BACKEND_METAL)) {
if (!(GPU_compute_shader_support() && GPU_shader_storage_buffer_objects_support())) {
if (!(GPU_compute_shader_support())) {
prev_fb = GPU_framebuffer_active_get();
char errorOut[256];
/* if the frame-buffer is invalid we need a dummy frame-buffer to be bound. */

View File

@@ -106,7 +106,6 @@ void DebugDraw::modelmat_set(const float modelmat[4][4])
GPUStorageBuf *DebugDraw::gpu_draw_buf_get()
{
BLI_assert(GPU_shader_storage_buffer_objects_support());
if (!gpu_draw_buf_used) {
gpu_draw_buf_used = true;
gpu_draw_buf_.push_update();
@@ -116,7 +115,6 @@ GPUStorageBuf *DebugDraw::gpu_draw_buf_get()
GPUStorageBuf *DebugDraw::gpu_print_buf_get()
{
BLI_assert(GPU_shader_storage_buffer_objects_support());
if (!gpu_print_buf_used) {
gpu_print_buf_used = true;
gpu_print_buf_.push_update();
@@ -601,9 +599,6 @@ void DebugDraw::display_to_view()
blender::draw::DebugDraw *DRW_debug_get()
{
if (!GPU_shader_storage_buffer_objects_support()) {
return nullptr;
}
return reinterpret_cast<blender::draw::DebugDraw *>(DST.debug);
}
@@ -616,7 +611,7 @@ blender::draw::DebugDraw *DRW_debug_get()
void drw_debug_draw()
{
#ifdef DRAW_DEBUG
if (!GPU_shader_storage_buffer_objects_support() || DST.debug == nullptr) {
if (DST.debug == nullptr) {
return;
}
/* TODO(@fclem): Convenience for now. Will have to move to #DRWManager. */
@@ -632,9 +627,6 @@ void drw_debug_init()
/* Module should not be used in release builds. */
/* TODO(@fclem): Hide the functions declarations without using `ifdefs` everywhere. */
#ifdef DRAW_DEBUG
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
/* TODO(@fclem): Convenience for now. Will have to move to #DRWManager. */
if (DST.debug == nullptr) {
DST.debug = reinterpret_cast<DRWDebugModule *>(new blender::draw::DebugDraw());
@@ -645,9 +637,6 @@ void drw_debug_init()
void drw_debug_module_free(DRWDebugModule *module)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
if (module != nullptr) {
delete reinterpret_cast<blender::draw::DebugDraw *>(module);
}
@@ -671,18 +660,12 @@ GPUStorageBuf *drw_debug_gpu_print_buf_get()
void DRW_debug_modelmat_reset()
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<blender::draw::DebugDraw *>(DST.debug)->modelmat_reset();
}
void DRW_debug_modelmat(const float modelmat[4][4])
{
#ifdef DRAW_DEBUG
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<blender::draw::DebugDraw *>(DST.debug)->modelmat_set(modelmat);
#else
UNUSED_VARS(modelmat);
@@ -691,34 +674,22 @@ void DRW_debug_modelmat(const float modelmat[4][4])
void DRW_debug_line_v3v3(const float v1[3], const float v2[3], const float color[4])
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<blender::draw::DebugDraw *>(DST.debug)->draw_line(v1, v2, color);
}
void DRW_debug_polygon_v3(const float (*v)[3], int vert_len, const float color[4])
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<blender::draw::DebugDraw *>(DST.debug)->draw_polygon(
blender::Span<float3>((float3 *)v, vert_len), color);
}
void DRW_debug_m4(const float m[4][4])
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<blender::draw::DebugDraw *>(DST.debug)->draw_matrix(float4x4(m));
}
void DRW_debug_m4_as_bbox(const float m[4][4], bool invert, const float color[4])
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
blender::float4x4 m4(m);
if (invert) {
m4 = blender::math::invert(m4);
@@ -729,9 +700,6 @@ void DRW_debug_m4_as_bbox(const float m[4][4], bool invert, const float color[4]
void DRW_debug_bbox(const BoundBox *bbox, const float color[4])
{
#ifdef DRAW_DEBUG
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<blender::draw::DebugDraw *>(DST.debug)->draw_bbox(*bbox, color);
#else
UNUSED_VARS(bbox, color);
@@ -740,9 +708,6 @@ void DRW_debug_bbox(const BoundBox *bbox, const float color[4])
void DRW_debug_sphere(const float center[3], float radius, const float color[4])
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<blender::draw::DebugDraw *>(DST.debug)->draw_sphere(center, radius, color);
}

View File

@@ -41,9 +41,7 @@ BLI_INLINE eParticleRefineShaderType drw_hair_shader_type_get()
* and Apple Silicon GPUs. This is also because vertex work can more easily be executed in
* parallel with fragment work, whereas compute inserts an explicit dependency,
* due to switching of command encoder types. */
if (GPU_compute_shader_support() && GPU_shader_storage_buffer_objects_support() &&
(GPU_backend_get_type() != GPU_BACKEND_METAL))
{
if (GPU_compute_shader_support() && (GPU_backend_get_type() != GPU_BACKEND_METAL)) {
return PART_REFINE_SHADER_COMPUTE;
}
if (GPU_transform_feedback_support()) {
@@ -390,7 +388,7 @@ void DRW_hair_update()
GPUFrameBuffer *temp_fb = nullptr;
GPUFrameBuffer *prev_fb = nullptr;
if (GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_MAC, GPU_DRIVER_ANY, GPU_BACKEND_METAL)) {
if (!(GPU_compute_shader_support() && GPU_shader_storage_buffer_objects_support())) {
if (!GPU_compute_shader_support()) {
prev_fb = GPU_framebuffer_active_get();
char errorOut[256];
/* if the frame-buffer is invalid we need a dummy frame-buffer to be bound. */

View File

@@ -43,7 +43,7 @@ struct DRWViewData {
DRWViewData()
{
/* Only for GL >= 4.3 implementation for now. */
if (GPU_shader_storage_buffer_objects_support() && GPU_compute_shader_support()) {
if (GPU_compute_shader_support()) {
manager = new draw::Manager();
}
};

View File

@@ -49,7 +49,6 @@ bool GPU_crappy_amd_driver(void);
bool GPU_geometry_shader_support(void);
bool GPU_compute_shader_support(void);
bool GPU_shader_storage_buffer_objects_support(void);
bool GPU_shader_image_load_store_support(void);
bool GPU_shader_draw_parameters_support(void);
bool GPU_hdr_support(void);

View File

@@ -167,11 +167,6 @@ bool GPU_geometry_shader_support()
return GCaps.geometry_shader_support;
}
bool GPU_shader_storage_buffer_objects_support()
{
return GCaps.shader_storage_buffer_objects_support;
}
bool GPU_shader_image_load_store_support()
{
return GCaps.shader_image_load_store_support;

View File

@@ -44,7 +44,6 @@ struct GPUCapabilities {
bool mem_stats_support = false;
bool compute_shader_support = false;
bool geometry_shader_support = false;
bool shader_storage_buffer_objects_support = false;
bool shader_image_load_store_support = false;
bool shader_draw_parameters_support = false;
bool transform_feedback_support = false;

View File

@@ -522,8 +522,7 @@ bool gpu_shader_create_info_compile_all()
if ((info->metal_backend_only_ && GPU_backend_get_type() != GPU_BACKEND_METAL) ||
(GPU_compute_shader_support() == false && info->compute_source_ != nullptr) ||
(GPU_geometry_shader_support() == false && info->geometry_source_ != nullptr) ||
(GPU_shader_image_load_store_support() == false && info->has_resource_image()) ||
(GPU_shader_storage_buffer_objects_support() == false && info->has_resource_storage()))
(GPU_shader_image_load_store_support() == false && info->has_resource_image()))
{
skipped++;
continue;

View File

@@ -1032,11 +1032,6 @@ struct ShaderCreateInfo {
return has_resource_type(Resource::BindType::IMAGE);
}
bool has_resource_storage() const
{
return has_resource_type(Resource::BindType::STORAGE_BUFFER);
}
/** \} */
#undef TEST_EQUAL

View File

@@ -418,7 +418,6 @@ void MTLBackend::capabilities_init(MTLContext *ctx)
MTLBackend::capabilities.supports_family_mac1 ||
MTLBackend::capabilities.supports_family_mac2);
GCaps.compute_shader_support = true;
GCaps.shader_storage_buffer_objects_support = true;
GCaps.shader_draw_parameters_support = true;
GCaps.hdr_viewport_support = true;

View File

@@ -146,6 +146,21 @@ void GLBackend::platform_init()
support_level = GPU_SUPPORT_LEVEL_LIMITED;
}
}
/* Check SSBO bindings requirement. */
GLint max_ssbo_binds_vertex;
GLint max_ssbo_binds_fragment;
GLint max_ssbo_binds_compute;
glGetIntegerv(GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, &max_ssbo_binds_vertex);
glGetIntegerv(GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS, &max_ssbo_binds_fragment);
glGetIntegerv(GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS, &max_ssbo_binds_compute);
GLint max_ssbo_binds = min_iii(
max_ssbo_binds_vertex, max_ssbo_binds_fragment, max_ssbo_binds_compute);
if (max_ssbo_binds < 8) {
std::cout << "Warning: Unsupported platform as it supports max " << max_ssbo_binds
<< " SSBO binding locations\n";
support_level = GPU_SUPPORT_LEVEL_UNSUPPORTED;
}
}
GPG.init(device, os, driver, support_level, GPU_BACKEND_OPENGL, vendor, renderer, version);
@@ -230,29 +245,41 @@ static void detect_workarounds()
GCaps.mip_render_workaround = true;
GLContext::debug_layer_workaround = true;
GLContext::unused_fb_slot_workaround = true;
/* Turn off extensions. */
GCaps.shader_image_load_store_support = false;
GCaps.shader_draw_parameters_support = false;
GCaps.shader_storage_buffer_objects_support = false;
/* Turn off Blender features. */
GCaps.hdr_viewport_support = false;
GLContext::base_instance_support = false;
/* Turn off OpenGL 4.4 features. */
GLContext::clear_texture_support = false;
GLContext::multi_bind_support = false;
GLContext::multi_bind_image_support = false;
/* Turn off OpenGL 4.5 features. */
GLContext::direct_state_access_support = false;
/* Turn off OpenGL 4.6 features. */
GLContext::texture_filter_anisotropic_support = false;
GCaps.shader_draw_parameters_support = false;
GLContext::shader_draw_parameters_support = false;
/* Although an OpenGL 4.3 feature, our implementation requires shader_draw_parameters_support.
* NOTE: we should untangle this by checking both features for clarity. */
GLContext::multi_draw_indirect_support = false;
/* Turn off vendor specific extensions. */
GLContext::native_barycentric_support = false;
/* Do not alter OpenGL 4.3 features.
* These code paths should be removed. */
/*
GCaps.shader_image_load_store_support = false;
GLContext::base_instance_support = false;
GLContext::copy_image_support = false;
GLContext::debug_layer_support = false;
GLContext::direct_state_access_support = false;
GLContext::fixed_restart_index_support = false;
GLContext::geometry_shader_invocations = false;
GLContext::layered_rendering_support = false;
GLContext::native_barycentric_support = false;
GLContext::multi_bind_support = false;
GLContext::multi_bind_image_support = false;
GLContext::multi_draw_indirect_support = false;
GLContext::shader_draw_parameters_support = false;
GLContext::texture_cube_map_array_support = false;
GLContext::texture_filter_anisotropic_support = false;
GLContext::texture_gather_support = false;
GLContext::texture_storage_support = false;
GLContext::vertex_attrib_binding_support = false;
*/
return;
}
@@ -457,7 +484,7 @@ static void detect_workarounds()
/* Minimum Per-Vertex stride is 1 byte for OpenGL. */
GCaps.minimum_per_vertex_stride = 1;
} // namespace blender::gpu
}
/** Internal capabilities. */
@@ -546,8 +573,6 @@ void GLBackend::capabilities_init()
&GCaps.max_shader_storage_buffer_bindings);
glGetIntegerv(GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS, &GCaps.max_compute_shader_storage_blocks);
}
GCaps.shader_storage_buffer_objects_support = epoxy_has_gl_extension(
"GL_ARB_shader_storage_buffer_object");
GCaps.transform_feedback_support = true;
GCaps.texture_view_support = epoxy_gl_version() >= 43 ||
epoxy_has_gl_extension("GL_ARB_texture_view");
@@ -557,21 +582,15 @@ void GLBackend::capabilities_init()
glGetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, &GLContext::max_cubemap_size);
glGetIntegerv(GL_MAX_FRAGMENT_UNIFORM_BLOCKS, &GLContext::max_ubo_binds);
glGetIntegerv(GL_MAX_UNIFORM_BLOCK_SIZE, &GLContext::max_ubo_size);
if (GCaps.shader_storage_buffer_objects_support) {
GLint max_ssbo_binds;
GLContext::max_ssbo_binds = 999999;
glGetIntegerv(GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, &max_ssbo_binds);
GLContext::max_ssbo_binds = min_ii(GLContext::max_ssbo_binds, max_ssbo_binds);
glGetIntegerv(GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS, &max_ssbo_binds);
GLContext::max_ssbo_binds = min_ii(GLContext::max_ssbo_binds, max_ssbo_binds);
glGetIntegerv(GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS, &max_ssbo_binds);
GLContext::max_ssbo_binds = min_ii(GLContext::max_ssbo_binds, max_ssbo_binds);
if (GLContext::max_ssbo_binds < 8) {
/* Does not meet our minimum requirements. */
GCaps.shader_storage_buffer_objects_support = false;
}
glGetIntegerv(GL_MAX_SHADER_STORAGE_BLOCK_SIZE, &GLContext::max_ssbo_size);
}
GLint max_ssbo_binds;
GLContext::max_ssbo_binds = 999999;
glGetIntegerv(GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, &max_ssbo_binds);
GLContext::max_ssbo_binds = min_ii(GLContext::max_ssbo_binds, max_ssbo_binds);
glGetIntegerv(GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS, &max_ssbo_binds);
GLContext::max_ssbo_binds = min_ii(GLContext::max_ssbo_binds, max_ssbo_binds);
glGetIntegerv(GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS, &max_ssbo_binds);
GLContext::max_ssbo_binds = min_ii(GLContext::max_ssbo_binds, max_ssbo_binds);
glGetIntegerv(GL_MAX_SHADER_STORAGE_BLOCK_SIZE, &GLContext::max_ssbo_size);
GLContext::base_instance_support = epoxy_has_gl_extension("GL_ARB_base_instance");
GLContext::clear_texture_support = epoxy_has_gl_extension("GL_ARB_clear_texture");
GLContext::copy_image_support = epoxy_has_gl_extension("GL_ARB_copy_image");

View File

@@ -221,11 +221,9 @@ GLShaderInterface::GLShaderInterface(GLuint program)
uniform_len = active_uniform_len;
GLint max_ssbo_name_len = 0, ssbo_len = 0;
if (GPU_shader_storage_buffer_objects_support()) {
glGetProgramInterfaceiv(program, GL_SHADER_STORAGE_BLOCK, GL_ACTIVE_RESOURCES, &ssbo_len);
glGetProgramInterfaceiv(
program, GL_SHADER_STORAGE_BLOCK, GL_MAX_NAME_LENGTH, &max_ssbo_name_len);
}
glGetProgramInterfaceiv(program, GL_SHADER_STORAGE_BLOCK, GL_ACTIVE_RESOURCES, &ssbo_len);
glGetProgramInterfaceiv(
program, GL_SHADER_STORAGE_BLOCK, GL_MAX_NAME_LENGTH, &max_ssbo_name_len);
BLI_assert_msg(ubo_len <= 16, "enabled_ubo_mask_ is uint16_t");

View File

@@ -18,7 +18,7 @@ namespace blender::gpu::tests {
static void test_buffer_texture()
{
if (!GPU_compute_shader_support() && !GPU_shader_storage_buffer_objects_support()) {
if (!GPU_compute_shader_support()) {
/* We can't test as the platform does not support compute shaders. */
std::cout << "Skipping compute shader test: platform not supported";
GTEST_SKIP();

View File

@@ -133,7 +133,7 @@ struct Shader {
/** Test the given info when doing a single call. */
static void do_push_constants_test(const char *info_name, const int num_calls_simultaneously = 1)
{
if (!GPU_compute_shader_support() && !GPU_shader_storage_buffer_objects_support()) {
if (!GPU_compute_shader_support()) {
/* We can't test as the platform does not support compute shaders. */
std::cout << "Skipping test: platform not supported";
return;

View File

@@ -221,7 +221,7 @@ GPU_TEST(shader_compute_ibo)
static void test_shader_compute_ssbo()
{
if (!GPU_compute_shader_support() && !GPU_shader_storage_buffer_objects_support()) {
if (!GPU_compute_shader_support()) {
/* We can't test as the platform does not support compute shaders. */
std::cout << "Skipping compute shader test: platform not supported";
return;

View File

@@ -222,7 +222,6 @@ void VKBackend::capabilities_init(VKDevice &device)
GCaps = {};
GCaps.compute_shader_support = true;
GCaps.geometry_shader_support = true;
GCaps.shader_storage_buffer_objects_support = true;
GCaps.shader_image_load_store_support = true;
GCaps.shader_draw_parameters_support =
device.physical_device_vulkan_11_features_get().shaderDrawParameters;

View File

@@ -206,17 +206,6 @@ static PyObject *pygpu_compute_shader_support_get(PyObject * /*self*/)
return PyBool_FromLong(GPU_compute_shader_support());
}
PyDoc_STRVAR(pygpu_shader_storage_buffer_objects_support_get_doc,
".. function:: shader_storage_buffer_objects_support_get()\n"
"\n"
" Are SSBO's supported.\n"
"\n"
" :return: True when supported, False when not supported.\n"
" :rtype: bool\n");
static PyObject *pygpu_shader_storage_buffer_objects_support_get(PyObject * /*self*/)
{
return PyBool_FromLong(GPU_shader_storage_buffer_objects_support());
}
PyDoc_STRVAR(pygpu_shader_image_load_store_support_get_doc,
".. function:: shader_image_load_store_support_get()\n"
"\n"
@@ -307,10 +296,6 @@ static PyMethodDef pygpu_capabilities__tp_methods[] = {
(PyCFunction)pygpu_compute_shader_support_get,
METH_NOARGS,
pygpu_compute_shader_support_get_doc},
{"shader_storage_buffer_objects_support_get",
(PyCFunction)pygpu_shader_storage_buffer_objects_support_get,
METH_NOARGS,
pygpu_shader_storage_buffer_objects_support_get_doc},
{"shader_image_load_store_support_get",
(PyCFunction)pygpu_shader_image_load_store_support_get,
METH_NOARGS,