Cleanup: Mark unused function arguments as such
A lot of such cases were discovered after a recent change to Clang's compiler flags for C++. Pull Request: https://projects.blender.org/blender/blender/pulls/109732
This commit is contained in:
committed by
Sergey Sharybin
parent
b3cd646a15
commit
bad41885db
@@ -201,7 +201,7 @@ bool BVHMetal::build_BLAS_mesh(Progress &progress,
|
||||
sizeDataType:MTLDataTypeULong];
|
||||
}
|
||||
[accelEnc endEncoding];
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> /*command_buffer*/) {
|
||||
/* free temp resources */
|
||||
[scratchBuf release];
|
||||
[indexBuf release];
|
||||
@@ -220,7 +220,7 @@ bool BVHMetal::build_BLAS_mesh(Progress &progress,
|
||||
[accelEnc copyAndCompactAccelerationStructure:accel_uncompressed
|
||||
toAccelerationStructure:accel];
|
||||
[accelEnc endEncoding];
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> /*command_buffer*/) {
|
||||
uint64_t allocated_size = [accel allocatedSize];
|
||||
stats.mem_alloc(allocated_size);
|
||||
accel_struct = accel;
|
||||
@@ -420,7 +420,7 @@ bool BVHMetal::build_BLAS_hair(Progress &progress,
|
||||
sizeDataType:MTLDataTypeULong];
|
||||
}
|
||||
[accelEnc endEncoding];
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> /*command_buffer*/) {
|
||||
/* free temp resources */
|
||||
[scratchBuf release];
|
||||
[aabbBuf release];
|
||||
@@ -438,7 +438,7 @@ bool BVHMetal::build_BLAS_hair(Progress &progress,
|
||||
[accelEnc copyAndCompactAccelerationStructure:accel_uncompressed
|
||||
toAccelerationStructure:accel];
|
||||
[accelEnc endEncoding];
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> /*command_buffer*/) {
|
||||
uint64_t allocated_size = [accel allocatedSize];
|
||||
stats.mem_alloc(allocated_size);
|
||||
accel_struct = accel;
|
||||
@@ -634,7 +634,7 @@ bool BVHMetal::build_BLAS_pointcloud(Progress &progress,
|
||||
sizeDataType:MTLDataTypeULong];
|
||||
}
|
||||
[accelEnc endEncoding];
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> /*command_buffer*/) {
|
||||
/* free temp resources */
|
||||
[scratchBuf release];
|
||||
[aabbBuf release];
|
||||
@@ -652,7 +652,7 @@ bool BVHMetal::build_BLAS_pointcloud(Progress &progress,
|
||||
[accelEnc copyAndCompactAccelerationStructure:accel_uncompressed
|
||||
toAccelerationStructure:accel];
|
||||
[accelEnc endEncoding];
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
|
||||
[accelCommands addCompletedHandler:^(id<MTLCommandBuffer> /*command_buffer*/) {
|
||||
uint64_t allocated_size = [accel allocatedSize];
|
||||
stats.mem_alloc(allocated_size);
|
||||
accel_struct = accel;
|
||||
|
||||
@@ -25,7 +25,8 @@ std::map<int, MetalDevice *> MetalDevice::active_device_ids;
|
||||
|
||||
/* Thread-safe device access for async work. Calling code must pass an appropriately scoped lock
|
||||
* to existing_devices_mutex to safeguard against destruction of the returned instance. */
|
||||
MetalDevice *MetalDevice::get_device_by_ID(int ID, thread_scoped_lock &existing_devices_mutex_lock)
|
||||
MetalDevice *MetalDevice::get_device_by_ID(int ID,
|
||||
thread_scoped_lock & /*existing_devices_mutex_lock*/)
|
||||
{
|
||||
auto it = active_device_ids.find(ID);
|
||||
if (it != active_device_ids.end()) {
|
||||
@@ -288,12 +289,12 @@ MetalDevice::~MetalDevice()
|
||||
texture_info.free();
|
||||
}
|
||||
|
||||
bool MetalDevice::support_device(const uint kernel_features /*requested_features*/)
|
||||
bool MetalDevice::support_device(const uint /*kernel_features*/)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
bool MetalDevice::check_peer_access(Device *peer_device)
|
||||
bool MetalDevice::check_peer_access(Device * /*peer_device*/)
|
||||
{
|
||||
assert(0);
|
||||
/* does peer access make sense? */
|
||||
@@ -913,7 +914,9 @@ void MetalDevice::mem_free(device_memory &mem)
|
||||
}
|
||||
}
|
||||
|
||||
device_ptr MetalDevice::mem_alloc_sub_ptr(device_memory &mem, size_t offset, size_t /*size*/)
|
||||
device_ptr MetalDevice::mem_alloc_sub_ptr(device_memory & /*mem*/,
|
||||
size_t /*offset*/,
|
||||
size_t /*size*/)
|
||||
{
|
||||
/* METAL_WIP - revive if necessary */
|
||||
assert(0);
|
||||
|
||||
@@ -168,7 +168,7 @@ void ShaderCache::wait_for_all()
|
||||
}
|
||||
}
|
||||
|
||||
void ShaderCache::compile_thread_func(int thread_index)
|
||||
void ShaderCache::compile_thread_func(int /*thread_index*/)
|
||||
{
|
||||
while (running) {
|
||||
|
||||
@@ -711,7 +711,7 @@ void MetalKernelPipeline::compile()
|
||||
newComputePipelineStateWithDescriptor:computePipelineStateDescriptor
|
||||
options:pipelineOptions
|
||||
completionHandler:^(id<MTLComputePipelineState> computePipelineState,
|
||||
MTLComputePipelineReflection *reflection,
|
||||
MTLComputePipelineReflection * /*reflection*/,
|
||||
NSError *error) {
|
||||
pipeline = computePipelineState;
|
||||
|
||||
|
||||
@@ -683,20 +683,21 @@ bool MetalDeviceQueue::synchronize()
|
||||
/* For per-kernel timing, add event handlers to measure & accumulate dispatch times. */
|
||||
__block double completion_time = 0;
|
||||
for (uint64_t i = command_buffer_start_timing_id_; i < timing_shared_event_id_; i++) {
|
||||
[timing_shared_event_ notifyListener:shared_event_listener_
|
||||
atValue:i
|
||||
block:^(id<MTLSharedEvent> sharedEvent, uint64_t value) {
|
||||
completion_time = timer.get_time() - completion_time;
|
||||
last_completion_time_ = completion_time;
|
||||
for (auto label : command_encoder_labels_) {
|
||||
if (label.timing_id == value) {
|
||||
TimingStats &stat = timing_stats_[label.kernel];
|
||||
stat.num_dispatches++;
|
||||
stat.total_time += completion_time;
|
||||
stat.total_work_size += label.work_size;
|
||||
}
|
||||
}
|
||||
}];
|
||||
[timing_shared_event_
|
||||
notifyListener:shared_event_listener_
|
||||
atValue:i
|
||||
block:^(id<MTLSharedEvent> /*sharedEvent*/, uint64_t value) {
|
||||
completion_time = timer.get_time() - completion_time;
|
||||
last_completion_time_ = completion_time;
|
||||
for (auto label : command_encoder_labels_) {
|
||||
if (label.timing_id == value) {
|
||||
TimingStats &stat = timing_stats_[label.kernel];
|
||||
stat.num_dispatches++;
|
||||
stat.total_time += completion_time;
|
||||
stat.total_work_size += label.work_size;
|
||||
}
|
||||
}
|
||||
}];
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -707,7 +708,7 @@ bool MetalDeviceQueue::synchronize()
|
||||
__block dispatch_semaphore_t block_sema = wait_semaphore_;
|
||||
[shared_event_ notifyListener:shared_event_listener_
|
||||
atValue:shared_event_id_
|
||||
block:^(id<MTLSharedEvent> sharedEvent, uint64_t value) {
|
||||
block:^(id<MTLSharedEvent> /*sharedEvent*/, uint64_t /*value*/) {
|
||||
dispatch_semaphore_signal(block_sema);
|
||||
}];
|
||||
|
||||
@@ -853,7 +854,7 @@ void MetalDeviceQueue::copy_from_device(device_memory &mem)
|
||||
}
|
||||
}
|
||||
|
||||
void MetalDeviceQueue::prepare_resources(DeviceKernel kernel)
|
||||
void MetalDeviceQueue::prepare_resources(DeviceKernel /*kernel*/)
|
||||
{
|
||||
std::lock_guard<std::recursive_mutex> lock(metal_device_->metal_mem_map_mutex);
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ GHOST_TSuccess GHOST_DisplayManagerCocoa::getNumDisplays(uint8_t &numDisplays) c
|
||||
return GHOST_kSuccess;
|
||||
}
|
||||
|
||||
GHOST_TSuccess GHOST_DisplayManagerCocoa::getNumDisplaySettings(uint8_t display,
|
||||
GHOST_TSuccess GHOST_DisplayManagerCocoa::getNumDisplaySettings(uint8_t /*display*/,
|
||||
int32_t &numSettings) const
|
||||
{
|
||||
numSettings = (int32_t)3; // Width, Height, BitsPerPixel
|
||||
@@ -31,7 +31,7 @@ GHOST_TSuccess GHOST_DisplayManagerCocoa::getNumDisplaySettings(uint8_t display,
|
||||
}
|
||||
|
||||
GHOST_TSuccess GHOST_DisplayManagerCocoa::getDisplaySetting(uint8_t display,
|
||||
int32_t index,
|
||||
int32_t /*index*/,
|
||||
GHOST_DisplaySetting &setting) const
|
||||
{
|
||||
NSScreen *askedDisplay;
|
||||
@@ -110,7 +110,7 @@ GHOST_TSuccess GHOST_DisplayManagerCocoa::getCurrentDisplaySetting(
|
||||
}
|
||||
|
||||
GHOST_TSuccess GHOST_DisplayManagerCocoa::setCurrentDisplaySetting(
|
||||
uint8_t display, const GHOST_DisplaySetting &setting)
|
||||
uint8_t display, const GHOST_DisplaySetting & /*setting*/)
|
||||
{
|
||||
GHOST_ASSERT(
|
||||
(display == kMainDisplay),
|
||||
|
||||
@@ -153,7 +153,7 @@ static void unload_driver()
|
||||
dlclose(module);
|
||||
}
|
||||
|
||||
static void DeviceAdded(uint32_t unused)
|
||||
static void DeviceAdded(uint32_t /*unused*/)
|
||||
{
|
||||
#if DEBUG_NDOF_DRIVER
|
||||
printf("ndof: device added\n");
|
||||
@@ -168,14 +168,14 @@ static void DeviceAdded(uint32_t unused)
|
||||
ndof_manager->setDevice(vendorID, productID);
|
||||
}
|
||||
|
||||
static void DeviceRemoved(uint32_t unused)
|
||||
static void DeviceRemoved(uint32_t /*unused*/)
|
||||
{
|
||||
#if DEBUG_NDOF_DRIVER
|
||||
printf("ndof: device removed\n");
|
||||
#endif
|
||||
}
|
||||
|
||||
static void DeviceEvent(uint32_t unused, uint32_t msg_type, void *msg_arg)
|
||||
static void DeviceEvent(uint32_t /*unused*/, uint32_t msg_type, void *msg_arg)
|
||||
{
|
||||
if (msg_type == kConnexionMsgDeviceState) {
|
||||
ConnexionDeviceState *s = (ConnexionDeviceState *)msg_arg;
|
||||
|
||||
@@ -237,7 +237,7 @@ class GHOST_SystemCocoa : public GHOST_System {
|
||||
/**
|
||||
* \see GHOST_ISystem
|
||||
*/
|
||||
bool setConsoleWindowState(GHOST_TConsoleWindowState action)
|
||||
bool setConsoleWindowState(GHOST_TConsoleWindowState /*action*/)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -698,7 +698,7 @@ GHOST_IWindow *GHOST_SystemCocoa::createWindow(const char *title,
|
||||
uint32_t height,
|
||||
GHOST_TWindowState state,
|
||||
GHOST_GPUSettings gpuSettings,
|
||||
const bool exclusive,
|
||||
const bool /*exclusive*/,
|
||||
const bool is_dialog,
|
||||
const GHOST_IWindow *parentWindow)
|
||||
{
|
||||
@@ -914,7 +914,7 @@ GHOST_TCapabilityFlag GHOST_SystemCocoa::getCapabilities() const
|
||||
/**
|
||||
* The event queue polling function
|
||||
*/
|
||||
bool GHOST_SystemCocoa::processEvents(bool waitForEvent)
|
||||
bool GHOST_SystemCocoa::processEvents(bool /*waitForEvent*/)
|
||||
{
|
||||
bool anyProcessed = false;
|
||||
NSEvent *event;
|
||||
@@ -1953,7 +1953,7 @@ GHOST_TSuccess GHOST_SystemCocoa::handleKeyEvent(void *eventPtr)
|
||||
|
||||
#pragma mark Clipboard get/set
|
||||
|
||||
char *GHOST_SystemCocoa::getClipboard(bool selection) const
|
||||
char *GHOST_SystemCocoa::getClipboard(bool /*selection*/) const
|
||||
{
|
||||
char *temp_buff;
|
||||
size_t pastedTextSize;
|
||||
|
||||
@@ -256,7 +256,7 @@ void MTLBackend::platform_exit()
|
||||
* \{ */
|
||||
MTLCapabilities MTLBackend::capabilities = {};
|
||||
|
||||
static const char *mtl_extensions_get_null(int i)
|
||||
static const char *mtl_extensions_get_null(int /*i*/)
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
@@ -83,10 +83,10 @@ class MTLBatch : public Batch {
|
||||
|
||||
void draw(int v_first, int v_count, int i_first, int i_count) override;
|
||||
void draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset) override;
|
||||
void multi_draw_indirect(GPUStorageBuf *indirect_buf,
|
||||
int count,
|
||||
intptr_t offset,
|
||||
intptr_t stride) override
|
||||
void multi_draw_indirect(GPUStorageBuf * /*indirect_buf*/,
|
||||
int /*count*/,
|
||||
intptr_t /*offset*/,
|
||||
intptr_t /*stride*/) override
|
||||
{
|
||||
/* TODO(Metal): Support indirect draw commands. */
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ int MTLCommandBufferManager::num_active_cmd_bufs = 0;
|
||||
/** \name MTLCommandBuffer initialization and render coordination.
|
||||
* \{ */
|
||||
|
||||
void MTLCommandBufferManager::prepare(bool supports_render)
|
||||
void MTLCommandBufferManager::prepare(bool /*supports_render*/)
|
||||
{
|
||||
render_pass_state_.reset_state();
|
||||
}
|
||||
@@ -115,7 +115,7 @@ bool MTLCommandBufferManager::submit(bool wait)
|
||||
id<MTLCommandBuffer> cmd_buffer_ref = active_command_buffer_;
|
||||
[cmd_buffer_ref retain];
|
||||
|
||||
[cmd_buffer_ref addCompletedHandler:^(id<MTLCommandBuffer> cb) {
|
||||
[cmd_buffer_ref addCompletedHandler:^(id<MTLCommandBuffer> /*cb*/) {
|
||||
/* Upon command buffer completion, decrement MTLSafeFreeList reference count
|
||||
* to allow buffers no longer in use by this CommandBuffer to be freed. */
|
||||
cmd_free_buffer_list->decrement_reference();
|
||||
@@ -499,7 +499,7 @@ bool MTLCommandBufferManager::do_break_submission()
|
||||
* \{ */
|
||||
|
||||
/* Debug. */
|
||||
void MTLCommandBufferManager::push_debug_group(const char *name, int index)
|
||||
void MTLCommandBufferManager::push_debug_group(const char *name, int /*index*/)
|
||||
{
|
||||
/* Only perform this operation if capturing. */
|
||||
MTLCaptureManager *capture_manager = [MTLCaptureManager sharedCaptureManager];
|
||||
|
||||
@@ -385,7 +385,7 @@ void MTLContext::end_frame()
|
||||
is_inside_frame_ = false;
|
||||
}
|
||||
|
||||
void MTLContext::check_error(const char *info)
|
||||
void MTLContext::check_error(const char * /*info*/)
|
||||
{
|
||||
/* TODO(Metal): Implement. */
|
||||
}
|
||||
@@ -1093,7 +1093,7 @@ bool MTLContext::ensure_render_pipeline_state(MTLPrimitiveType mtl_prim_type)
|
||||
/* Bind UBOs and SSBOs to an active render command encoder using the rendering state of the
|
||||
* current context -> Active shader, Bound UBOs). */
|
||||
bool MTLContext::ensure_buffer_bindings(
|
||||
id<MTLRenderCommandEncoder> rec,
|
||||
id<MTLRenderCommandEncoder> /*rec*/,
|
||||
const MTLShaderInterface *shader_interface,
|
||||
const MTLRenderPipelineStateInstance *pipeline_state_instance)
|
||||
{
|
||||
@@ -1338,7 +1338,7 @@ bool MTLContext::ensure_buffer_bindings(
|
||||
/* Variant for compute. Bind UBOs and SSBOs to an active compute command encoder using the
|
||||
* rendering state of the current context -> Active shader, Bound UBOs). */
|
||||
bool MTLContext::ensure_buffer_bindings(
|
||||
id<MTLComputeCommandEncoder> rec,
|
||||
id<MTLComputeCommandEncoder> /*rec*/,
|
||||
const MTLShaderInterface *shader_interface,
|
||||
const MTLComputePipelineStateInstance &pipeline_state_instance)
|
||||
{
|
||||
@@ -1516,6 +1516,7 @@ void MTLContext::ensure_texture_bindings(
|
||||
{
|
||||
BLI_assert(shader_interface != nil);
|
||||
BLI_assert(rec != nil);
|
||||
UNUSED_VARS_NDEBUG(rec);
|
||||
|
||||
/* Fetch Render Pass state. */
|
||||
MTLRenderPassState &rps = this->main_command_buffer.get_render_pass_state();
|
||||
@@ -1752,6 +1753,7 @@ void MTLContext::ensure_texture_bindings(
|
||||
{
|
||||
BLI_assert(shader_interface != nil);
|
||||
BLI_assert(rec != nil);
|
||||
UNUSED_VARS_NDEBUG(rec);
|
||||
|
||||
/* Fetch Render Pass state. */
|
||||
MTLComputeState &cs = this->main_command_buffer.get_compute_state();
|
||||
@@ -2597,7 +2599,7 @@ void present(MTLRenderPassDescriptor *blit_descriptor,
|
||||
|
||||
/* Increment free pool reference and decrement upon command buffer completion. */
|
||||
cmd_free_buffer_list->increment_reference();
|
||||
[cmdbuf addCompletedHandler:^(id<MTLCommandBuffer> cb) {
|
||||
[cmdbuf addCompletedHandler:^(id<MTLCommandBuffer> /*cb*/) {
|
||||
/* Flag freed buffers associated with this CMD buffer as ready to be freed. */
|
||||
cmd_free_buffer_list->decrement_reference();
|
||||
[cmd_buffer_ref release];
|
||||
|
||||
@@ -624,7 +624,7 @@ void MTLFrameBuffer::mark_do_clear()
|
||||
has_pending_clear_ = true;
|
||||
}
|
||||
|
||||
void MTLFrameBuffer::update_attachments(bool update_viewport)
|
||||
void MTLFrameBuffer::update_attachments(bool /*update_viewport*/)
|
||||
{
|
||||
if (!dirty_attachments_) {
|
||||
return;
|
||||
|
||||
@@ -204,7 +204,7 @@ gpu::MTLBuffer *MTLBufferPool::allocate_aligned(uint64_t size,
|
||||
}
|
||||
|
||||
gpu::MTLBuffer *MTLBufferPool::allocate_aligned_with_data(uint64_t size,
|
||||
uint32_t alignment,
|
||||
uint32_t /*alignment*/,
|
||||
bool cpu_visible,
|
||||
const void *data)
|
||||
{
|
||||
|
||||
@@ -188,7 +188,7 @@ void MTLShader::vertex_shader_from_glsl(MutableSpan<const char *> sources)
|
||||
shd_builder_->glsl_vertex_source_ = ss.str();
|
||||
}
|
||||
|
||||
void MTLShader::geometry_shader_from_glsl(MutableSpan<const char *> sources)
|
||||
void MTLShader::geometry_shader_from_glsl(MutableSpan<const char *> /*sources*/)
|
||||
{
|
||||
MTL_LOG_ERROR("MTLShader::geometry_shader_from_glsl - Geometry shaders unsupported!");
|
||||
}
|
||||
@@ -1534,7 +1534,8 @@ void MTLShader::ssbo_vertex_fetch_bind_attribute(const MTLSSBOAttribute &ssbo_at
|
||||
ssbo_vbo_slot_used_[ssbo_attr.vbo_id] = true;
|
||||
}
|
||||
|
||||
void MTLShader::ssbo_vertex_fetch_bind_attributes_end(id<MTLRenderCommandEncoder> active_encoder)
|
||||
void MTLShader::ssbo_vertex_fetch_bind_attributes_end(
|
||||
id<MTLRenderCommandEncoder> /*active_encoder*/)
|
||||
{
|
||||
ssbo_vertex_attribute_bind_active_ = false;
|
||||
|
||||
|
||||
@@ -450,7 +450,8 @@ static bool balanced_braces(char *current_str_begin, char *current_str_end)
|
||||
*
|
||||
* Constants declared within function-scope do not exhibit this problem.
|
||||
*/
|
||||
static void extract_global_scope_constants(std::string &str, std::stringstream &global_scope_out)
|
||||
static void extract_global_scope_constants(std::string &str,
|
||||
std::stringstream & /*global_scope_out*/)
|
||||
{
|
||||
char *current_str_begin = &*str.begin();
|
||||
char *current_str_end = &*str.end();
|
||||
@@ -490,7 +491,7 @@ static void extract_global_scope_constants(std::string &str, std::stringstream &
|
||||
#endif
|
||||
|
||||
static bool extract_ssbo_pragma_info(const MTLShader *shader,
|
||||
const MSLGeneratorInterface &msl_iface,
|
||||
const MSLGeneratorInterface & /*msl_iface*/,
|
||||
const std::string &in_vertex_src,
|
||||
MTLPrimitiveType &out_prim_tye,
|
||||
uint32_t &out_num_output_verts)
|
||||
@@ -803,19 +804,19 @@ std::string MTLShader::fragment_interface_declare(const shader::ShaderCreateInfo
|
||||
}
|
||||
|
||||
std::string MTLShader::MTLShader::geometry_interface_declare(
|
||||
const shader::ShaderCreateInfo &info) const
|
||||
const shader::ShaderCreateInfo & /*info*/) const
|
||||
{
|
||||
BLI_assert_msg(false, "Geometry shading unsupported by Metal");
|
||||
return "";
|
||||
}
|
||||
|
||||
std::string MTLShader::geometry_layout_declare(const shader::ShaderCreateInfo &info) const
|
||||
std::string MTLShader::geometry_layout_declare(const shader::ShaderCreateInfo & /*info*/) const
|
||||
{
|
||||
BLI_assert_msg(false, "Geometry shading unsupported by Metal");
|
||||
return "";
|
||||
}
|
||||
|
||||
std::string MTLShader::compute_layout_declare(const ShaderCreateInfo &info) const
|
||||
std::string MTLShader::compute_layout_declare(const ShaderCreateInfo & /*info*/) const
|
||||
{
|
||||
/* Metal supports compute shaders. THis function is a pass-through.
|
||||
* Compute shader interface population happens during mtl_shader_generator, as part of GLSL
|
||||
@@ -1976,14 +1977,14 @@ bool MSLGeneratorInterface::use_argument_buffer_for_samplers() const
|
||||
return use_argument_buffer;
|
||||
}
|
||||
|
||||
uint32_t MSLGeneratorInterface::num_samplers_for_stage(ShaderStage stage) const
|
||||
uint32_t MSLGeneratorInterface::num_samplers_for_stage(ShaderStage /*stage*/) const
|
||||
{
|
||||
/* NOTE: Sampler bindings and argument buffer shared across stages,
|
||||
* in case stages share texture/sampler bindings. */
|
||||
return texture_samplers.size();
|
||||
}
|
||||
|
||||
uint32_t MSLGeneratorInterface::max_sampler_index_for_stage(ShaderStage stage) const
|
||||
uint32_t MSLGeneratorInterface::max_sampler_index_for_stage(ShaderStage /*stage*/) const
|
||||
{
|
||||
/* NOTE: Sampler bindings and argument buffer shared across stages,
|
||||
* in case stages share texture/sampler bindings. */
|
||||
@@ -2508,6 +2509,7 @@ std::string MSLGeneratorInterface::generate_msl_uniform_structs(ShaderStage shad
|
||||
return "";
|
||||
}
|
||||
BLI_assert(shader_stage == ShaderStage::VERTEX || shader_stage == ShaderStage::FRAGMENT);
|
||||
UNUSED_VARS_NDEBUG(shader_stage);
|
||||
std::stringstream out;
|
||||
|
||||
/* Common Uniforms. */
|
||||
@@ -2538,7 +2540,7 @@ std::string MSLGeneratorInterface::generate_msl_uniform_structs(ShaderStage shad
|
||||
}
|
||||
|
||||
/* NOTE: Uniform macro definition vars can conflict with other parameters. */
|
||||
std::string MSLGeneratorInterface::generate_msl_uniform_undefs(ShaderStage shader_stage)
|
||||
std::string MSLGeneratorInterface::generate_msl_uniform_undefs(ShaderStage /*shader_stage*/)
|
||||
{
|
||||
std::stringstream out;
|
||||
|
||||
@@ -2713,6 +2715,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_transform_feedback_out_st
|
||||
ShaderStage shader_stage)
|
||||
{
|
||||
BLI_assert(shader_stage == ShaderStage::VERTEX || shader_stage == ShaderStage::FRAGMENT);
|
||||
UNUSED_VARS_NDEBUG(shader_stage);
|
||||
std::stringstream out;
|
||||
vertex_output_varyings_tf.clear();
|
||||
|
||||
|
||||
@@ -115,7 +115,7 @@ uint32_t MTLShaderInterface::add_uniform_block(uint32_t name_offset,
|
||||
uint32_t buffer_index,
|
||||
uint32_t location,
|
||||
uint32_t size,
|
||||
ShaderStage stage_mask)
|
||||
ShaderStage /*stage_mask*/)
|
||||
{
|
||||
/* Ensure Size is 16 byte aligned to guarantees alignment rules are satisfied. */
|
||||
if ((size % 16) != 0) {
|
||||
@@ -139,7 +139,7 @@ uint32_t MTLShaderInterface::add_storage_block(uint32_t name_offset,
|
||||
uint32_t buffer_index,
|
||||
uint32_t location,
|
||||
uint32_t size,
|
||||
ShaderStage stage_mask)
|
||||
ShaderStage /*stage_mask*/)
|
||||
{
|
||||
/* Ensure Size is 16 byte aligned to guarantees alignment rules are satisfied. */
|
||||
if ((size % 16) != 0) {
|
||||
|
||||
@@ -367,7 +367,7 @@ void MTLStateManager::set_clip_distances(const int new_dist_len, const int old_d
|
||||
}
|
||||
}
|
||||
|
||||
void MTLStateManager::set_logic_op(const bool enable)
|
||||
void MTLStateManager::set_logic_op(const bool /*enable*/)
|
||||
{
|
||||
/* NOTE(Metal): Logic Operations not directly supported. */
|
||||
}
|
||||
@@ -402,7 +402,7 @@ void MTLStateManager::set_backface_culling(const eGPUFaceCullTest test)
|
||||
pipeline_state.dirty = true;
|
||||
}
|
||||
|
||||
void MTLStateManager::set_provoking_vert(const eGPUProvokingVertex vert)
|
||||
void MTLStateManager::set_provoking_vert(const eGPUProvokingVertex /*vert*/)
|
||||
{
|
||||
/* NOTE(Metal): Provoking vertex is not a feature in the Metal API.
|
||||
* Shaders are handled on a case-by-case basis using a modified vertex shader.
|
||||
|
||||
@@ -247,7 +247,10 @@ void MTLStorageBuf::clear(uint32_t clear_value)
|
||||
}
|
||||
}
|
||||
|
||||
void MTLStorageBuf::copy_sub(VertBuf *src_, uint dst_offset, uint src_offset, uint copy_size)
|
||||
void MTLStorageBuf::copy_sub(VertBuf * /*src_*/,
|
||||
uint /*dst_offset*/,
|
||||
uint /*src_offset*/,
|
||||
uint /*copy_size*/)
|
||||
{
|
||||
/* TODO(Metal): Support Copy sub operation. */
|
||||
MTL_LOG_WARNING("MTLStorageBuf::copy_sub not yet supported.");
|
||||
|
||||
@@ -1490,6 +1490,7 @@ void gpu::MTLTexture::read_internal(int mip,
|
||||
}
|
||||
/* DEBUG check that the allocated data size matches the bytes we expect. */
|
||||
BLI_assert(total_bytes <= debug_data_size);
|
||||
UNUSED_VARS_NDEBUG(debug_data_size);
|
||||
|
||||
/* Fetch allocation from scratch buffer. */
|
||||
gpu::MTLBuffer *dest_buf = MTLContext::get_global_memory_manager()->allocate_aligned(
|
||||
|
||||
Reference in New Issue
Block a user