Cleanup: Make format

Formatting changes resulting from running "Make Format" (the source-formatting convenience target).
Harley Acheson
2023-11-23 15:09:36 -08:00
parent 2ece99891b
commit b4b898063e
10 changed files with 97 additions and 98 deletions

View File

@@ -820,8 +820,7 @@ void draw_nla_main_data(bAnimContext *ac, SpaceNla *snla, ARegion *region)
     /* check if visible */
     if (IN_RANGE(ymin, v2d->cur.ymin, v2d->cur.ymax) ||
-        IN_RANGE(ymax, v2d->cur.ymin, v2d->cur.ymax))
-    {
+        IN_RANGE(ymax, v2d->cur.ymin, v2d->cur.ymax)) {
       /* data to draw depends on the type of track */
       switch (ale->type) {
         case ANIMTYPE_NLATRACK: {
@@ -958,8 +957,7 @@ void draw_nla_track_list(const bContext *C, bAnimContext *ac, ARegion *region)
     /* check if visible */
     if (IN_RANGE(ymin, v2d->cur.ymin, v2d->cur.ymax) ||
-        IN_RANGE(ymax, v2d->cur.ymin, v2d->cur.ymax))
-    {
+        IN_RANGE(ymax, v2d->cur.ymin, v2d->cur.ymax)) {
       /* draw all tracks using standard channel-drawing API */
       ANIM_channel_draw(ac, ale, ymin, ymax, track_index);
     }
@@ -981,8 +979,7 @@ void draw_nla_track_list(const bContext *C, bAnimContext *ac, ARegion *region)
     /* check if visible */
     if (IN_RANGE(ymin, v2d->cur.ymin, v2d->cur.ymax) ||
-        IN_RANGE(ymax, v2d->cur.ymin, v2d->cur.ymax))
-    {
+        IN_RANGE(ymax, v2d->cur.ymin, v2d->cur.ymax)) {
       /* draw all tracks using standard channel-drawing API */
       rctf track_rect;
       BLI_rctf_init(&track_rect, 0, v2d->cur.xmax, ymin, ymax);

View File

@@ -2164,8 +2164,7 @@ static int nlaedit_apply_scale_exec(bContext *C, wmOperator * /*op*/)
      * (transitions don't have scale) */
     if ((strip->flag & NLASTRIP_FLAG_SELECT) && (strip->type == NLASTRIP_TYPE_CLIP)) {
       if (strip->act == nullptr || ID_IS_OVERRIDE_LIBRARY(strip->act) ||
-          ID_IS_LINKED(strip->act))
-      {
+          ID_IS_LINKED(strip->act)) {
         continue;
       }
       /* if the referenced action is used by other strips,

View File

@@ -241,8 +241,7 @@ static void box_select_nla_strips(bAnimContext *ac, rcti rect, short mode, short
     /* only select strips if they fall within the required ranges (if applicable) */
     LISTBASE_FOREACH (NlaStrip *, strip, &nlt->strips) {
       if ((mode == NLA_BOXSEL_CHANNELS) ||
-          BKE_nlastrip_within_bounds(strip, rectf.xmin, rectf.xmax))
-      {
+          BKE_nlastrip_within_bounds(strip, rectf.xmin, rectf.xmax)) {
         /* set selection */
         ACHANNEL_SET_FLAG(strip, selectmode, NLASTRIP_FLAG_SELECT);

View File

@@ -481,7 +481,7 @@ static void unlink_object_fn(bContext *C,
   Scene *scene = (Scene *)tsep->id;
   FOREACH_SCENE_COLLECTION_BEGIN (scene, collection) {
-    if (BKE_collection_has_object(collection, ob)){
+    if (BKE_collection_has_object(collection, ob)) {
       BKE_collection_object_remove(bmain, collection, ob, true);
     }
   }

View File

@@ -17,12 +17,12 @@
 #include "BLI_utildefines.h"
 #include "gpu_py_capabilities.h"
+#include "gpu_py_compute.h"
 #include "gpu_py_matrix.h"
 #include "gpu_py_platform.h"
 #include "gpu_py_select.h"
 #include "gpu_py_state.h"
 #include "gpu_py_types.h"
-#include "gpu_py_compute.h"
 #include "gpu_py.h"
 #include "gpu_py_api.h" /* Own include. */

View File

@@ -346,15 +346,17 @@ static PyMethodDef pygpu_capabilities__tp_methods[] = {
      (PyCFunction)pygpu_hdr_support_get,
      METH_NOARGS,
      pygpu_hdr_support_get_doc},
-    {"max_work_group_count_get",
-     (PyCFunction)pygpu_max_work_group_count_get,
-     METH_VARARGS,
-     pygpu_max_work_group_count_get_doc,
+    {
+        "max_work_group_count_get",
+        (PyCFunction)pygpu_max_work_group_count_get,
+        METH_VARARGS,
+        pygpu_max_work_group_count_get_doc,
     },
-    {"max_work_group_size_get",
-     (PyCFunction)pygpu_max_work_group_size_get,
-     METH_VARARGS,
-     pygpu_max_work_group_size_get_doc,
+    {
+        "max_work_group_size_get",
+        (PyCFunction)pygpu_max_work_group_size_get,
+        METH_VARARGS,
+        pygpu_max_work_group_size_get_doc,
     },
     {nullptr, nullptr, 0, nullptr},
 };
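The two method-table entries reformatted above are the Python wrappers around GPU_max_work_group_count() and GPU_max_work_group_size(). A minimal hedged sketch of querying them from a script; the gpu.capabilities module path and the per-axis index argument (0, 1, 2 for x, y, z, mirroring the METH_VARARGS wrappers of the C calls) are assumptions, not confirmed by this diff:

import gpu

# Assumed signatures: both getters take the axis index (0 = x, 1 = y, 2 = z),
# matching GPU_max_work_group_count(i) / GPU_max_work_group_size(i).
for axis, label in enumerate("xyz"):
    count = gpu.capabilities.max_work_group_count_get(axis)
    size = gpu.capabilities.max_work_group_size_get(axis)
    print(f"{label}: max work group count = {count}, max work group size = {size}")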

View File

@@ -13,12 +13,12 @@
 #include "BLI_utildefines.h"
+#include "GPU_capabilities.h"
+#include "GPU_compute.h"
 #include "GPU_shader.h"
+#include "GPU_state.h"
 #include "GPU_texture.h"
 #include "GPU_uniform_buffer.h"
-#include "GPU_compute.h"
-#include "GPU_state.h"
-#include "GPU_capabilities.h"
 #include "../generic/py_capi_utils.h"
 #include "../generic/python_compat.h"
@@ -27,37 +27,37 @@
 #include "../mathutils/mathutils.h"
 #include "gpu_py.h"
+#include "gpu_py_compute.h" /* own include */
+#include "gpu_py_shader.h"
 #include "gpu_py_texture.h"
 #include "gpu_py_uniformbuffer.h"
 #include "gpu_py_vertex_format.h"
-#include "gpu_py_shader.h"
-#include "gpu_py_compute.h" /* own include */
-PyDoc_STRVAR(
-    pygpu_compute_dispatch_doc,
-    ".. function:: dispatch(shader, groups_x_len, groups_y_len, groups_z_len)\n"
-    "\n"
-    "   Dispatches GPU compute.\n"
-    "\n"
-    "   :arg shader: The shader that you want to dispatch.\n"
-    "   :type shader: :class:`gpu.types.GPUShader`\n"
-    "   :arg groups_x_len: Int for group x length:\n"
-    "   :type groups_x_len: int\n"
-    "   :arg groups_y_len: Int for group y length:\n"
-    "   :type groups_y_len: int\n"
-    "   :arg groups_z_len: Int for group z length:\n"
-    "   :type groups_z_len: int\n"
-    "   :return: Shader object.\n"
-    "   :rtype: :class:`bpy.types.GPUShader`\n");
+PyDoc_STRVAR(pygpu_compute_dispatch_doc,
+             ".. function:: dispatch(shader, groups_x_len, groups_y_len, groups_z_len)\n"
+             "\n"
+             "   Dispatches GPU compute.\n"
+             "\n"
+             "   :arg shader: The shader that you want to dispatch.\n"
+             "   :type shader: :class:`gpu.types.GPUShader`\n"
+             "   :arg groups_x_len: Int for group x length:\n"
+             "   :type groups_x_len: int\n"
+             "   :arg groups_y_len: Int for group y length:\n"
+             "   :type groups_y_len: int\n"
+             "   :arg groups_z_len: Int for group z length:\n"
+             "   :type groups_z_len: int\n"
+             "   :return: Shader object.\n"
+             "   :rtype: :class:`bpy.types.GPUShader`\n");
 static PyObject *pygpu_compute_dispatch(PyObject * /*self*/, PyObject *args, PyObject *kwds)
 {
-  BPyGPUShader *py_shader;
-  int groups_x_len;
-  int groups_y_len;
-  int groups_z_len;
-  static const char *_keywords[] = {"shader", "groups_x_len", "groups_y_len", "groups_z_len", nullptr};
-  static _PyArg_Parser _parser = {
+  BPyGPUShader *py_shader;
+  int groups_x_len;
+  int groups_y_len;
+  int groups_z_len;
+  static const char *_keywords[] = {
+      "shader", "groups_x_len", "groups_y_len", "groups_z_len", nullptr};
+  static _PyArg_Parser _parser = {
       PY_ARG_PARSER_HEAD_COMPAT()
       "O" /* `shader` */
       "i" /* `groups_x_len` */
@@ -67,44 +67,48 @@ static PyObject *pygpu_compute_dispatch(PyObject * /*self*/, PyObject *args, PyO
       _keywords,
       nullptr,
   };
-  if (_PyArg_ParseTupleAndKeywordsFast(args,
-                                       kwds,
-                                       &_parser,
-                                       &py_shader,
-                                       &groups_x_len,
-                                       &groups_y_len,
-                                       &groups_z_len))
-  {
-    if (!BPyGPUShader_Check(py_shader)) {
-      PyErr_Format(PyExc_TypeError, "Expected a GPUShader, got %s", Py_TYPE(py_shader)->tp_name);
-      return nullptr;
-    }
-    // Check that groups do not exceed GPU_max_work_group_count()
-    const int max_work_group_count_x = GPU_max_work_group_count(0);
-    const int max_work_group_count_y = GPU_max_work_group_count(1);
-    const int max_work_group_count_z = GPU_max_work_group_count(2);
+  if (_PyArg_ParseTupleAndKeywordsFast(
+          args, kwds, &_parser, &py_shader, &groups_x_len, &groups_y_len, &groups_z_len))
+  {
-    // Report back to the user both the requested and the maximum supported value
-    if (groups_x_len > GPU_max_work_group_count(0)) {
-      PyErr_Format(PyExc_ValueError, "groups_x_len (%d) exceeds maximum supported value (max work group count: %d)", groups_x_len, max_work_group_count_x);
-      return nullptr;
-    }
-    if (groups_y_len > GPU_max_work_group_count(1)) {
-      PyErr_Format(PyExc_ValueError, "groups_y_len (%d) exceeds maximum supported value (max work group count: %d)", groups_y_len, max_work_group_count_y);
-      return nullptr;
-    }
-    if (groups_z_len > GPU_max_work_group_count(2)) {
-      PyErr_Format(PyExc_ValueError, "groups_z_len (%d) exceeds maximum supported value (max work group count: %d)", groups_z_len, max_work_group_count_z);
-      return nullptr;
-    }
-    GPUShader *shader = py_shader->shader;
-    GPU_compute_dispatch(shader, groups_x_len, groups_y_len, groups_z_len);
-    GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH | GPU_BARRIER_SHADER_IMAGE_ACCESS);
+    if (!BPyGPUShader_Check(py_shader)) {
+      PyErr_Format(PyExc_TypeError, "Expected a GPUShader, got %s", Py_TYPE(py_shader)->tp_name);
+      return nullptr;
+    }
-  Py_RETURN_NONE;
+    // Check that groups do not exceed GPU_max_work_group_count()
+    const int max_work_group_count_x = GPU_max_work_group_count(0);
+    const int max_work_group_count_y = GPU_max_work_group_count(1);
+    const int max_work_group_count_z = GPU_max_work_group_count(2);
+    // Report back to the user both the requested and the maximum supported value
+    if (groups_x_len > GPU_max_work_group_count(0)) {
+      PyErr_Format(PyExc_ValueError,
+                   "groups_x_len (%d) exceeds maximum supported value (max work group count: %d)",
+                   groups_x_len,
+                   max_work_group_count_x);
+      return nullptr;
+    }
+    if (groups_y_len > GPU_max_work_group_count(1)) {
+      PyErr_Format(PyExc_ValueError,
+                   "groups_y_len (%d) exceeds maximum supported value (max work group count: %d)",
+                   groups_y_len,
+                   max_work_group_count_y);
+      return nullptr;
+    }
+    if (groups_z_len > GPU_max_work_group_count(2)) {
+      PyErr_Format(PyExc_ValueError,
+                   "groups_z_len (%d) exceeds maximum supported value (max work group count: %d)",
+                   groups_z_len,
+                   max_work_group_count_z);
+      return nullptr;
+    }
+    GPUShader *shader = py_shader->shader;
+    GPU_compute_dispatch(shader, groups_x_len, groups_y_len, groups_z_len);
+    GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH | GPU_BARRIER_SHADER_IMAGE_ACCESS);
   }
+  Py_RETURN_NONE;
 }
/* -------------------------------------------------------------------- */
@@ -112,7 +116,10 @@ static PyObject *pygpu_compute_dispatch(PyObject * /*self*/, PyObject *args, PyO
  * \{ */
 static PyMethodDef pygpu_compute__tp_methods[] = {
-    {"dispatch", (PyCFunction)pygpu_compute_dispatch, METH_VARARGS | METH_KEYWORDS, pygpu_compute_dispatch_doc},
+    {"dispatch",
+     (PyCFunction)pygpu_compute_dispatch,
+     METH_VARARGS | METH_KEYWORDS,
+     pygpu_compute_dispatch_doc},
     {nullptr, nullptr, 0, nullptr},
 };
@@ -120,7 +127,8 @@ static PyMethodDef pygpu_compute__tp_methods[] = {
 # pragma GCC diagnostic pop
 #endif
-PyDoc_STRVAR(pygpu_compute__tp_doc, "This module provides access to the global GPU compute functions");
+PyDoc_STRVAR(pygpu_compute__tp_doc,
+             "This module provides access to the global GPU compute functions");
 static PyModuleDef pygpu_compute_module_def = {
     /*m_base*/ PyModuleDef_HEAD_INIT,
     /*m_name*/ "gpu.compute",

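For context, a hedged sketch of how this dispatch entry point is driven from the Python side. The dispatch() signature and the gpu.compute module name come from the docstring and PyModuleDef above, and the TypeError/ValueError behaviour from the checks in pygpu_compute_dispatch(); the compute shader itself is assumed to be created elsewhere:

import gpu

def run_compute(shader, groups_x=64, groups_y=64, groups_z=1):
    """Dispatch `shader`, assumed to be a compute gpu.types.GPUShader."""
    try:
        # Signature per pygpu_compute_dispatch_doc above; the C side also issues
        # GPU_memory_barrier(), so image/texture writes are visible afterwards.
        gpu.compute.dispatch(shader, groups_x, groups_y, groups_z)
    except ValueError as ex:
        # Raised by the bounds checks when a group count exceeds
        # GPU_max_work_group_count() for that axis.
        print("dispatch rejected:", ex)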
View File

@@ -563,9 +563,7 @@ static PyObject *pygpu_shader_image(BPyGPUShader *self, PyObject *args)
 {
   const char *name;
   BPyGPUTexture *py_texture;
-  if (!PyArg_ParseTuple(
-          args, "sO!:GPUShader.image", &name, &BPyGPUTexture_Type, &py_texture))
-  {
+  if (!PyArg_ParseTuple(args, "sO!:GPUShader.image", &name, &BPyGPUTexture_Type, &py_texture)) {
     return nullptr;
   }
@@ -581,7 +579,6 @@ static PyObject *pygpu_shader_image(BPyGPUShader *self, PyObject *args)
   Py_RETURN_NONE;
 }
-
 PyDoc_STRVAR(
     pygpu_shader_uniform_block_doc,
     ".. method:: uniform_block(name, ubo)\n"
@@ -732,10 +729,7 @@ static PyMethodDef pygpu_shader__tp_methods[] = {
      (PyCFunction)pygpu_shader_uniform_sampler,
      METH_VARARGS,
      pygpu_shader_uniform_sampler_doc},
-    {"image",
-     (PyCFunction)pygpu_shader_image,
-     METH_VARARGS,
-     pygpu_shader_image_doc},
+    {"image", (PyCFunction)pygpu_shader_image, METH_VARARGS, pygpu_shader_image_doc},
     {"uniform_block",
      (PyCFunction)pygpu_shader_uniform_block,
      METH_VARARGS,

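The image() method whose table entry is collapsed above binds a GPUTexture to a named shader image slot; the (name, texture) arguments follow from the "sO!:GPUShader.image" parse format in this file. A minimal hedged usage sketch from Python:

import gpu

def bind_storage_image(shader, texture, name="img_output"):
    """Bind `texture` (a gpu.types.GPUTexture) to image slot `name` on `shader`."""
    shader.bind()                # activate the shader before assigning resources
    shader.image(name, texture)  # arguments per the 'sO!:GPUShader.image' format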
View File

@@ -1183,9 +1183,9 @@ static PyMethodDef pygpu_shader_info__tp_methods[] = {
      METH_O,
      pygpu_shader_info_fragment_source_doc},
     {"compute_source",
-    (PyCFunction)pygpu_shader_info_compute_source,
-    METH_O,
-    pygpu_shader_info_compute_source_doc},
+     (PyCFunction)pygpu_shader_info_compute_source,
+     METH_O,
+     pygpu_shader_info_compute_source_doc},
     {"typedef_source",
      (PyCFunction)pygpu_shader_info_typedef_source,
      METH_O,

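compute_source(), whose entry is re-indented above, is how a GPUShaderCreateInfo receives its compute stage. A hedged sketch of building a compute shader this way; compute_source() is confirmed by the method table, while local_group_size() and gpu.shader.create_from_info() are assumptions about the surrounding shader-info API:

import gpu

def build_compute_shader(glsl_body):
    info = gpu.types.GPUShaderCreateInfo()
    info.local_group_size(8, 8, 1)             # assumed helper setting the GLSL local size
    info.compute_source(glsl_body)             # method from the table above
    return gpu.shader.create_from_info(info)   # assumed to compile the create-info block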
View File

@@ -11,6 +11,7 @@
 #include "gpu_py_buffer.h"
 #include "gpu_py_batch.h"
+#include "gpu_py_compute.h"
 #include "gpu_py_element.h"
 #include "gpu_py_framebuffer.h"
 #include "gpu_py_offscreen.h"
@@ -19,7 +20,6 @@
 #include "gpu_py_uniformbuffer.h"
 #include "gpu_py_vertex_buffer.h"
 #include "gpu_py_vertex_format.h"
-#include "gpu_py_compute.h"
 #ifdef __cplusplus
 extern "C" {