Cleanup: spelling in comments (make check_spelling_*)

Also use doxy-code blocks for inlined TEX to avoid
them being interpreted as English.
This commit is contained in:
Campbell Barton
2025-01-02 15:11:17 +11:00
parent 7481355397
commit dfef060ea4
21 changed files with 133 additions and 91 deletions

View File

@@ -60,14 +60,14 @@ class Package:
# There is no version check performed here, and a single missing package will fail the whole thing.
# Used for the basic sets of build packages and dependencies that can be assumed always available,
# with stable enough API that the version does not matter (to some extent, it is expected to work with
# any recent distro version at least).
# any recent distribution version at least).
"is_group",
# Whether Blender can build without this package or not.
# Note: In case of group packages, all sub-packages inherit from the value of the root group package.
"is_mandatory",
# Exact version currently used for pre-built libraries and buildbot builds.
# Exact version currently used for pre-built libraries and build-bot builds.
"version",
# Ideal version of the package (if possible, prioritize a package of that version), `version` shoudl match it.
# Ideal version of the package (if possible, prioritize a package of that version), `version` should match it.
"version_short",
# Minimal (included)/maximal (excluded) assumed supported version range.
# Package outside of that range won't be installed.
@@ -76,7 +76,7 @@ class Package:
"version_installed",
# Other Packages that depend/are only installed if the 'parent' one is valid.
"sub_packages",
# A mapping from distro name key to distro package name value.
# A mapping from distribution name key to distribution package name value.
# Value may either be:
# - A package name string.
# - A callback taking the Package and an iterable of its parents as parameters, and returning a string.

View File

@@ -283,17 +283,17 @@ enum {
G_FILE_COMPRESS = (1 << 1),
/**
* When in background mode, do not automatically build a depsgraph when loading a blendfile.
* When in background mode, do not automatically build a depsgraph when loading a blend-file.
* Typically set by the `--disable-depsgraph-on-file-load` command-line argument.
*
* NOTE: This is a temporary option, it is intended to be removed in blender 5.0.
* The default behavior will then be changed to never automatically generate a depsgraph on
* fileload when in background mode.
* file-load when in background mode.
*/
G_BACKGROUND_NO_DEPSGRAPH = 1 << 2,
/**
* Do not perform automatic resync of library overrides on blendfile load.
* Do not perform automatic resync of library overrides on blend-file load.
*
* NOTE: runtime version of #UserDef_Experimental.no_override_auto_resync, both values are OR'ed
* together.

View File

@@ -156,7 +156,7 @@ struct IDTypeInfo {
/**
* The user visible name for this data-block, also used as default name for a new data-block.
*
* \note: Also used for the 'filepath' ID type part when listing IDs in library blendfiles
* \note: Also used for the 'filepath' ID type part when listing IDs in library blend-files
* (`my_blendfile.blend/<IDType.name>/my_id_name`, e.g. `boat-v001.blend/Collection/PR-boat` for
* the `GRPR-boat` Collection ID in `boat-v001.blend`).
*/

View File

@@ -754,16 +754,18 @@ static void lib_query_unused_ids_untag_id(ID &id, UnusedIDsData &data)
}
}
/* Certain corner-cases require to consider an ID as used, even if there are no 'real' refcounting
* usages of these. */
/**
* Certain corner-cases require to consider an ID as used,
* even if there are no 'real' reference-counting usages of these.
*/
static bool lib_query_unused_ids_has_exception_user(ID &id, UnusedIDsData &data)
{
switch (GS(id.name)) {
case ID_OB: {
/* FIXME: This is a workaround until Object usages are handled more soundly.
*
* Historically, only refcounting Object usages were the Collection ones. All other
* references (e.g. as Constraints or Modifiers targets) did not increase their usercount.
* Historically, only reference-counting Object usages were the Collection ones. All other
* references (e.g. as Constraints or Modifiers targets) did not increase their user-count.
*
* This is not entirely true anymore (e.g. some type-agnostic ID usages like IDPointer custom
* properties do refcount Object ones too), but there are still many Object usages that
@@ -771,7 +773,7 @@ static bool lib_query_unused_ids_has_exception_user(ID &id, UnusedIDsData &data)
*
* This becomes a problem with linked data, as in that case instancing of linked Objects in
* the scene is not enforced (to avoid cluttering the scene), which leaves some actually used
* linked objects with a `0` usercount.
* linked objects with a `0` user-count.
*
* So this is a special check to consider linked objects as used also in case some other
* used ID uses them.

View File

@@ -1657,11 +1657,11 @@ DerivedMesh *subsurf_make_derived_from_derived(DerivedMesh *dm,
/* It is quite possible there is a much better place to do this. It
* depends a bit on how rigorously we expect this function to never
* be called in editmode. In semi-theory we could share a single
* cache, but the handles used inside and outside editmode are not
* be called in edit-mode. In semi-theory we could share a single
* cache, but the handles used inside and outside edit-mode are not
* the same so we would need some way of converting them. It's probably
* not worth the effort. But then why am I even writing this long
* comment that no one will read? Hmmm. - zr
* comment that no one will read? Hmm. - zr
*
* Addendum: we can't really ensure that this is never called in edit
* mode, so now we have a parameter to verify it. - brecht

View File

@@ -169,7 +169,7 @@ int BLI_bvhtree_overlap_thread_num(const BVHTree *tree);
/**
* Collision/overlap: check two trees if they overlap,
* alloc's *overlap with length of the int return value.
* allocates `*overlap` with length of the int return value.
*
* \param callback: optional, to test the overlap before adding (must be thread-safe!).
*/

View File

@@ -77,11 +77,11 @@ struct BVHNode {
/* keep under 26 bytes for speed purposes */
struct BVHTree {
BVHNode **nodes;
BVHNode *nodearray; /* pre-alloc branch nodes */
BVHNode **nodechild; /* pre-alloc children for nodes */
float *nodebv; /* pre-alloc bounding-volumes for nodes */
BVHNode *nodearray; /* Pre-allocate branch nodes. */
BVHNode **nodechild; /* Pre-allocate children for nodes. */
float *nodebv; /* Pre-allocate bounding-volumes for nodes. */
float epsilon; /* Epsilon is used for inflation of the K-DOP. */
int leaf_num; /* leafs */
int leaf_num; /* Leafs. */
int branch_num;
axis_t start_axis, stop_axis; /* bvhtree_kdop_axes array indices according to axis */
axis_t axis; /* KDOP type (6 => OBB, 7 => AABB, ...) */
@@ -624,7 +624,7 @@ static int implicit_leafs_index(const BVHBuildHelper *data, const int depth, con
*
* An implicit tree is a tree where its structure is implied,
* thus there is no need to store child pointers or indexes.
* It's possible to find the position of the child or the parent with simple maths
* It's possible to find the position of the child or the parent with simple math
* (multiplication and addition).
* This type of tree is for example used on heaps,
* where node N has its child at indices N*2 and N*2+1.

View File

@@ -2132,20 +2132,27 @@ static float2 compute_2d_gabor_kernel(const float2 position,
return windowed_gaussian_envelope * phasor;
}
/* Computes the approximate standard deviation of the zero mean normal distribution representing
/**
* Computes the approximate standard deviation of the zero mean normal distribution representing
* the amplitude distribution of the noise based on Equation (9) in the original Gabor noise paper.
* For simplicity, the Hann window is ignored and the orientation is fixed since the variance is
* orientation invariant. We start integrating the squared Gabor kernel with respect to x:
*
* \int_{-\infty}^{-\infty} (e^{- \pi (x^2 + y^2)} cos(2 \pi f_0 x))^2 dx
* \code{.tex}
* \int_{-\infty}^{-\infty} (e^{- \pi (x^2 + y^2)} cos(2 \pi f_0 x))^2 dx
* \endcode
*
* Which gives:
*
* \frac{(e^{2 \pi f_0^2}-1) e^{-2 \pi y^2 - 2 pi f_0^2}}{2^\frac{3}{2}}
* \code{.tex}
* \frac{(e^{2 \pi f_0^2}-1) e^{-2 \pi y^2 - 2 pi f_0^2}}{2^\frac{3}{2}}
* \endcode
*
* Then we similarly integrate with respect to y to get:
*
* \frac{1 - e^{-2 \pi f_0^2}}{4}
* \code{.tex}
* \frac{1 - e^{-2 \pi f_0^2}}{4}
* \endcode
*
* Secondly, we note that the second moment of the weights distribution is 0.5 since it is a
* fair Bernoulli distribution. So the final standard deviation expression is square root the
@@ -2155,7 +2162,9 @@ static float2 compute_2d_gabor_kernel(const float2 position,
* converges to an upper limit as the frequency approaches infinity, so we replace the expression
* with the following limit:
*
* \lim_{x \to \infty} \frac{1 - e^{-2 \pi f_0^2}}{4}
* \code{.tex}
* \lim_{x \to \infty} \frac{1 - e^{-2 \pi f_0^2}}{4}
* \endcode
*
* To get an approximation of 0.25. */
static float compute_2d_gabor_standard_deviation()

View File

@@ -201,7 +201,8 @@ static double4 compute_non_causal_feedforward_coefficients(
return double4(n1, n2, n3, n4);
}
/* The IIR filter difference equation relies on previous outputs to compute new outputs, those
/**
* The IIR filter difference equation relies on previous outputs to compute new outputs, those
* previous outputs are not really defined at the start of the filter. To do Neumann boundary
* condition, we initialize the previous output with a special value that is a function of the
* boundary value. This special value is computed by multiplying the boundary value with a coefficient
@@ -214,21 +215,28 @@ static double4 compute_non_causal_feedforward_coefficients(
* Start by the difference equation where b_i are the feedforward coefficients and a_i are the
* feedback coefficients:
*
* y[n] = \sum_{i = 0}^3 b_i x[n - i] - \sum_{i = 0}^3 a_i y[n - i]
* \code{.tex}
* y[n] = \sum_{i = 0}^3 b_i x[n - i] - \sum_{i = 0}^3 a_i y[n - i]
* \endcode
*
* Assume all outputs are y and all inputs are x, which is the boundary value:
*
* y = \sum_{i = 0}^3 b_i x - \sum_{i = 0}^3 a_i y
* \code{.tex}
* y = \sum_{i = 0}^3 b_i x - \sum_{i = 0}^3 a_i y
* \endcode
*
* Now rearrange to compute y:
*
* y = x \sum_{i = 0}^3 b_i - y \sum_{i = 0}^3 a_i
* y + y \sum_{i = 0}^3 a_i = x \sum_{i = 0}^3 b_i
* y (1 + \sum_{i = 0}^3 a_i) = x \sum_{i = 0}^3 b_i
* y = x \cdot \frac{\sum_{i = 0}^3 b_i}{1 + \sum_{i = 0}^3 a_i}
* \code{.tex}
* y = x \sum_{i = 0}^3 b_i - y \sum_{i = 0}^3 a_i
* y + y \sum_{i = 0}^3 a_i = x \sum_{i = 0}^3 b_i
* y (1 + \sum_{i = 0}^3 a_i) = x \sum_{i = 0}^3 b_i
* y = x \cdot \frac{\sum_{i = 0}^3 b_i}{1 + \sum_{i = 0}^3 a_i}
* \endcode
*
* So our coefficient is the value that is multiplied by the boundary value x. Had x been zero,
* that is, we are doing Dirichlet boundary condition, the equations still hold. */
* that is, we are doing Dirichlet boundary condition, the equations still hold.
*/
static double compute_boundary_coefficient(const double4 &feedforward_coefficients,
const double4 &feedback_coefficients)
{

View File

@@ -79,7 +79,8 @@ static double compute_scaled_poles_variance(const std::array<std::complex<double
return variance.real();
}
/* Computes the partial derivative with respect to the scale factor at the given scale factor of
/**
* Computes the partial derivative with respect to the scale factor at the given scale factor of
* the variance of the Gaussian filter represented by the given poles scaled by the given scale
* factor. This is based on the partial derivative with respect to the scale factor of Equation
* (20) in Van Vliet's paper.
@@ -87,13 +88,16 @@ static double compute_scaled_poles_variance(const std::array<std::complex<double
* The derivative is not listed in the paper, but was computed manually as the sum of the following
* for each of the poles:
*
* \frac{
* 2a^\frac{1}{x}e^\frac{ib}{x} (e^\frac{ib}{x}+a^\frac{1}{x}) (\ln(a)-ib)
* }{
* x^2 (a^\frac{1}{x}-e^\frac{ib}{x})^3
* }
* \code{.tex}
* \frac{
* 2a^\frac{1}{x}e^\frac{ib}{x} (e^\frac{ib}{x}+a^\frac{1}{x}) (\ln(a)-ib)
* }{
* x^2 (a^\frac{1}{x}-e^\frac{ib}{x})^3
* }
* \endcode
*
* Where "x" is the scale factor, "a" is the magnitude of the pole, and "b" is its phase. */
* Where "x" is the scale factor, "a" is the magnitude of the pole, and "b" is its phase.
*/
static double compute_scaled_poles_variance_derivative(
const std::array<std::complex<double>, 4> &poles, double scale_factor)
{
@@ -247,7 +251,8 @@ static double compute_feedforward_coefficient(const double4 &feedback_coefficien
return 1.0 + math::reduce_add(feedback_coefficients);
}
/* Computes the residue of the partial fraction of the transfer function of the given causal poles
/**
* Computes the residue of the partial fraction of the transfer function of the given causal poles
* and gain for the given target pole. This essentially evaluates Equation (3.41) in Oppenheim's
* book, where d_k is the target pole and assuming the transfer function is in the form given in
* Equation (3.39), where d_k are the poles. See the following derivation for the gain value.
@@ -259,22 +264,29 @@ static double compute_feedforward_coefficient(const double4 &feedback_coefficien
*
* Start from the causal term of Equation (3):
*
* H_+(z) = \prod_{i=1}^N \frac{d_i - 1}{d_i - z^{-1}}
* \code{.tex}
* H_+(z) = \prod_{i=1}^N \frac{d_i - 1}{d_i - z^{-1}}
* \endcode
*
* Divide by d_i:
*
* H_+(z) = \prod_{i=1}^N \frac{1 - d_i^{-1}}{1 - d_i^{-1}z^{-1}}
* \code{.tex}
* H_+(z) = \prod_{i=1}^N \frac{1 - d_i^{-1}}{1 - d_i^{-1}z^{-1}}
* \endcode
*
* Move the numerator to its own product:
*
* H_+(z) = \prod_{i=1}^N 1 - d_i^{-1} \prod_{i=1}^N \frac{1}{1 - d_i^{-1}z^{-1}}
* \code{.tex}
* H_+(z) = \prod_{i=1}^N 1 - d_i^{-1} \prod_{i=1}^N \frac{1}{1 - d_i^{-1}z^{-1}}
* \endcode
*
* And we reach the same form as Equation (3.39). Where the first product term is b0 / a0 and is
* also the given gain value, which is also the same as the feedforward coefficient denoted by
* the alpha in Equation (12). Further d_i^{-1} in our derivation is the same as d_k in Equation
* (3.39), the discrepancy in the inverse operator is the fact that Van Vliet's derivation assumes
* non-causal poles, while Oppenheim's assumes causal poles, which are inverses of each other as can
* be seen in the compute_causal_poles function. */
* be seen in the compute_causal_poles function.
*/
static std::complex<double> compute_partial_fraction_residue(
const std::array<std::complex<double>, 4> &poles,
const std::complex<double> &target_pole,
@@ -343,7 +355,8 @@ static void compute_second_order_section(const std::complex<double> &pole,
non_causal_feedforward_2);
}
/* The IIR filter difference equation relies on previous outputs to compute new outputs, those
/**
 * The IIR filter difference equation relies on previous outputs to compute new outputs, those
* previous outputs are not really defined at the start of the filter. To do Neumann boundary
* condition, we initialize the previous output with a special value that is a function of the
boundary value. This special value is computed by multiplying the boundary value with a coefficient
@@ -356,21 +369,28 @@ static void compute_second_order_section(const std::complex<double> &pole,
* Start by the difference equation where b_i are the feedforward coefficients and a_i are the
* feedback coefficients:
*
* y[n] = \sum_{i = 0}^3 b_i x[n - i] - \sum_{i = 0}^3 a_i y[n - i]
* \code{.tex}
* y[n] = \sum_{i = 0}^3 b_i x[n - i] - \sum_{i = 0}^3 a_i y[n - i]
* \endcode
*
* Assume all outputs are y and all inputs are x, which is the boundary value:
*
* y = \sum_{i = 0}^3 b_i x - \sum_{i = 0}^3 a_i y
* \code{.tex}
* y = \sum_{i = 0}^3 b_i x - \sum_{i = 0}^3 a_i y
* \endcode
*
* Now rearrange to compute y:
*
* y = x \sum_{i = 0}^3 b_i - y \sum_{i = 0}^3 a_i
* y + y \sum_{i = 0}^3 a_i = x \sum_{i = 0}^3 b_i
* y (1 + \sum_{i = 0}^3 a_i) = x \sum_{i = 0}^3 b_i
* y = x \cdot \frac{\sum_{i = 0}^3 b_i}{1 + \sum_{i = 0}^3 a_i}
* \code{.tex}
* y = x \sum_{i = 0}^3 b_i - y \sum_{i = 0}^3 a_i
* y + y \sum_{i = 0}^3 a_i = x \sum_{i = 0}^3 b_i
* y (1 + \sum_{i = 0}^3 a_i) = x \sum_{i = 0}^3 b_i
* y = x \cdot \frac{\sum_{i = 0}^3 b_i}{1 + \sum_{i = 0}^3 a_i}
* \endcode
*
* So our coefficient is the value that is multiplied by the boundary value x. Had x been zero,
* that is, we are doing Dirichlet boundary condition, the equations still hold. */
* that is, we are doing Dirichlet boundary condition, the equations still hold.
*/
static double compute_boundary_coefficient(const double2 &feedback_coefficients,
const double2 &feedforward_coefficients)
{

View File

@@ -26,22 +26,24 @@ void MOV_exit();
*/
bool MOV_is_movie_file(const char *filepath);
/** Checks whether given ffmpeg video AVCodecID supports alpha channel (RGBA). */
/** Checks whether given FFMPEG video AVCodecID supports alpha channel (RGBA). */
bool MOV_codec_supports_alpha(int av_codec_id);
/** Checks whether given ffmpeg video AVCodecID supports CRF (i.e. "quality level")
* setting. For codecs that do not support constant quality, only target bitrate
* can be specified. */
/**
* Checks whether given FFMPEG video AVCodecID supports CRF (i.e. "quality level")
* setting. For codecs that do not support constant quality, only target bit-rate
* can be specified.
*/
bool MOV_codec_supports_crf(int av_codec_id);
/**
* Which pixel bit depths are supported by a given ffmpeg video AVCodecID.
* Returns bitmask of `R_IMF_CHAN_DEPTH_` flags.
* Which pixel bit depths are supported by a given FFMPEG video AVCodecID.
* Returns bit-mask of `R_IMF_CHAN_DEPTH_` flags.
*/
int MOV_codec_valid_bit_depths(int av_codec_id);
/**
* Given desired output image format type, sets up required ffmpeg
* Given desired output image format type, sets up required FFMPEG
* related settings in render data.
*/
void MOV_validate_output_settings(RenderData *rd, const ImageFormatData *imf);

View File

@@ -20,11 +20,11 @@
#include <libavutil/display.h>
#include <libswscale/swscale.h>
/* Check if our ffmpeg is new enough, avoids user complaints.
/* Check if our FFMPEG is new enough, avoids user complaints.
 * Minimum supported version is currently 3.2.0 which means the following library versions:
* libavutil > 55.30
* libavcodec > 57.60
* libavformat > 57.50
* `libavutil` > 55.30
* `libavcodec` > 57.60
* `libavformat` > 57.50
*
* We only check for one of these as they are usually updated in tandem.
*/
@@ -42,31 +42,31 @@
#endif
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(58, 29, 100)
/* In ffmpeg 6.1 usage of the "key_frame" variable from "AVFrame" has been deprecated.
/* In FFMPEG 6.1 usage of the "key_frame" variable from "AVFrame" has been deprecated.
 * Use the new method to query for the "AV_FRAME_FLAG_KEY" flag instead.
*/
# define FFMPEG_OLD_KEY_FRAME_QUERY_METHOD
#endif
#if (LIBAVFORMAT_VERSION_MAJOR < 59)
/* For versions older than ffmpeg 5.0, use the old channel layout variables.
/* For versions older than FFMPEG 5.0, use the old channel layout variables.
* We intend to only keep this workaround for around two releases (3.5, 3.6).
* If it sticks around any longer, then we should consider refactoring this.
*/
# define FFMPEG_USE_OLD_CHANNEL_VARS
#endif
/* Threaded sws_scale_frame was added in ffmpeg 5.0 (swscale version 6.1). */
/* Threaded sws_scale_frame was added in FFMPEG 5.0 (`swscale` version 6.1). */
#if (LIBSWSCALE_VERSION_INT >= AV_VERSION_INT(6, 1, 100))
# define FFMPEG_SWSCALE_THREADING
#endif
/* AV_CODEC_CAP_AUTO_THREADS was renamed to AV_CODEC_CAP_OTHER_THREADS with
* upstream commit
* github.com/FFmpeg/FFmpeg/commit/7d09579190def3ef7562399489e628f3b65714ce
* (lavc 58.132.100) and removed with commit
* github.com/FFmpeg/FFmpeg/commit/10c9a0874cb361336237557391d306d26d43f137
* for ffmpeg 6.0.
* `github.com/FFmpeg/FFmpeg/commit/7d09579190def3ef7562399489e628f3b65714ce`
* (`lavc` 58.132.100) and removed with commit
* `github.com/FFmpeg/FFmpeg/commit/10c9a0874cb361336237557391d306d26d43f137`
* for FFMPEG 6.0.
*/
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 132, 100)
# define AV_CODEC_CAP_OTHER_THREADS AV_CODEC_CAP_AUTO_THREADS
@@ -76,10 +76,10 @@
((LIBAVFORMAT_VERSION_MAJOR == 58) && (LIBAVFORMAT_VERSION_MINOR < 76))
# define FFMPEG_USE_DURATION_WORKAROUND 1
/* Before ffmpeg 4.4, package duration calculation used depricated variables to calculate the
/* Before FFMPEG 4.4, packet duration calculation used deprecated variables to calculate the
* packet duration. Use the function from commit
* github.com/FFmpeg/FFmpeg/commit/1c0885334dda9ee8652e60c586fa2e3674056586
* to calculate the correct framerate for ffmpeg < 4.4.
* `github.com/FFmpeg/FFmpeg/commit/1c0885334dda9ee8652e60c586fa2e3674056586`
* to calculate the correct frame-rate for FFMPEG < 4.4.
*/
FFMPEG_INLINE
@@ -153,7 +153,7 @@ int64_t av_get_frame_duration_in_pts_units(const AVFrame *picture)
FFMPEG_INLINE size_t ffmpeg_get_buffer_alignment()
{
/* NOTE: even if av_frame_get_buffer suggests to pass 0 for alignment,
* as of ffmpeg 6.1/7.0 it does not use correct alignment for AVX512
* as of FFMPEG 6.1/7.0 it does not use correct alignment for AVX512
* CPU (frame.c get_video_buffer ends up always using 32 alignment,
* whereas it should have used 64). Reported upstream:
* https://trac.ffmpeg.org/ticket/11116 and the fix on their code

View File

@@ -133,7 +133,7 @@ static int isffmpeg(const char *filepath)
}
/* -------------------------------------------------------------------- */
/* AVFrame deinterlacing. Code for this was originally based on ffmpeg 2.6.4 (LGPL). */
/* AVFrame de-interlacing. Code for this was originally based on FFMPEG 2.6.4 (LGPL). */
# define MAX_NEG_CROP 1024
@@ -217,9 +217,10 @@ FFMPEG_INLINE void deinterlace_line_inplace(
}
}
/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
* top field is copied as is, but the bottom field is deinterlaced
* against the top field. */
/**
* De-interlacing: 2 temporal taps, 3 spatial taps linear filter.
* The top field is copied as is, but the bottom field is de-interlaced against the top field.
*/
FFMPEG_INLINE void deinterlace_bottom_field(
uint8_t *dst, int dst_wrap, const uint8_t *src1, int src_wrap, int width, int height)
{

View File

@@ -228,7 +228,7 @@ static AVFrame *generate_video_frame(MovieWriter *context, const ImBuf *image)
const size_t linesize_dst = rgb_frame->linesize[0];
if (use_float) {
/* Float image: need to split up the image into a planar format,
* because libswscale does not support RGBA->YUV conversions from
* because `libswscale` does not support RGBA->YUV conversions from
* packed float formats. */
BLI_assert_msg(rgb_frame->linesize[1] == linesize_dst &&
rgb_frame->linesize[2] == linesize_dst &&

View File

@@ -165,7 +165,7 @@ FFMPEG_TEST_VCODEC_NAME(libtheora, AV_PIX_FMT_YUV420P)
FFMPEG_TEST_VCODEC_NAME(libx264, AV_PIX_FMT_YUV420P)
FFMPEG_TEST_VCODEC_NAME(libvpx, AV_PIX_FMT_YUV420P)
FFMPEG_TEST_VCODEC_NAME(libopenjpeg, AV_PIX_FMT_YUV420P)
/* aom's AV1 encoder is "libaom-av1". FFMPEG_TEST_VCODEC_NAME(libaom-av1, ...)
/* AOM's AV1 encoder is `libaom-av1`. `FFMPEG_TEST_VCODEC_NAME(libaom-av1, ...)`
* will not work because the dash will not work with the test macro. */
TEST(ffmpeg, libaom_av1_AV_PIX_FMT_YUV420P)
{

View File

@@ -757,7 +757,7 @@ bool AbstractHierarchyIterator::mark_as_weak_export(const Object * /*object*/) c
bool AbstractHierarchyIterator::should_visit_dupli_object(const DupliObject *dupli_object) const
{
/* Do not visit dupli objects if their `no_draw` flag is set (things like custom bone shapes) or
* if they are metaballs. */
* if they are meta-balls. */
if (dupli_object->no_draw || dupli_object->ob->type == OB_MBALL) {
return false;
}

View File

@@ -52,7 +52,7 @@ struct ImportSettings {
* converted by invoking the 'on_material_import' USD hook.
* This map is updated by readers during stage traversal. */
mutable blender::Map<std::string, Material *> usd_path_to_mat_for_hook{};
/* Set of paths to USD material prims that can be converted by the
/* Set of paths to USD material primitives that can be converted by the
* 'on_material_import' USD hook. For efficiency this set should
* be populated prior to stage traversal. */
mutable blender::Set<std::string> mat_import_hook_sources{};

View File

@@ -458,7 +458,7 @@ typedef struct TextVars {
char align;
char _pad[2];
/* Ofssets in bytes relative to #TextVars::text. */
/** Offsets in bytes relative to #TextVars::text. */
int cursor_offset;
int selection_start_offset;
int selection_end_offset;

View File

@@ -94,7 +94,7 @@ struct LineInfo {
struct TextVarsRuntime {
Vector<LineInfo> lines;
rcti text_boundbox; /* Boundbox used for box drawing and selection. */
rcti text_boundbox; /* Bound-box used for box drawing and selection. */
int line_height;
int font_descender;
int character_count;

View File

@@ -39,7 +39,7 @@ ImBuf *prepare_effect_imbufs(const SeqRenderData *context,
int base_flags = uninitialized_pixels ? IB_uninitialized_pixels : 0;
if (!ibuf1 && !ibuf2) {
/* hmmm, global float option ? */
/* Hmm, global float option? */
out = IMB_allocImBuf(x, y, 32, IB_rect | base_flags);
}
else if ((ibuf1 && ibuf1->float_buffer.data) || (ibuf2 && ibuf2->float_buffer.data)) {

View File

@@ -104,12 +104,12 @@ defs_precalc = {
import sys
if 0:
# Examples with LLVM as the root dir: '/dsk/src/llvm'
# Examples with LLVM as the root dir: `/dsk/src/llvm`.
# path containing 'clang/__init__.py'
# Path containing `clang/__init__.py`.
CLANG_BIND_DIR = "/dsk/src/llvm/tools/clang/bindings/python"
# path containing libclang.so
# Path containing `libclang.so`.
CLANG_LIB_DIR = "/opt/llvm/lib"
else:
import os