diff --git a/source/blender/blenlib/intern/math_base_inline.c b/source/blender/blenlib/intern/math_base_inline.c
index 9e32ff5ad4e..58c882e894e 100644
--- a/source/blender/blenlib/intern/math_base_inline.c
+++ b/source/blender/blenlib/intern/math_base_inline.c
@@ -39,7 +39,7 @@
 #define __MATH_BASE_INLINE_C__
 
 /* A few small defines. Keep'em local! */
-#define SMALL_NUMBER 1.e-8f
+#define SMALL_NUMBER 1.e-8f
 
 MINLINE float sqrt3f(float f)
 {
diff --git a/source/blender/blenlib/intern/math_color.c b/source/blender/blenlib/intern/math_color.c
index 7103ef6106d..abd9c1ea5b8 100644
--- a/source/blender/blenlib/intern/math_color.c
+++ b/source/blender/blenlib/intern/math_color.c
@@ -17,7 +17,7 @@
  *
  * The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
  * All rights reserved.
-
+ *
  * The Original Code is: some of this file.
  *
  * ***** END GPL LICENSE BLOCK *****
@@ -129,18 +129,18 @@ void rgb_to_ycc(float r, float g, float b, float *ly, float *lcb, float *lcr, in
 	switch (colorspace) {
 	case BLI_YCC_ITU_BT601:
 		y = (0.257f * sr) + (0.504f * sg) + (0.098f * sb) + 16.0f;
-		cb = (-0.148f * sr)-(0.291f * sg) + (0.439f * sb) + 128.0f;
-		cr = (0.439f * sr)-(0.368f * sg)-(0.071f * sb) + 128.0f;
+		cb = (-0.148f * sr) - (0.291f * sg) + (0.439f * sb) + 128.0f;
+		cr = (0.439f * sr) - (0.368f * sg) - (0.071f * sb) + 128.0f;
 		break;
 	case BLI_YCC_ITU_BT709:
 		y = (0.183f * sr) + (0.614f * sg) + (0.062f * sb) + 16.0f;
-		cb = (-0.101f * sr)-(0.338f * sg) + (0.439f * sb) + 128.0f;
-		cr = (0.439f * sr)-(0.399f * sg)-(0.040f * sb) + 128.0f;
+		cb = (-0.101f * sr) - (0.338f * sg) + (0.439f * sb) + 128.0f;
+		cr = (0.439f * sr) - (0.399f * sg) - (0.040f * sb) + 128.0f;
 		break;
 	case BLI_YCC_JFIF_0_255:
 		y = (0.299f * sr) + (0.587f * sg) + (0.114f * sb);
-		cb = (-0.16874f * sr)-(0.33126f * sg) + (0.5f * sb) + 128.0f;
-		cr = (0.5f * sr)-(0.41869f * sg)-(0.08131f * sb) + 128.0f;
+		cb = (-0.16874f * sr) - (0.33126f * sg) + (0.5f * sb) + 128.0f;
+		cr = (0.5f * sr) - (0.41869f * sg) - (0.08131f * sb) + 128.0f;
 		break;
 	default:
 		assert(!"invalid colorspace");
@@ -230,13 +230,17 @@ void rgb_to_hsv(float r, float g, float b, float *lh, float *ls, float *lv)
 		rc = (cmax - r) / cdelta;
 		gc = (cmax - g) / cdelta;
 		bc = (cmax - b) / cdelta;
-		if (r == cmax)
+
+		if (r == cmax) {
 			h = bc - gc;
-		else
-			if (g == cmax)
-				h = 2.0f + rc - bc;
-			else
-				h = 4.0f + gc - rc;
+		}
+		else if (g == cmax) {
+			h = 2.0f + rc - bc;
+		}
+		else {
+			h = 4.0f + gc - rc;
+		}
+
 		h = h * 60.0f;
 		if (h < 0.0f)
 			h += 360.0f;
@@ -331,13 +335,13 @@ unsigned int rgb_to_cpack(float r, float g, float b)
 void cpack_to_rgb(unsigned int col, float *r, float *g, float *b)
 {
-	*r = (float)((col)&0xFF);
+	*r = (float)((col) & 0xFF);
 	*r /= 255.0f;
 
-	*g = (float)(((col) >> 8)&0xFF);
+	*g = (float)(((col) >> 8) & 0xFF);
 	*g /= 255.0f;
 
-	*b = (float)(((col) >> 16)&0xFF);
+	*b = (float)(((col) >> 16) & 0xFF);
 	*b /= 255.0f;
 }
diff --git a/source/blender/blenlib/intern/math_geom.c b/source/blender/blenlib/intern/math_geom.c
index f84a79c544d..e76431e981e 100644
--- a/source/blender/blenlib/intern/math_geom.c
+++ b/source/blender/blenlib/intern/math_geom.c
@@ -17,7 +17,7 @@
  *
  * The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
  * All rights reserved.
-
+ *
  * The Original Code is: some of this file.
  *
  * ***** END GPL LICENSE BLOCK *****
@@ -173,7 +173,7 @@ float dist_to_line_v2(const float v1[2], const float v2[2], const float v3[2])
 	deler = (float)sqrt(a[0] * a[0] + a[1] * a[1]);
 	if (deler == 0.0f) return 0;
 
-	return fabsf((v1[0] - v2[0]) * a[0]+(v1[1] - v2[1]) * a[1]) / deler;
+	return fabsf((v1[0] - v2[0]) * a[0] + (v1[1] - v2[1]) * a[1]) / deler;
 
 }
@@ -298,12 +298,12 @@ int isect_line_line_v2_int(const int v1[2], const int v2[2], const int v3[2], co
 {
 	float div, labda, mu;
 
-	div = (float)((v2[0] - v1[0]) * (v4[1] - v3[1])-(v2[1] - v1[1]) * (v4[0] - v3[0]));
+	div = (float)((v2[0] - v1[0]) * (v4[1] - v3[1]) - (v2[1] - v1[1]) * (v4[0] - v3[0]));
 	if (div == 0.0f) return ISECT_LINE_LINE_COLINEAR;
 
-	labda = ((float)(v1[1] - v3[1]) * (v4[0] - v3[0])-(v1[0] - v3[0]) * (v4[1] - v3[1])) / div;
+	labda = ((float)(v1[1] - v3[1]) * (v4[0] - v3[0]) - (v1[0] - v3[0]) * (v4[1] - v3[1])) / div;
 
-	mu = ((float)(v1[1] - v3[1]) * (v2[0] - v1[0])-(v1[0] - v3[0]) * (v2[1] - v1[1])) / div;
+	mu = ((float)(v1[1] - v3[1]) * (v2[0] - v1[0]) - (v1[0] - v3[0]) * (v2[1] - v1[1])) / div;
 
 	if (labda >= 0.0f && labda <= 1.0f && mu >= 0.0f && mu <= 1.0f) {
 		if (labda == 0.0f || labda == 1.0f || mu == 0.0f || mu == 1.0f) return ISECT_LINE_LINE_EXACT;
@@ -317,12 +317,12 @@ int isect_line_line_v2(const float v1[2], const float v2[2], const float v3[2],
 {
 	float div, labda, mu;
 
-	div = (v2[0] - v1[0]) * (v4[1] - v3[1])-(v2[1] - v1[1]) * (v4[0] - v3[0]);
+	div = (v2[0] - v1[0]) * (v4[1] - v3[1]) - (v2[1] - v1[1]) * (v4[0] - v3[0]);
 	if (div == 0.0f) return ISECT_LINE_LINE_COLINEAR;
 
-	labda = ((float)(v1[1] - v3[1]) * (v4[0] - v3[0])-(v1[0] - v3[0]) * (v4[1] - v3[1])) / div;
+	labda = ((float)(v1[1] - v3[1]) * (v4[0] - v3[0]) - (v1[0] - v3[0]) * (v4[1] - v3[1])) / div;
 
-	mu = ((float)(v1[1] - v3[1]) * (v2[0] - v1[0])-(v1[0] - v3[0]) * (v2[1] - v1[1])) / div;
+	mu = ((float)(v1[1] - v3[1]) * (v2[0] - v1[0]) - (v1[0] - v3[0]) * (v2[1] - v1[1])) / div;
 
 	if (labda >= 0.0f && labda <= 1.0f && mu >= 0.0f && mu <= 1.0f) {
 		if (labda == 0.0f || labda == 1.0f || mu == 0.0f || mu == 1.0f) return ISECT_LINE_LINE_EXACT;
@@ -383,7 +383,7 @@ int isect_seg_seg_v2_point(const float v1[2], const float v2[2], const float v3[
 		if (u > u2) SWAP(float, u, u2);
 
-		if (u > 1.0f + eps || u2<-eps) return -1; /* non-ovlerlapping segments */
+		if (u > 1.0f + eps || u2 < -eps) return -1; /* non-ovlerlapping segments */
 		else if (maxf(0.0f, u) == minf(1.0f, u2)) { /* one common point: can return result */
 			interp_v2_v2v2(vi, v1, v2, maxf(0, u));
 			return 1;
@@ -491,7 +491,7 @@ int isect_line_sphere_v2(const float l1[2], const float l2[2], const float
 	const float b = 2.0f *
 	                (ldir[0] * (l1[0] - sp[0]) +
-	                 ldir[1] * (l1[1] - sp[1]));
+	                 ldir[1] * (l1[1] - sp[1]));
 
 	const float c =
 	      dot_v2v2(sp, sp) +
@@ -565,7 +565,7 @@ static short IsectLLPt2Df(const float x0, const float y0, const float x1, const
 			return -1; /*m2 = (float)1e+10;*/ // close enough to infinity
 
 		if (fabs(m1 - m2) < 0.000001)
-			return -1; /* parallel lines */
+			return -1; /* parallel lines */
 
 		// compute constants
@@ -1918,11 +1918,11 @@ int interp_sparse_array(float *array, int const list_size, const float skipval)
 	float valid_last = skipval;
 	int valid_ofs = 0;
 
-	float *array_up = MEM_callocN(sizeof(float)* list_size, "interp_sparse_array up");
-	float *array_down = MEM_callocN(sizeof(float)* list_size, "interp_sparse_array up");
+	float *array_up = MEM_callocN(sizeof(float) * list_size, "interp_sparse_array up");
+	float *array_down = MEM_callocN(sizeof(float) * list_size, "interp_sparse_array up");
 
-	int *ofs_tot_up = MEM_callocN(sizeof(int)* list_size, "interp_sparse_array tup");
-	int *ofs_tot_down = MEM_callocN(sizeof(int)* list_size, "interp_sparse_array tdown");
+	int *ofs_tot_up = MEM_callocN(sizeof(int) * list_size, "interp_sparse_array tup");
+	int *ofs_tot_down = MEM_callocN(sizeof(int) * list_size, "interp_sparse_array tdown");
 
 	for (i = 0; i < list_size; i++) {
 		if (array[i] == skipval) {
@@ -2046,7 +2046,7 @@ void interp_cubic_v3(float x[3], float v[3], const float x1[3], const float v1[3
 /* unfortunately internal calculations have to be done at double precision to achieve correct/stable results. */
 
-#define IS_ZERO(x) ((x>(-DBL_EPSILON) && x<DBL_EPSILON) ? 1 : 0)
+#define IS_ZERO(x) ((x > (-DBL_EPSILON) && x < DBL_EPSILON) ? 1 : 0)
 
 /* Barycentric reverse */
 void resolve_tri_uv(float r_uv[2], const float st[2], const float st0[2], const float st1[2], const float st2[2])
@@ -2353,7 +2353,7 @@ void map_to_sphere(float *r_u, float *r_v, const float x, const float y, const f
 	len = sqrtf(x * x + y * y + z * z);
 	if (len > 0.0f) {
-		if (x == 0.0f && y == 0.0f) *r_u = 0.0f; /* othwise domain error */
+		if (x == 0.0f && y == 0.0f) *r_u = 0.0f; /* othwise domain error */
 		else *r_u = (1.0f - atan2f(x, y) / (float)M_PI) / 2.0f;
 
 		*r_v = 1.0f - (float)saacos(z / len) / (float)M_PI;
@@ -2447,7 +2447,7 @@ void accumulate_vertex_normals_poly(float **vertnos, float polyno[3],
 
 /* from BKE_mesh.h */
-#define STD_UV_CONNECT_LIMIT 0.0001f
+#define STD_UV_CONNECT_LIMIT 0.0001f
 
 void sum_or_add_vertex_tangent(void *arena, VertexTangent **vtang, const float tang[3], const float uv[2])
 {
@@ -2915,14 +2915,14 @@ static vFloat vec_splat_float(float val)
 static float ff_quad_form_factor(float *p, float *n, float *q0, float *q1, float *q2, float *q3)
 {
 	vFloat vcos, rlen, vrx, vry, vrz, vsrx, vsry, vsrz, gx, gy, gz, vangle;
-	vUInt8 rotate = (vUInt8){4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3};
+	vUInt8 rotate = (vUInt8) {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3};
 	vFloatResult vresult;
 	float result;
 
 	/* compute r* */
-	vrx = (vFloat) {q0[0], q1[0], q2[0], q3[0]} - vec_splat_float(p[0]);
-	vry = (vFloat) {q0[1], q1[1], q2[1], q3[1]} - vec_splat_float(p[1]);
-	vrz = (vFloat) {q0[2], q1[2], q2[2], q3[2]} - vec_splat_float(p[2]);
+	vrx = (vFloat) {q0[0], q1[0], q2[0], q3[0]} -vec_splat_float(p[0]);
+	vry = (vFloat) {q0[1], q1[1], q2[1], q3[1]} -vec_splat_float(p[1]);
+	vrz = (vFloat) {q0[2], q1[2], q2[2], q3[2]} -vec_splat_float(p[2]);
 
 	/* normalize r* */
 	rlen = vec_rsqrte(vrx * vrx + vry * vry + vrz * vrz + vec_splat_float(1e-16f));
@@ -2973,8 +2973,8 @@ static float ff_quad_form_factor(float *p, float *n, float *q0, float *q1, float
 static __m128 sse_approx_acos(__m128 x)
 {
 	/* needs a better approximation than taylor expansion of acos, since that
-	 * gives big erros for near 1.0 values, sqrt(2 * x) * acos(1 - x) should work
-	 * better, see http://www.tom.womack.net/projects/sse-fast-arctrig.html */
+	 * gives big erros for near 1.0 values, sqrt(2 * x) * acos(1 - x) should work
+	 * better, see http://www.tom.womack.net/projects/sse-fast-arctrig.html */
 
 	return _mm_set_ps1(1.0f);
 }
diff --git a/source/blender/blenlib/intern/math_matrix.c b/source/blender/blenlib/intern/math_matrix.c
index 09b5ab4f62a..e61a8ef041a 100644
--- a/source/blender/blenlib/intern/math_matrix.c
+++ b/source/blender/blenlib/intern/math_matrix.c
@@ -245,9 +245,9 @@ void mul_m4_m3m4(float (*m1)[4], float (*m3)[3], float (*m2)[4])
 }
 
 void mul_serie_m3(float answ[][3],
-float m1[][3], float m2[][3], float m3[][3],
-float m4[][3], float m5[][3], float m6[][3],
-float m7[][3], float m8[][3])
+                  float m1[][3], float m2[][3], float m3[][3],
+                  float m4[][3], float m5[][3], float m6[][3],
+                  float m7[][3], float m8[][3])
 {
 	float temp[3][3];
 
@@ -278,9 +278,9 @@ float m7[][3], float m8[][3])
 }
 
 void mul_serie_m4(float answ[][4], float m1[][4],
-float m2[][4], float m3[][4], float m4[][4],
-float m5[][4], float m6[][4], float m7[][4],
-float m8[][4])
+                  float m2[][4], float m3[][4], float m4[][4],
+                  float m5[][4], float m6[][4], float m7[][4],
+                  float m8[][4])
 {
 	float temp[4][4];
 
@@ -542,9 +542,9 @@ int invert_m4(float m[4][4])
 
 /*
  * invertmat -
- * computes the inverse of mat and puts it in inverse. Returns
- * TRUE on success (i.e. can always find a pivot) and FALSE on failure.
- * Uses Gaussian Elimination with partial (maximal column) pivoting.
+ * computes the inverse of mat and puts it in inverse. Returns
+ * TRUE on success (i.e. can always find a pivot) and FALSE on failure.
+ * Uses Gaussian Elimination with partial (maximal column) pivoting.
  *
  * Mark Segal - 1992
 */
@@ -589,7 +589,7 @@ int invert_m4_m4(float inverse[4][4], float mat[4][4])
 		temp = tempmat[i][i];
 		if (temp == 0)
-			return 0; /* No non-zero pivot */
+			return 0; /* No non-zero pivot */
 		for (k = 0; k < 4; k++) {
 			tempmat[i][k] = (float)(tempmat[i][k] / temp);
 			inverse[i][k] = (float)(inverse[i][k] / temp);
@@ -1568,7 +1568,7 @@ void svd_m4(float U[4][4], float s[4], float V[4][4], float A_[4][4])
 					break;
 				}
 				t = (ks != p ? fabsf(e[ks]) : 0.f) +
-				    (ks != k + 1 ? fabsf(e[ks - 1]) : 0.0f);
+				    (ks != k + 1 ? fabsf(e[ks - 1]) : 0.0f);
 				if (fabsf(s[ks]) <= eps * t) {
 					s[ks] = 0.0f;
 					break;
@@ -1617,7 +1617,7 @@ void svd_m4(float U[4][4], float s[4], float V[4][4], float A_[4][4])
 				break;
 			}
 
-			// Split at negligible s(k).
+			// Split at negligible s(k).
 
 			case 2:
 			{
@@ -1641,7 +1641,7 @@ void svd_m4(float U[4][4], float s[4], float V[4][4], float A_[4][4])
 				break;
 			}
 
-			// Perform one qr step.
+			// Perform one qr step.
 
 			case 3:
 			{
@@ -1713,7 +1713,7 @@ void svd_m4(float U[4][4], float s[4], float V[4][4], float A_[4][4])
 				iter = iter + 1;
 				break;
 			}
 
-			// Convergence.
+			// Convergence.
 			case 4:
 			{
diff --git a/source/blender/blenlib/intern/math_rotation.c b/source/blender/blenlib/intern/math_rotation.c
index 25e7e451451..dbba672100e 100644
--- a/source/blender/blenlib/intern/math_rotation.c
+++ b/source/blender/blenlib/intern/math_rotation.c
@@ -1178,7 +1178,7 @@ typedef struct RotOrderInfo {
 /* Array of info for Rotation Order calculations
  * WARNING: must be kept in same order as eEulerRotationOrders
  */
-static RotOrderInfo rotOrders[]= {
+static RotOrderInfo rotOrders[] = {
 	/* i, j, k, n */
 	{{0, 1, 2}, 0}, // XYZ
 	{{0, 2, 1}, 1}, // XZY
@@ -1192,7 +1192,7 @@ static RotOrderInfo rotOrders[]= {
  * NOTE: since we start at 1 for the values, but arrays index from 0,
  * there is -1 factor involved in this process...
 */
-#define GET_ROTATIONORDER_INFO(order) (assert(order>=0 && order<=6), (order < 1) ? &rotOrders[0] : &rotOrders[(order)-1])
+#define GET_ROTATIONORDER_INFO(order) (assert(order >= 0 && order <= 6), (order < 1) ? &rotOrders[0] : &rotOrders[(order) - 1])
 
 /* Construct quaternion from Euler angles (in radians). */
 void eulO_to_quat(float q[4], const float e[3], const short order)
diff --git a/source/blender/blenlib/intern/math_vector.c b/source/blender/blenlib/intern/math_vector.c
index fcab5e75f39..ce387291dff 100644
--- a/source/blender/blenlib/intern/math_vector.c
+++ b/source/blender/blenlib/intern/math_vector.c
@@ -17,7 +17,7 @@
  *
 * The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
 * All rights reserved.
-
+ *
 * The Original Code is: some of this file.
 *
 * ***** END GPL LICENSE BLOCK *****
@@ -350,16 +350,16 @@ void rotate_normalized_v3_v3v3fl(float r[3], const float p[3], const float axis[
 	const float sintheta = sin(angle);
 
 	r[0] = ((costheta + (1 - costheta) * axis[0] * axis[0]) * p[0]) +
-	       (((1 - costheta) * axis[0] * axis[1] - axis[2] * sintheta) * p[1]) +
-	       (((1 - costheta) * axis[0] * axis[2] + axis[1] * sintheta) * p[2]);
+	       (((1 - costheta) * axis[0] * axis[1] - axis[2] * sintheta) * p[1]) +
+	       (((1 - costheta) * axis[0] * axis[2] + axis[1] * sintheta) * p[2]);
 
 	r[1] = (((1 - costheta) * axis[0] * axis[1] + axis[2] * sintheta) * p[0]) +
-	       ((costheta + (1 - costheta) * axis[1] * axis[1]) * p[1]) +
-	       (((1 - costheta) * axis[1] * axis[2] - axis[0] * sintheta) * p[2]);
+	       ((costheta + (1 - costheta) * axis[1] * axis[1]) * p[1]) +
+	       (((1 - costheta) * axis[1] * axis[2] - axis[0] * sintheta) * p[2]);
 
 	r[2] = (((1 - costheta) * axis[0] * axis[2] - axis[1] * sintheta) * p[0]) +
-	       (((1 - costheta) * axis[1] * axis[2] + axis[0] * sintheta) * p[1]) +
-	       ((costheta + (1 - costheta) * axis[2] * axis[2]) * p[2]);
+	       (((1 - costheta) * axis[1] * axis[2] + axis[0] * sintheta) * p[1]) +
+	       ((costheta + (1 - costheta) * axis[2] * axis[2]) * p[2]);
 }
 
 void rotate_v3_v3v3fl(float r[3], const float p[3], const float axis[3], const float angle)
@@ -467,7 +467,7 @@ void negate_vn_vn(float *array_tar, const float *array_src, const int size)
 	const float *src = array_src + (size - 1);
 	int i = size;
 	while (i--) {
-		*(tar--) = - *(src--);
+		*(tar--) = -*(src--);
 	}
 }
diff --git a/source/blender/blenlib/intern/math_vector_inline.c b/source/blender/blenlib/intern/math_vector_inline.c
index f1ab77a10e9..6d217b48d37 100644
--- a/source/blender/blenlib/intern/math_vector_inline.c
+++ b/source/blender/blenlib/intern/math_vector_inline.c
@@ -17,7 +17,7 @@
  *
 * The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
 * All rights reserved.
-
+ *
 * The Original Code is: some of this file.
 *
 * ***** END GPL LICENSE BLOCK *****