svn merge ^/trunk/blender -r48893:48911

This commit is contained in:
Campbell Barton
2012-07-14 13:04:01 +00:00
14 changed files with 397 additions and 170 deletions

View File

@@ -17,6 +17,7 @@ set(WITH_FFTW3 OFF CACHE FORCE BOOL)
set(WITH_LIBMV OFF CACHE FORCE BOOL)
set(WITH_CARVE OFF CACHE FORCE BOOL)
set(WITH_GAMEENGINE OFF CACHE FORCE BOOL)
set(WITH_COMPOSITOR OFF CACHE FORCE BOOL)
set(WITH_GHOST_XDND OFF CACHE FORCE BOOL)
set(WITH_IK_ITASC OFF CACHE FORCE BOOL)
set(WITH_IMAGE_CINEON OFF CACHE FORCE BOOL)

View File

@@ -60,8 +60,8 @@
#ifndef __MEM_GUARDEDALLOC_H__
#define __MEM_GUARDEDALLOC_H__
#include <stdio.h> /* needed for FILE* */
#include "MEM_sys_types.h" /* needed for uintptr_t */
#include <stdio.h> /* needed for FILE* */
#include "MEM_sys_types.h" /* needed for uintptr_t */
#ifdef __cplusplus
extern "C" {
@@ -70,7 +70,7 @@ extern "C" {
/** Returns the length of the allocated memory segment pointed at
* by vmemh. If the pointer was not previously allocated by this
* module, the result is undefined.*/
size_t MEM_allocN_len(void *vmemh)
size_t MEM_allocN_len(const void *vmemh)
#ifdef __GNUC__
__attribute__((warn_unused_result))
#endif
@@ -111,10 +111,10 @@ extern "C" {
* Allocate a block of memory of size len, with tag name str. The
* memory is cleared. The name must be static, because only a
* pointer to it is stored ! */
void *MEM_callocN(size_t len, const char * str)
void *MEM_callocN(size_t len, const char *str)
#ifdef __GNUC__
__attribute__((warn_unused_result))
__attribute__((nonnull))
__attribute__((nonnull(2)))
__attribute__((alloc_size(1)))
#endif
;
@@ -122,10 +122,10 @@ extern "C" {
/** Allocate a block of memory of size len, with tag name str. The
* name must be a static, because only a pointer to it is stored !
* */
void *MEM_mallocN(size_t len, const char * str)
void *MEM_mallocN(size_t len, const char *str)
#ifdef __GNUC__
__attribute__((warn_unused_result))
__attribute__((nonnull))
__attribute__((nonnull(2)))
__attribute__((alloc_size(1)))
#endif
;
@@ -133,10 +133,10 @@ extern "C" {
/** Same as callocN, clears memory and uses mmap (disk cached) if supported.
* Can be free'd with MEM_freeN as usual.
* */
void *MEM_mapallocN(size_t len, const char * str)
void *MEM_mapallocN(size_t len, const char *str)
#ifdef __GNUC__
__attribute__((warn_unused_result))
__attribute__((nonnull))
__attribute__((nonnull(2)))
__attribute__((alloc_size(1)))
#endif
;
@@ -213,11 +213,10 @@ public: \
MEM_freeN(mem); \
} \
#endif
#endif /* __cplusplus */
#ifdef __cplusplus
}
#endif
#endif /* __cplusplus */
#endif
#endif /* __MEM_GUARDEDALLOC_H__ */

View File

@@ -222,10 +222,10 @@ void MEM_set_memory_debug(void)
malloc_debug_memset = 1;
}
size_t MEM_allocN_len(void *vmemh)
size_t MEM_allocN_len(const void *vmemh)
{
if (vmemh) {
MemHead *memh = vmemh;
const MemHead *memh = vmemh;
memh--;
return memh->len;

View File

@@ -34,20 +34,18 @@
#include "DNA_mask_types.h"
#include "BLI_utildefines.h"
#include "BLI_kdopbvh.h"
#include "BLI_scanfill.h"
#include "BLI_memarena.h"
#include "BLI_math.h"
#include "BLI_rect.h"
#include "BLI_listbase.h"
#include "BLI_mempool.h"
#include "BLI_linklist.h"
#include "BKE_mask.h"
#ifndef USE_RASKTER
#define RESOL 32
/**
* A single #MaskRasterHandle contains multiple #MaskRasterLayer's,
* each #MaskRasterLayer does its own lookup which contributes to
@@ -56,15 +54,22 @@
/* internal use only */
typedef struct MaskRasterLayer {
/* xy raytree */
BVHTree *bvhtree;
/* geometry */
unsigned int tri_tot;
unsigned int (*tri_array)[4]; /* access coords tri/quad */
float (*tri_coords)[3]; /* xy, z 0-1 (1.0 == filled) */
/* 2d bounds (to quickly skip raytree lookup) */
rctf bounds;
/* geometry */
unsigned int (*tri_array)[4]; /* access coords tri/quad */
float (*tri_coords)[3]; /* xy, z 0-1 (1.0 == filled) */
/* buckets */
unsigned int **buckets_tri;
/* cache divide and subtract */
float buckets_xy_scalar[2]; /* 1.0 / (buckets_width + FLT_EPSILON) */
unsigned int buckets_x;
unsigned int buckets_y;
/* copied direct from #MaskLayer.--- */
@@ -75,6 +80,7 @@ typedef struct MaskRasterLayer {
} MaskRasterLayer;
static void layer_bucket_init(MaskRasterLayer *layer);
/**
* opaque local struct for mask pixel lookup, each MaskLayer needs one of these
@@ -104,7 +110,6 @@ void BLI_maskrasterize_handle_free(MaskRasterHandle *mr_handle)
/* raycast vars */
for (i = 0; i < layers_tot; i++, raslayers++) {
BLI_bvhtree_free(raslayers->bvhtree);
if (raslayers->tri_array) {
MEM_freeN(raslayers->tri_array);
@@ -113,27 +118,44 @@ void BLI_maskrasterize_handle_free(MaskRasterHandle *mr_handle)
if (raslayers->tri_coords) {
MEM_freeN(raslayers->tri_coords);
}
if (raslayers->buckets_tri) {
const unsigned int bucket_tot = raslayers->buckets_x * raslayers->buckets_y;
unsigned int bucket_index;
for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
unsigned int *tri_index = raslayers->buckets_tri[bucket_index];
if (tri_index) {
MEM_freeN(tri_index);
}
}
MEM_freeN(raslayers->buckets_tri);
}
}
MEM_freeN(mr_handle->layers);
MEM_freeN(mr_handle);
}
#define RESOL 32
#define PRINT_MASK_DEBUG printf
#define SF_EDGE_IS_BOUNDARY 0xff
#define SF_KEYINDEX_TEMP_ID ((unsigned int) -1)
#define TRI_TERMINATOR_ID ((unsigned int) -1)
void maskrasterize_spline_differentiate_point_inset(float (*diff_feather_points)[2], float (*diff_points)[2],
const int tot_diff_point, const float ofs, const int do_test)
const unsigned int tot_diff_point, const float ofs,
const short do_test)
{
int k_prev = tot_diff_point - 2;
int k_curr = tot_diff_point - 1;
int k_next = 0;
unsigned int k_prev = tot_diff_point - 2;
unsigned int k_curr = tot_diff_point - 1;
unsigned int k_next = 0;
int k;
unsigned int k;
float d_prev[2];
float d_next[2];
@@ -153,9 +175,6 @@ void maskrasterize_spline_differentiate_point_inset(float (*diff_feather_points)
sub_v2_v2v2(d_prev, co_prev, co_curr);
normalize_v2(d_prev);
/* TODO, speedup by only doing one normalize per iter */
for (k = 0; k < tot_diff_point; k++) {
co_prev = diff_points[k_prev];
@@ -202,7 +221,7 @@ void BLI_maskrasterize_handle_init(MaskRasterHandle *mr_handle, struct Mask *mas
const float zvec[3] = {0.0f, 0.0f, 1.0f};
MaskLayer *masklay;
int masklay_index;
unsigned int masklay_index;
mr_handle->layers_tot = BLI_countlist(&mask->masklayers);
mr_handle->layers = MEM_mallocN(sizeof(MaskRasterLayer) * mr_handle->layers_tot, STRINGIFY(MaskRasterLayer));
@@ -366,7 +385,6 @@ void BLI_maskrasterize_handle_init(MaskRasterHandle *mr_handle, struct Mask *mas
rctf bounds;
int tri_index;
BVHTree *bvhtree;
float bvhcos[4][3];
/* now we have all the splines */
@@ -397,9 +415,6 @@ void BLI_maskrasterize_handle_init(MaskRasterHandle *mr_handle, struct Mask *mas
tri_array = MEM_mallocN(sizeof(*tri_array) * (sf_tri_tot + tot_feather_quads), "maskrast_tri_index");
/* */
bvhtree = BLI_bvhtree_new(sf_tri_tot + tot_feather_quads, 0.000001f, 8, 6);
/* tri's */
tri = (unsigned int *)tri_array;
for (sf_tri = sf_ctx.fillfacebase.first, tri_index = 0; sf_tri; sf_tri = sf_tri->next, tri_index++) {
@@ -407,12 +422,6 @@ void BLI_maskrasterize_handle_init(MaskRasterHandle *mr_handle, struct Mask *mas
*(tri++) = sf_tri->v2->tmp.u;
*(tri++) = sf_tri->v3->tmp.u;
*(tri++) = TRI_VERT;
copy_v3_v3(bvhcos[0], tri_coords[*(tri - 4)]);
copy_v3_v3(bvhcos[1], tri_coords[*(tri - 3)]);
copy_v3_v3(bvhcos[2], tri_coords[*(tri - 2)]);
BLI_bvhtree_insert(bvhtree, tri_index, (float *)bvhcos, 3);
}
/* start of feather faces... if we have this set,
@@ -435,7 +444,7 @@ void BLI_maskrasterize_handle_init(MaskRasterHandle *mr_handle, struct Mask *mas
copy_v3_v3(bvhcos[2], tri_coords[*(tri - 2)]);
copy_v3_v3(bvhcos[3], tri_coords[*(tri - 1)]);
BLI_bvhtree_insert(bvhtree, tri_index++, (const float *)bvhcos, 4);
tri_index++;
}
}
}
@@ -444,21 +453,20 @@ void BLI_maskrasterize_handle_init(MaskRasterHandle *mr_handle, struct Mask *mas
BLI_assert(tri_index == sf_tri_tot + tot_feather_quads);
BLI_bvhtree_balance(bvhtree);
{
MaskRasterLayer *raslayer = &mr_handle->layers[masklay_index];
raslayer->tri_tot = sf_tri_tot + tot_feather_quads;
raslayer->tri_coords = tri_coords;
raslayer->tri_array = tri_array;
raslayer->bounds = bounds;
raslayer->bvhtree = bvhtree;
/* copy as-is */
raslayer->alpha = masklay->alpha;
raslayer->blend = masklay->blend;
raslayer->blend_flag = masklay->blend_flag;
layer_bucket_init(raslayer);
BLI_union_rctf(&mr_handle->bounds, &bounds);
}
@@ -471,11 +479,6 @@ void BLI_maskrasterize_handle_init(MaskRasterHandle *mr_handle, struct Mask *mas
}
}
//static void tri_flip_tri(unsigned int tri[3])
//{
//}
/* 2D ray test */
static float maskrasterize_layer_z_depth_tri(const float pt[2],
const float v1[3], const float v2[3], const float v3[3])
@@ -495,36 +498,31 @@ static float maskrasterize_layer_z_depth_quad(const float pt[2],
}
#endif
static void maskrasterize_layer_bvh_cb(void *userdata, int index, const BVHTreeRay *ray, BVHTreeRayHit *hit)
static float maskrasterize_layer_isect(unsigned int *tri, float (*cos)[3], const float dist_orig, const float xy[2])
{
MaskRasterLayer *layer = (struct MaskRasterLayer *)userdata;
unsigned int *tri = layer->tri_array[index];
float (*cos)[3] = layer->tri_coords;
const float dist_orig = hit->dist;
/* we always cast from same place only need xy */
if (tri[3] == TRI_VERT) {
/* --- tri --- */
#if 0
/* not essential but avoids unneeded extra lookups */
if ((cos[0][2] < dist_orig) ||
(cos[1][2] < dist_orig) ||
(cos[2][2] < dist_orig))
{
if (isect_point_tri_v2(ray->origin, cos[tri[0]], cos[tri[1]], cos[tri[2]])) {
if (isect_point_tri_v2(xy, cos[tri[0]], cos[tri[1]], cos[tri[2]])) {
/* we know all tris are close for now */
#if 0
const float dist = maskrasterize_layer_z_depth_tri(ray->origin, cos[tri[0]], cos[tri[1]], cos[tri[2]]);
if (dist < dist_orig) {
hit->index = index;
hit->dist = dist;
}
#else
hit->index = index;
hit->dist = 0.0f;
#endif
return maskrasterize_layer_z_depth_tri(xy, cos[tri[0]], cos[tri[1]], cos[tri[2]]);
}
}
#else
/* we know all tris are close for now */
if (1) {
if (isect_point_tri_v2(xy, cos[tri[0]], cos[tri[1]], cos[tri[2]])) {
return 0.0f;
}
}
#endif
}
else {
/* --- quad --- */
@@ -538,27 +536,15 @@ static void maskrasterize_layer_bvh_cb(void *userdata, int index, const BVHTreeR
/* needs work */
#if 0
if (isect_point_quad_v2(ray->origin, cos[tri[0]], cos[tri[1]], cos[tri[2]], cos[tri[3]])) {
const float dist = maskrasterize_layer_z_depth_quad(ray->origin, cos[tri[0]], cos[tri[1]], cos[tri[2]], cos[tri[3]]);
if (dist < dist_orig) {
hit->index = index;
hit->dist = dist;
}
if (isect_point_quad_v2(xy, cos[tri[0]], cos[tri[1]], cos[tri[2]], cos[tri[3]])) {
return maskrasterize_layer_z_depth_quad(xy, cos[tri[0]], cos[tri[1]], cos[tri[2]], cos[tri[3]]);
}
#elif 1
if (isect_point_tri_v2(ray->origin, cos[tri[0]], cos[tri[1]], cos[tri[2]])) {
const float dist = maskrasterize_layer_z_depth_tri(ray->origin, cos[tri[0]], cos[tri[1]], cos[tri[2]]);
if (dist < dist_orig) {
hit->index = index;
hit->dist = dist;
}
if (isect_point_tri_v2(xy, cos[tri[0]], cos[tri[1]], cos[tri[2]])) {
return maskrasterize_layer_z_depth_tri(xy, cos[tri[0]], cos[tri[1]], cos[tri[2]]);
}
else if (isect_point_tri_v2(ray->origin, cos[tri[0]], cos[tri[2]], cos[tri[3]])) {
const float dist = maskrasterize_layer_z_depth_tri(ray->origin, cos[tri[0]], cos[tri[2]], cos[tri[3]]);
if (dist < dist_orig) {
hit->index = index;
hit->dist = dist;
}
else if (isect_point_tri_v2(xy, cos[tri[0]], cos[tri[2]], cos[tri[3]])) {
return maskrasterize_layer_z_depth_tri(xy, cos[tri[0]], cos[tri[2]], cos[tri[3]]);
}
#else
/* cheat - we know first 2 verts are z0.0f and second 2 are z 1.0f */
@@ -566,63 +552,196 @@ static void maskrasterize_layer_bvh_cb(void *userdata, int index, const BVHTreeR
#endif
}
}
return 1.0f;
}
/**
 * Build the per-layer spatial-acceleration buckets.
 *
 * Divides the layer's 2D bounds into a fixed 256x256 grid of buckets and, for every
 * tri/quad in \a layer->tri_array, records its index in every bucket its 2D bounding
 * box overlaps. Each bucket is finally stored as a flat, TRI_TERMINATOR_ID-terminated
 * array of face indices in \a layer->buckets_tri (NULL for empty buckets), so the
 * per-pixel sampler can walk candidates without a tree lookup.
 *
 * \param layer  Layer whose #bounds, #tri_array, #tri_coords and #tri_tot must
 *               already be initialized; its bucket fields are written here.
 */
static void layer_bucket_init(MaskRasterLayer *layer)
{
	/* temporary arena for the link-list nodes built below; freed wholesale at the end */
	MemArena *arena = BLI_memarena_new(1 << 16, __func__);

	/* TODO - calculate best bucket size */
	layer->buckets_x = 256;
	layer->buckets_y = 256;

	/* precompute "world xy -> bucket index" scale; FLT_EPSILON keeps points exactly on
	 * the max edge from mapping one past the last bucket */
	layer->buckets_xy_scalar[0] = (1.0f / ((layer->bounds.xmax - layer->bounds.xmin) + FLT_EPSILON)) * layer->buckets_x;
	layer->buckets_xy_scalar[1] = (1.0f / ((layer->bounds.ymax - layer->bounds.ymin) + FLT_EPSILON)) * layer->buckets_y;

	{
		/* faces are stored as groups of 4 indices; tri[3] == TRI_VERT marks a triangle */
		unsigned int *tri = &layer->tri_array[0][0];
		float (*cos)[3] = layer->tri_coords;

		const unsigned int bucket_tot = layer->buckets_x * layer->buckets_y;

		/* pass 1 accumulators: one link-list head and count per bucket */
		LinkNode **bucketstore = MEM_callocN(bucket_tot * sizeof(LinkNode *), __func__);
		unsigned int *bucketstore_tot = MEM_callocN(bucket_tot * sizeof(unsigned int), __func__);

		unsigned int tri_index;

		for (tri_index = 0; tri_index < layer->tri_tot; tri_index++, tri += 4) {
			/* 2D bounding box of the current face */
			float xmin;
			float xmax;
			float ymin;
			float ymax;

			if (tri[3] == TRI_VERT) {
				/* triangle: bounds over 3 verts */
				const float *v1 = cos[tri[0]];
				const float *v2 = cos[tri[1]];
				const float *v3 = cos[tri[2]];

				xmin = fminf(v1[0], fminf(v2[0], v3[0]));
				xmax = fmaxf(v1[0], fmaxf(v2[0], v3[0]));
				ymin = fminf(v1[1], fminf(v2[1], v3[1]));
				ymax = fmaxf(v1[1], fmaxf(v2[1], v3[1]));
			}
			else {
				/* quad: bounds over 4 verts */
				const float *v1 = cos[tri[0]];
				const float *v2 = cos[tri[1]];
				const float *v3 = cos[tri[2]];
				const float *v4 = cos[tri[3]];

				xmin = fminf(v1[0], fminf(v2[0], fminf(v3[0], v4[0])));
				xmax = fmaxf(v1[0], fmaxf(v2[0], fmaxf(v3[0], v4[0])));
				ymin = fminf(v1[1], fminf(v2[1], fminf(v3[1], v4[1])));
				ymax = fmaxf(v1[1], fmaxf(v2[1], fmaxf(v3[1], v4[1])));
			}

			/* not essential but may as well skip any faces outside the view */
			if (!((xmax < 0.0f) || (ymax < 0.0f) || (xmin > 1.0f) || (ymin > 1.0f))) {
				/* bucket-grid cell range covered by this face's bounding box */
				const unsigned int xi_min = (unsigned int) ((xmin - layer->bounds.xmin) * layer->buckets_xy_scalar[0]);
				const unsigned int xi_max = (unsigned int) ((xmax - layer->bounds.xmin) * layer->buckets_xy_scalar[0]);
				const unsigned int yi_min = (unsigned int) ((ymin - layer->bounds.ymin) * layer->buckets_xy_scalar[1]);
				const unsigned int yi_max = (unsigned int) ((ymax - layer->bounds.ymin) * layer->buckets_xy_scalar[1]);

				unsigned int xi, yi;

				for (xi = xi_min; xi <= xi_max; xi++) {
					for (yi = yi_min; yi <= yi_max; yi++) {
						unsigned int bucket_index = (layer->buckets_x * yi) + xi;

						BLI_assert(xi < layer->buckets_x);
						BLI_assert(yi < layer->buckets_y);
						BLI_assert(bucket_index < bucket_tot);

						/* face index is packed into the link pointer, no per-node payload alloc */
						BLI_linklist_prepend_arena(&bucketstore[bucket_index],
						                           SET_UINT_IN_POINTER(tri_index),
						                           arena);

						bucketstore_tot[bucket_index]++;
					}
				}
			}
		}

		if (1) {
			/* now convert linknodes into arrays for faster per pixel access */
			/* NOTE(review): sizeof(unsigned int **) here looks like it was meant to be
			 * sizeof(unsigned int *) — harmless on common platforms where the two pointer
			 * sizes match, but worth confirming. */
			unsigned int **buckets_tri = MEM_mallocN(bucket_tot * sizeof(unsigned int **), __func__);
			unsigned int bucket_index;

			for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
				if (bucketstore_tot[bucket_index]) {
					/* +1 for the TRI_TERMINATOR_ID sentinel */
					unsigned int *bucket = MEM_mallocN((bucketstore_tot[bucket_index] + 1) * sizeof(unsigned int), __func__);
					LinkNode *bucket_node;

					buckets_tri[bucket_index] = bucket;

					for (bucket_node = bucketstore[bucket_index]; bucket_node; bucket_node = bucket_node->next) {
						*bucket = GET_UINT_FROM_POINTER(bucket_node->link);
						bucket++;
					}
					*bucket = TRI_TERMINATOR_ID;
				}
				else {
					buckets_tri[bucket_index] = NULL;
				}
			}

			layer->buckets_tri = buckets_tri;
		}

		MEM_freeN(bucketstore);
		MEM_freeN(bucketstore_tot);
	}

	BLI_memarena_free(arena);
}
/**
 * Map a 2D point inside the layer's bounds to its bucket-grid index.
 * The point must already be known to lie within \a layer->bounds.
 */
static unsigned int layer_bucket_index_from_xy(MaskRasterLayer *layer, const float xy[2])
{
	/* scale the bounds-relative position into integer grid cells */
	const unsigned int x_cell = (unsigned int)((xy[0] - layer->bounds.xmin) * layer->buckets_xy_scalar[0]);
	const unsigned int y_cell = (unsigned int)((xy[1] - layer->bounds.ymin) * layer->buckets_xy_scalar[1]);

	BLI_assert(BLI_in_rctf_v(&layer->bounds, xy));

	/* row-major layout: one row per y cell */
	return x_cell + (y_cell * layer->buckets_x);
}
/**
 * Look up the mask depth at \a xy by testing every face stored in the
 * point's bucket. Returns 1.0f when nothing covers the point, otherwise
 * the smallest intersection depth found (0.0f means fully covered).
 */
static float layer_bucket_depth_from_xy(MaskRasterLayer *layer, const float xy[2])
{
	const unsigned int bucket = layer_bucket_index_from_xy(layer, xy);
	unsigned int *face_iter = layer->buckets_tri[bucket];
	float (*coords)[3];
	float depth_best;

	if (face_iter == NULL) {
		/* empty bucket: no face can cover this point */
		return 1.0f;
	}

	coords = layer->tri_coords;
	depth_best = 1.0f;

	/* bucket arrays are TRI_TERMINATOR_ID-terminated */
	for (; *face_iter != TRI_TERMINATOR_ID; face_iter++) {
		unsigned int *face = layer->tri_array[*face_iter];
		const float depth_test = maskrasterize_layer_isect(face, coords, depth_best, xy);

		if (depth_test < depth_best) {
			depth_best = depth_test;

			/* comparing with 0.0f is OK here because triangles are always zero depth;
			 * we can't get any closer, so bail early */
			if (depth_best == 0.0f) {
				return 0.0f;
			}
		}
	}

	return depth_best;
}
float BLI_maskrasterize_handle_sample(MaskRasterHandle *mr_handle, const float xy[2])
{
/* TODO - AA jitter */
if (BLI_in_rctf_v(&mr_handle->bounds, xy)) {
const unsigned int layers_tot = mr_handle->layers_tot;
unsigned int i;
MaskRasterLayer *layer = mr_handle->layers;
/* raycast vars*/
const float co[3] = {xy[0], xy[1], 0.0f};
const float dir[3] = {0.0f, 0.0f, 1.0f};
const float radius = 1.0f;
BVHTreeRayHit hit = {0};
/* return */
float value = 0.0f;
for (i = 0; i < layers_tot; i++, layer++) {
if (BLI_in_rctf_v(&layer->bounds, xy)) {
/* --- hit (start) --- */
const float dist = 1.0f - layer_bucket_depth_from_xy(layer, xy);
const float dist_ease = (3.0f * dist * dist - 2.0f * dist * dist * dist);
hit.dist = FLT_MAX;
hit.index = -1;
float v;
/* apply alpha */
v = dist_ease * layer->alpha;
/* TODO, and axis aligned version of this function, avoids 2 casts */
BLI_bvhtree_ray_cast(layer->bvhtree, co, dir, radius, &hit, maskrasterize_layer_bvh_cb, layer);
if (layer->blend_flag & MASK_BLENDFLAG_INVERT) {
v = 1.0f - v;
}
/* --- hit (start) --- */
if (hit.index != -1) {
const float dist = 1.0f - hit.dist;
const float dist_ease = (3.0f * dist * dist - 2.0f * dist * dist * dist);
float v;
/* apply alpha */
v = dist_ease * layer->alpha;
if (layer->blend_flag & MASK_BLENDFLAG_INVERT) {
v = 1.0f - v;
}
switch (layer->blend) {
case MASK_BLEND_SUBTRACT:
{
value -= v;
break;
}
case MASK_BLEND_ADD:
default:
{
value += v;
break;
}
}
}
switch (layer->blend) {
case MASK_BLEND_SUBTRACT:
{
value -= v;
break;
}
case MASK_BLEND_ADD:
default:
{
value += v;
break;
}
}
/* --- hit (end) --- */
}

View File

@@ -992,6 +992,9 @@ static void grid_tangent_matrix(float mat[3][3], const CCGKey *key,
copy_v3_v3(mat[2], CCG_grid_elem_no(key, grid, x, y));
}
/* XXX WARNING: subsurf elements from dm and oldGridData *must* be of the same format (size),
* because this code uses CCGKey's info from dm to access oldGridData's normals
* (through the call to grid_tangent_matrix())! */
static void multiresModifier_disp_run(DerivedMesh *dm, Mesh *me, DerivedMesh *dm2, DispOp op, CCGElem **oldGridData, int totlvl)
{
CCGDerivedMesh *ccgdm = (CCGDerivedMesh *)dm;
@@ -2069,6 +2072,21 @@ void multires_load_old(Object *ob, Mesh *me)
me->mface[i].mat_nr = lvl->faces[i].mat_nr;
}
/* Copy the first-level data to the mesh */
/* XXX We must do this before converting tessfaces to polys/loops! */
for (i = 0, l = me->mr->vdata.layers; i < me->mr->vdata.totlayer; ++i, ++l)
CustomData_add_layer(&me->vdata, l->type, CD_REFERENCE, l->data, me->totvert);
for (i = 0, l = me->mr->fdata.layers; i < me->mr->fdata.totlayer; ++i, ++l)
CustomData_add_layer(&me->fdata, l->type, CD_REFERENCE, l->data, me->totface);
memset(&me->mr->vdata, 0, sizeof(CustomData));
memset(&me->mr->fdata, 0, sizeof(CustomData));
multires_load_old_vcols(me);
multires_load_old_face_flags(me);
/* multiresModifier_subdivide (actually, multires_subdivide) expects polys, not tessfaces! */
BKE_mesh_convert_mfaces_to_mpolys(me);
/* Add a multires modifier to the object */
md = ob->modifiers.first;
while (md && modifierType_getInfo(md->type)->type == eModifierTypeType_OnlyDeform)
@@ -2081,25 +2099,19 @@ void multires_load_old(Object *ob, Mesh *me)
mmd->lvl = mmd->totlvl;
orig = CDDM_from_mesh(me, NULL);
dm = multires_make_derived_from_derived(orig, mmd, ob, 0);
/* XXX We *must* alloc paint mask here, else we have some kind of mismatch in
* multires_modifier_update_mdisps() (called by dm->release(dm)), which always creates the
* reference subsurfed dm with this option, before calling multiresModifier_disp_run(),
* which implicitly expects both subsurfs from its first dm and oldGridData parameters to
* be of the same "format"! */
dm = multires_make_derived_from_derived(orig, mmd, ob, MULTIRES_ALLOC_PAINT_MASK);
multires_load_old_dm(dm, me, mmd->totlvl + 1);
multires_dm_mark_as_modified(dm, MULTIRES_COORDS_MODIFIED);
dm->release(dm);
orig->release(orig);
/* Copy the first-level data to the mesh */
for (i = 0, l = me->mr->vdata.layers; i < me->mr->vdata.totlayer; ++i, ++l)
CustomData_add_layer(&me->vdata, l->type, CD_REFERENCE, l->data, me->totvert);
for (i = 0, l = me->mr->fdata.layers; i < me->mr->fdata.totlayer; ++i, ++l)
CustomData_add_layer(&me->fdata, l->type, CD_REFERENCE, l->data, me->totface);
memset(&me->mr->vdata, 0, sizeof(CustomData));
memset(&me->mr->fdata, 0, sizeof(CustomData));
multires_load_old_vcols(me);
multires_load_old_face_flags(me);
/* Remove the old multires */
multires_free(me->mr);
me->mr = NULL;

View File

@@ -55,6 +55,7 @@ float area_quad_v3(const float a[3], const float b[3], const float c[3], const f
float area_poly_v3(int nr, float verts[][3], const float normal[3]);
int is_quad_convex_v3(const float v1[3], const float v2[3], const float v3[3], const float v4[3]);
int is_quad_convex_v2(const float v1[2], const float v2[2], const float v3[2], const float v4[2]);
/********************************* Distance **********************************/

View File

@@ -50,16 +50,43 @@ extern "C" {
struct MemArena;
typedef struct MemArena MemArena;
struct MemArena *BLI_memarena_new(const int bufsize, const char *name)
#ifdef __GNUC__
__attribute__((warn_unused_result))
__attribute__((nonnull(2)))
#endif
;
struct MemArena *BLI_memarena_new(int bufsize, const char *name);
void BLI_memarena_free(struct MemArena *ma);
void BLI_memarena_free(struct MemArena *ma)
#ifdef __GNUC__
__attribute__((nonnull(1)))
#endif
;
void BLI_memarena_use_malloc(struct MemArena *ma);
void BLI_memarena_use_calloc(struct MemArena *ma);
void BLI_memarena_use_malloc(struct MemArena *ma)
#ifdef __GNUC__
__attribute__((nonnull(1)))
#endif
;
void BLI_memarena_use_calloc(struct MemArena *ma)
#ifdef __GNUC__
__attribute__((nonnull(1)))
#endif
;
void BLI_memarena_use_align(struct MemArena *ma, int align);
void BLI_memarena_use_align(struct MemArena *ma, const int align)
#ifdef __GNUC__
__attribute__((nonnull(1)))
#endif
;
void *BLI_memarena_alloc(struct MemArena *ma, int size);
void *BLI_memarena_alloc(struct MemArena *ma, int size)
#ifdef __GNUC__
__attribute__((warn_unused_result))
__attribute__((nonnull(1)))
__attribute__((alloc_size(2)))
#endif
;
#ifdef __cplusplus
}

View File

@@ -48,16 +48,47 @@ typedef struct BLI_mempool BLI_mempool;
* first four bytes of the elements never contain the character string
* 'free'. use with care.*/
BLI_mempool *BLI_mempool_create(int esize, int totelem, int pchunk, int flag);
void *BLI_mempool_alloc(BLI_mempool *pool);
void *BLI_mempool_calloc(BLI_mempool *pool);
void BLI_mempool_free(BLI_mempool *pool, void *addr);
void BLI_mempool_destroy(BLI_mempool *pool);
int BLI_mempool_count(BLI_mempool *pool);
void *BLI_mempool_findelem(BLI_mempool *pool, int index);
BLI_mempool *BLI_mempool_create(int esize, int totelem, int pchunk, int flag)
#ifdef __GNUC__
__attribute__((warn_unused_result))
#endif
;
void *BLI_mempool_alloc(BLI_mempool *pool)
#ifdef __GNUC__
__attribute__((warn_unused_result))
__attribute__((nonnull(1)))
#endif
;
void *BLI_mempool_calloc(BLI_mempool *pool)
#ifdef __GNUC__
__attribute__((warn_unused_result))
__attribute__((nonnull(1)))
#endif
;
void BLI_mempool_free(BLI_mempool *pool, void *addr)
#ifdef __GNUC__
__attribute__((nonnull(1, 2)))
#endif
;
void BLI_mempool_destroy(BLI_mempool *pool)
#ifdef __GNUC__
__attribute__((nonnull(1)))
#endif
;
int BLI_mempool_count(BLI_mempool *pool)
#ifdef __GNUC__
__attribute__((nonnull(1)))
#endif
;
void *BLI_mempool_findelem(BLI_mempool *pool, int index)
#ifdef __GNUC__
__attribute__((warn_unused_result))
__attribute__((nonnull(1)))
#endif
;
/** Iteration stuff. Note: this can easily produce bugs; use with care. **/
/*private structure*/
/* private structure */
typedef struct BLI_mempool_iter {
BLI_mempool *pool;
struct BLI_mempool_chunk *curchunk;
@@ -70,11 +101,20 @@ enum {
BLI_MEMPOOL_ALLOW_ITER = (1 << 1)
};
void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter);
void *BLI_mempool_iterstep(BLI_mempool_iter *iter);
void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
#ifdef __GNUC__
__attribute__((nonnull(1, 2)))
#endif
;
void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
#ifdef __GNUC__
__attribute__((warn_unused_result))
__attribute__((nonnull(1)))
#endif
;
#ifdef __cplusplus
}
#endif
#endif
#endif /* __BLI_MEMPOOL_H__ */

View File

@@ -232,6 +232,10 @@
#define SET_INT_IN_POINTER(i) ((void *)(intptr_t)(i))
#define GET_INT_FROM_POINTER(i) ((int)(intptr_t)(i))
#define SET_UINT_IN_POINTER(i) ((void *)(uintptr_t)(i))
#define GET_UINT_FROM_POINTER(i) ((unsigned int)(uintptr_t)(i))
/* Macro to convert a value to string in the preprocessor
* STRINGIFY_ARG: gives the argument as a string
* STRINGIFY_APPEND: appends any argument 'b' onto the string argument 'a',

View File

@@ -3278,3 +3278,10 @@ int is_quad_convex_v3(const float v1[3], const float v2[3], const float v3[3], c
/* linetests, the 2 diagonals have to intersect to be convex */
return (isect_line_line_v2(vec[0], vec[2], vec[1], vec[3]) > 0) ? TRUE : FALSE;
}
/**
 * Check whether the 2D quad (v1, v2, v3, v4) is convex: a quad is convex
 * exactly when its two diagonals (v1-v3 and v2-v4) intersect.
 */
int is_quad_convex_v2(const float v1[2], const float v2[2], const float v3[2], const float v4[2])
{
	if (isect_line_line_v2(v1, v3, v2, v4) > 0) {
		return TRUE;
	}
	return FALSE;
}

View File

@@ -41,6 +41,13 @@
#include "DNA_vec_types.h"
#include "BLI_rect.h"
/**
* Determine if a rect is empty. An empty
* rect is one with a zero (or negative)
* width or height.
*
* \return True if \a rect is empty.
*/
int BLI_rcti_is_empty(const rcti *rect)
{
return ((rect->xmax <= rect->xmin) || (rect->ymax <= rect->ymin));

View File

@@ -560,14 +560,14 @@ static int project_bucket_offset(const ProjPaintState *ps, const float projCoSS[
* ps->bucketRect[x + (y*ps->buckets_y)] */
/* please explain?
* projCoSS[0] - ps->screenMin[0] : zero origin
* ... / ps->screen_width : range from 0.0 to 1.0
* ... * ps->buckets_x : use as a bucket index
* projCoSS[0] - ps->screenMin[0] : zero origin
* ... / ps->screen_width : range from 0.0 to 1.0
* ... * ps->buckets_x : use as a bucket index
*
* Second multiplication does similar but for vertical offset
*/
return ( (int)(((projCoSS[0] - ps->screenMin[0]) / ps->screen_width) * ps->buckets_x)) +
( ( (int)(((projCoSS[1] - ps->screenMin[1]) / ps->screen_height) * ps->buckets_y)) * ps->buckets_x);
return ( (int)(((projCoSS[0] - ps->screenMin[0]) / ps->screen_width) * ps->buckets_x)) +
(((int)(((projCoSS[1] - ps->screenMin[1]) / ps->screen_height) * ps->buckets_y)) * ps->buckets_x);
}
static int project_bucket_offset_safe(const ProjPaintState *ps, const float projCoSS[2])

View File

@@ -154,15 +154,8 @@ void KX_BlenderMaterial::ReleaseMaterial()
mBlenderShader->ReloadMaterial();
}
void KX_BlenderMaterial::OnConstruction(int layer)
void KX_BlenderMaterial::InitTextures()
{
if (mConstructed)
// when materials are reused between objects
return;
if (mMaterial->glslmat)
SetBlenderGLSLShader(layer);
// for each unique material...
int i;
for (i=0; i<mMaterial->num_enabled; i++) {
@@ -177,7 +170,7 @@ void KX_BlenderMaterial::OnConstruction(int layer)
}
// If we're using glsl materials, the textures are handled by bf_gpu, so don't load them twice!
// However, if we're using a custom shader, then we still need to load the textures ourselves.
else if (!mMaterial->glslmat || mBlenderShader) {
else if (!mMaterial->glslmat || mShader) {
if ( mMaterial->img[i] ) {
if ( ! mTextures[i].InitFromImage(i, mMaterial->img[i], (mMaterial->flag[i] &MIPMAP)!=0 ))
spit("unable to initialize image("<<i<<") in "<<
@@ -185,6 +178,18 @@ void KX_BlenderMaterial::OnConstruction(int layer)
}
}
}
}
void KX_BlenderMaterial::OnConstruction(int layer)
{
if (mConstructed)
// when materials are reused between objects
return;
if (mMaterial->glslmat)
SetBlenderGLSLShader(layer);
InitTextures();
mBlendFunc[0] =0;
mBlendFunc[1] =0;
@@ -892,6 +897,9 @@ KX_PYMETHODDEF_DOC( KX_BlenderMaterial, getShader , "getShader()")
if (!mShader && !mModified) {
mShader = new BL_Shader();
mModified = true;
// Using a custom shader, make sure to initialize textures
InitTextures();
}
if (mShader && !mShader->GetError()) {

View File

@@ -138,6 +138,8 @@ private:
bool mModified;
bool mConstructed; // if false, don't clean on exit
void InitTextures();
void SetBlenderGLSLShader(int layer);
void ActivatGLMaterials( RAS_IRasterizer* rasty )const;