Merging r49738 through r49757 from trunk into soc-2011-tomato

This commit is contained in:
Sergey Sharybin
2012-08-10 08:54:14 +00:00
17 changed files with 333 additions and 377 deletions

View File

@@ -92,7 +92,7 @@ if platform=='win32':
if not use_color=='1':
B.bc.disable()
# On the default white OS X terminal, some colors are totally illegible
if platform=='darwin':
B.bc.OKGREEN = '\033[34m'
@@ -123,7 +123,7 @@ if bitness:
B.bitness = bitness
else:
B.bitness = tempbitness
# first check cmdline for toolset and we create env to work on
quickie = B.arguments.get('BF_QUICK', None)
@@ -138,7 +138,7 @@ if quickie:
B.quickie=string.split(quickie,',')
else:
B.quickie=[]
toolset = B.arguments.get('BF_TOOLSET', None)
if toolset:
print "Using " + toolset
@@ -270,7 +270,7 @@ if 'blenderlite' in B.targets:
target_env_defs['WITH_BF_PYTHON'] = False
target_env_defs['WITH_BF_3DMOUSE'] = False
target_env_defs['WITH_BF_LIBMV'] = False
# Merge blenderlite, let command line to override
for k,v in target_env_defs.iteritems():
if k not in B.arguments:
@@ -319,7 +319,7 @@ if env['WITH_BF_OPENMP'] == 1:
if env['WITH_GHOST_COCOA'] == True:
env.Append(CPPFLAGS=['-DGHOST_COCOA'])
if env['USE_QTKIT'] == True:
env.Append(CPPFLAGS=['-DUSE_QTKIT'])
@@ -374,7 +374,7 @@ if not B.root_build_dir[-1]==os.sep:
B.root_build_dir += os.sep
if not B.doc_build_dir[-1]==os.sep:
B.doc_build_dir += os.sep
# We do a shortcut for clean when no quicklist is given: just delete
# builddir without reading in SConscripts
do_clean = None
@@ -422,16 +422,16 @@ if not quickie and do_clean:
# with _any_ library but since we used a fixed python version this tends to
# be most problematic.
if env['WITH_BF_PYTHON']:
py_h = os.path.join(Dir(env.subst('${BF_PYTHON_INC}')).abspath, "Python.h")
py_h = os.path.join(Dir(env.subst('${BF_PYTHON_INC}')).abspath, "Python.h")
if not os.path.exists(py_h):
print("\nMissing: \"" + env.subst('${BF_PYTHON_INC}') + os.sep + "Python.h\",\n"
" Set 'BF_PYTHON_INC' to point "
"to a valid python include path.\n Containing "
"Python.h for python version \"" + env.subst('${BF_PYTHON_VERSION}') + "\"")
if not os.path.exists(py_h):
print("\nMissing: \"" + env.subst('${BF_PYTHON_INC}') + os.sep + "Python.h\",\n"
" Set 'BF_PYTHON_INC' to point "
"to a valid python include path.\n Containing "
"Python.h for python version \"" + env.subst('${BF_PYTHON_VERSION}') + "\"")
Exit()
del py_h
Exit()
del py_h
if not os.path.isdir ( B.root_build_dir):
@@ -445,9 +445,53 @@ if not os.path.isdir ( B.root_build_dir):
# if not os.path.isdir(B.doc_build_dir) and env['WITH_BF_DOCS']:
# os.makedirs ( B.doc_build_dir )
###################################
# Ensure all data files are valid #
###################################
if not os.path.isdir ( B.root_build_dir + 'data_headers'):
os.makedirs ( B.root_build_dir + 'data_headers' )
# use for includes
env['DATA_HEADERS'] = "#" + env['BF_BUILDDIR'] + "/data_headers"
def ensure_data(FILE_FROM, FILE_TO, VAR_NAME):
    """Convert a data file into a generated C header.

    Writes FILE_TO containing ``int <VAR_NAME>_size`` and a NUL-terminated
    ``char <VAR_NAME>[]`` byte array holding the contents of FILE_FROM.
    Regeneration is skipped when FILE_TO already exists and is newer than
    FILE_FROM.
    """
    # Normalize path separators when building with native Windows tools.
    if os.sep == "\\":
        FILE_FROM = FILE_FROM.replace("/", "\\")
        FILE_TO = FILE_TO.replace("/", "\\")

    # first check if we need to bother: target newer than source -> up to date.
    if os.path.exists(FILE_TO):
        if os.path.getmtime(FILE_FROM) < os.path.getmtime(FILE_TO):
            return

    print(B.bc.HEADER + "Generating: " + B.bc.ENDC + "%r" % os.path.basename(FILE_TO))
    # Read in binary mode; determine the size by seeking to the end.
    fpin = open(FILE_FROM, "rb")
    fpin.seek(0, os.SEEK_END)
    size = fpin.tell()
    fpin.seek(0)

    fpout = open(FILE_TO, "w")

    fpout.write("int %s_size = %d;\n" % (VAR_NAME, size))
    fpout.write("char %s[] = {\n" % VAR_NAME)
    # Emit one decimal byte per source byte, breaking the line every 32 bytes.
    while size > 0:
        size -= 1
        if size % 32 == 31:
            fpout.write("\n")

        fpout.write("%3d," % ord(fpin.read(1)))
    # Trailing NUL so the array can also be consumed as a C string.
    fpout.write("\n 0};\n\n")

    fpin.close()
    fpout.close()

# Generate the OpenCL kernel header used by the compositor.
ensure_data("source/blender/compositor/operations/COM_OpenCLKernels.cl",
            B.root_build_dir + "data_headers/COM_OpenCLKernels.cl.h",
            "clkernelstoh_COM_OpenCLKernels_cl")
##### END DATAFILES ##########
Help(opts.GenerateHelpText(env))
# default is new quieter output, but if you need to see the
# default is new quieter output, but if you need to see the
# commands, do 'scons BF_QUIET=0'
bf_quietoutput = B.arguments.get('BF_QUIET', '1')
if env['BF_QUIET']:
@@ -534,7 +578,7 @@ if env['OURPLATFORM']!='darwin':
for targetdir,srcfile in zip(datafilestargetlist, datafileslist):
td, tf = os.path.split(targetdir)
dotblenderinstall.append(env.Install(dir=td, source=srcfile))
if env['WITH_BF_PYTHON']:
#-- local/VERSION/scripts
scriptpaths=['release/scripts']
@@ -634,10 +678,10 @@ if env['OURPLATFORM']!='darwin':
if env['WITH_BF_INTERNATIONAL']:
internationalpaths=['release' + os.sep + 'datafiles']
def check_path(path, member):
return (member in path.split(os.sep))
for intpath in internationalpaths:
for dp, dn, df in os.walk(intpath):
if '.svn' in dn:
@@ -650,7 +694,7 @@ if env['OURPLATFORM']!='darwin':
pass
else:
continue
dir = os.path.join(env['BF_INSTALLDIR'], VERSION)
dir += os.sep + os.path.basename(intpath) + dp[len(intpath):]
@@ -758,7 +802,7 @@ if env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'win64-vc', 'linuxcross'):
# strict: the x86 build fails on x64 Windows. We need to ship
# both builds in x86 packages.
if bitness == 32:
dllsources.append('${LCGDIR}/thumbhandler/lib/BlendThumb.dll')
dllsources.append('${LCGDIR}/thumbhandler/lib/BlendThumb.dll')
dllsources.append('${LCGDIR}/thumbhandler/lib/BlendThumb64.dll')
if env['WITH_BF_OIIO'] and env['OURPLATFORM'] != 'win32-mingw':
@@ -774,7 +818,7 @@ if env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'win64-vc', 'linuxcross'):
if env['OURPLATFORM'] == 'win64-mingw':
dllsources = []
if env['WITH_BF_PYTHON']:
if env['BF_DEBUG']:
dllsources.append('${BF_PYTHON_LIBPATH}/${BF_PYTHON_DLL}_d.dll')
@@ -793,10 +837,10 @@ if env['OURPLATFORM'] == 'win64-mingw':
if env['WITH_BF_SDL']:
dllsources.append('${LCGDIR}/sdl/lib/SDL.dll')
if(env['WITH_BF_OPENMP']):
dllsources.append('${LCGDIR}/binaries/libgomp-1.dll')
if(env['WITH_BF_OPENMP']):
dllsources.append('${LCGDIR}/binaries/libgomp-1.dll')
dllsources.append('${LCGDIR}/thumbhandler/lib/BlendThumb64.dll')
dllsources.append('${LCGDIR}/binaries/libgcc_s_sjlj-1.dll')
dllsources.append('${LCGDIR}/binaries/libwinpthread-1.dll')

View File

@@ -0,0 +1,25 @@
# cmake script, to be called on its own with 3 defined args
#
# - FILE_FROM: path of the file to embed
# - FILE_TO:   path of the C header to (re)generate
# - VAR_NAME:  base name of the generated symbols:
#              "int <VAR_NAME>_size" and "char <VAR_NAME>[]"
# not highly optimal, may replace with generated C program like makesdna

# Read the whole input as a hex string: 2 hex characters per source byte.
file(READ ${FILE_FROM} file_from_string HEX)
string(LENGTH ${file_from_string} _max_index)
math(EXPR size_on_disk ${_max_index}/2)

# Rewrite the header from scratch each run.
file(REMOVE ${FILE_TO})
file(APPEND ${FILE_TO} "int ${VAR_NAME}_size = ${size_on_disk};\n")
file(APPEND ${FILE_TO} "char ${VAR_NAME}[] = {")

# Walk the hex string two characters (one byte) at a time.
set(_index 0)
while(NOT _index EQUAL _max_index)
	string(SUBSTRING "${file_from_string}" ${_index} 2 _pair)
	file(APPEND ${FILE_TO} "0x${_pair},")
	math(EXPR _index ${_index}+2)
endwhile()

# null terminator not essential but good if we want plain strings encoded
file(APPEND ${FILE_TO} "0x00};\n")

View File

@@ -245,7 +245,7 @@ static void options_parse(int argc, const char **argv)
NULL);
if(ap.parse(argc, argv) < 0) {
fprintf(stderr, "%s\n", ap.error_message().c_str());
fprintf(stderr, "%s\n", ap.geterror().c_str());
ap.usage();
exit(EXIT_FAILURE);
}

View File

@@ -1,70 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# The Original Code is Copyright (C) 2012 Blender Foundation.
# All rights reserved.
#
# Contributor(s): Jeroen Bakker
#
# ***** END GPL LICENCE BLOCK *****

# <pep8 compliant>

# Command-line tool: converts an OpenCL kernel file into a C header that
# declares a single string constant holding the file's contents, e.g.
#   clkernelstoh foo.cl  ->  foo.cl.h  defining  const char *clkernelstoh_foo_cl_h

import sys
import os

# Require the input file argument.
if len(sys.argv) < 2:
    sys.stdout.write("Usage: clkernelstoh <cl_file>\n")
    sys.exit(1)

filename = sys.argv[1]

try:
    fpin = open(filename, "r")
except:  # deliberately broad: any open failure just reports and exits
    sys.stdout.write("Unable to open input %s\n" % sys.argv[1])
    sys.exit(1)

# Strip a leading "./" so the output lands next to the input file.
if filename[0:2] == "." + os.sep:
    filename = filename[2:]

cname = filename + ".h"
sys.stdout.write("Making H file <%s>\n" % cname)

# Derive the C identifier from the base name; dots become underscores.
filename = filename.split("/")[-1].split("\\")[-1]
filename = filename.replace(".", "_")

try:
    fpout = open(cname, "w")
except:  # same best-effort error handling as for the input file
    sys.stdout.write("Unable to open output %s\n" % cname)
    sys.exit(1)

fpout.write("/* clkernelstoh output of file <%s> */\n\n" % filename)
fpout.write("const char * clkernelstoh_%s = " % filename)

# Emit each source line as a quoted "...\n" fragment, continued with '\'.
lines = fpin.readlines()
for line in lines:
    fpout.write("\"")
    fpout.write(line.rstrip())
    fpout.write("\\n\" \\\n")
fpout.write("\"\\0\";\n")

fpin.close()
fpout.close()

View File

@@ -32,6 +32,24 @@ from bpy.props import EnumProperty
# lazy init
node_type_items_dict = {}
# Prefixes used to distinguish base node types and node groups
node_type_prefix = 'NODE_'
node_group_prefix = 'GROUP_'
# Build enum items for all base node types of the given node class.
# Each identifier carries node_type_prefix so base types can be told apart
# from node group entries; the generic 'GROUP' type is skipped here because
# groups are enumerated separately, one entry per existing group tree.
def node_type_items(node_class):
    items = []
    for enum_item in node_class.bl_rna.properties['type'].enum_items:
        if enum_item.identifier == 'GROUP':
            continue
        items.append((node_type_prefix + enum_item.identifier,
                      enum_item.name,
                      enum_item.description))
    return items
# Build enum items for node group trees whose type matches tree_type.
# Group trees have no description property yet (could be added as a custom
# property later), so the description field is left empty.
def node_group_items(tree_type):
    matching = (group for group in bpy.data.node_groups
                if group.type == tree_type)
    return [(node_group_prefix + group.name, group.name, '')
            for group in matching]
# Returns the enum item list for the edited tree in the context
def node_type_items_cb(self, context):
snode = context.space_data
@@ -41,21 +59,19 @@ def node_type_items_cb(self, context):
if not tree:
return []
# Lists of basic node types for each
if not node_type_items_dict:
node_type_items_dict.update({
'SHADER': [(item.identifier, item.name, item.description, item.value)
for item in bpy.types.ShaderNode.bl_rna.properties['type'].enum_items],
'COMPOSITING': [(item.identifier, item.name, item.description, item.value)
for item in bpy.types.CompositorNode.bl_rna.properties['type'].enum_items],
'TEXTURE': [(item.identifier, item.name, item.description, item.value)
for item in bpy.types.TextureNode.bl_rna.properties['type'].enum_items],
'SHADER': node_type_items(bpy.types.ShaderNode),
'COMPOSITING': node_type_items(bpy.types.CompositorNode),
'TEXTURE': node_type_items(bpy.types.TextureNode),
})
# XXX Does not work correctly, see comment above
#return [(item.identifier, item.name, item.description, item.value) for item in tree.nodes.bl_rna.functions['new'].parameters['type'].enum_items]
if tree.type in node_type_items_dict:
return node_type_items_dict[tree.type]
return node_type_items_dict[tree.type] + node_group_items(tree.type)
else:
return []
@@ -79,7 +95,17 @@ class NODE_OT_add_search(Operator):
space = context.space_data
tree = space.edit_tree
node = tree.nodes.new(type=self.type)
# Enum item identifier has an additional prefix to distinguish base node types from node groups
item = self.type
if (item.startswith(node_type_prefix)):
# item means base node type
node = tree.nodes.new(type=item[len(node_type_prefix):])
elif (item.startswith(node_group_prefix)):
# item means node group type
node = tree.nodes.new(type='GROUP', group=bpy.data.node_groups[item[len(node_group_prefix):]])
else:
return None
for n in tree.nodes:
if n == node:
node.select = True

View File

@@ -783,7 +783,13 @@ class ConstraintButtonsPanel():
if clip:
col.prop_search(con, "object", clip.tracking, "objects", icon='OBJECT_DATA')
col.prop_search(con, "track", clip.tracking, "tracks", icon='ANIM_DATA')
if con.object in clip.tracking.objects:
tracking_object = clip.tracking.objects[con.object]
else:
tracking_object = clip.tracking
col.prop_search(con, "track", tracking_object, "tracks", icon='ANIM_DATA')
col.prop(con, "camera")

View File

@@ -106,6 +106,10 @@ class IMAGE_MT_select(Menu):
layout.operator("uv.select_pinned")
layout.operator("uv.select_linked")
layout.separator()
layout.operator("uv.select_split")
class IMAGE_MT_image(Menu):
bl_label = "Image"

View File

@@ -57,7 +57,6 @@ struct CollisionTree;
/* Bits to or into the ClothVertex.flags. */
#define CLOTH_VERT_FLAG_PINNED 1
#define CLOTH_VERT_FLAG_NOSELFCOLL 2 /* vertex NOT used for self collisions */
#define CLOTH_VERT_FLAG_PINNED_EM 3
/**
* This structure describes a cloth object against which the

View File

@@ -400,8 +400,18 @@ static int do_step_cloth(Object *ob, ClothModifierData *clmd, DerivedMesh *resul
copy_v3_v3(verts->txold, verts->x);
/* Get the current position. */
copy_v3_v3(verts->xconst, mvert[i].co);
mul_m4_v3(ob->obmat, verts->xconst);
if ((clmd->sim_parms->flags & CLOTH_SIMSETTINGS_FLAG_GOAL) &&
((!(cloth->verts[i].flags & CLOTH_VERT_FLAG_PINNED))
&& (cloth->verts[i].goal > ALMOST_ZERO)))
{
copy_v3_v3(verts->xconst, mvert[i].co);
mul_m4_v3(ob->obmat, verts->xconst);
}
else
{
/* This fixes animated goals so they do not jump back to the "first frame position" */
copy_v3_v3(verts->xconst, verts->txold);
}
}
effectors = pdInitEffectors(clmd->scene, ob, NULL, clmd->sim_parms->effector_weights);
@@ -795,13 +805,21 @@ static void cloth_apply_vgroup ( ClothModifierData *clmd, DerivedMesh *dm )
if (cloth_uses_vgroup(clmd)) {
for ( i = 0; i < numverts; i++, verts++ ) {
/* Reset Goal values to standard */
if ( clmd->sim_parms->flags & CLOTH_SIMSETTINGS_FLAG_GOAL )
verts->goal= clmd->sim_parms->defgoal;
else
verts->goal= 0.0f;
dvert = dm->getVertData ( dm, i, CD_MDEFORMVERT );
if ( dvert ) {
for ( j = 0; j < dvert->totweight; j++ ) {
for ( j = 0; j < dvert->totweight; j++ ) {
verts->flags &= ~CLOTH_VERT_FLAG_PINNED;
if (( dvert->dw[j].def_nr == (clmd->sim_parms->vgroup_mass-1)) && (clmd->sim_parms->flags & CLOTH_SIMSETTINGS_FLAG_GOAL )) {
verts->goal = dvert->dw [j].weight;
/* goalfac= 1.0f; */ /* UNUSED */
/*
@@ -1082,6 +1100,20 @@ static void cloth_update_springs( ClothModifierData *clmd )
{
spring->stiffness = (cloth->verts[spring->kl].bend_stiff + cloth->verts[spring->ij].bend_stiff) / 2.0f;
}
else if(spring->type == CLOTH_SPRING_TYPE_GOAL)
{
/* Warning: Appending NEW goal springs does not work because implicit solver would need reset! */
/* Activate / Deactivate existing springs */
if ((!(cloth->verts[spring->ij].flags & CLOTH_VERT_FLAG_PINNED)) && (cloth->verts[spring->ij].goal > ALMOST_ZERO))
{
spring->flags &= ~CLOTH_SPRING_FLAG_DEACTIVATE;
}
else
{
spring->flags |= CLOTH_SPRING_FLAG_DEACTIVATE;
}
}
search = search->next;
}

View File

@@ -120,7 +120,7 @@ typedef float lfVector[3];
typedef struct fmatrix3x3 {
float m[3][3]; /* 3x3 matrix */
unsigned int c, r; /* column and row number */
int pinned; /* is this vertex allowed to move? */
/* int pinned; // is this vertex allowed to move? */
float n1, n2, n3; /* three normal vectors for collision constrains */
unsigned int vcount; /* vertex count */
unsigned int scount; /* spring count */
@@ -700,10 +700,32 @@ typedef struct Implicit_Data {
fmatrix3x3 *A, *dFdV, *dFdX, *S, *P, *Pinv, *bigI, *M;
} Implicit_Data;
/* Init constraint matrix.
 * Rebuilds the constraint matrix S from the current per-vertex pinned flags:
 * one leading entry per pinned vertex, storing that vertex index in both
 * .c and .r; S[0].vcount records how many entries are in use. */
static void update_matrixS(ClothVertex *verts, int numverts, fmatrix3x3 *S)
{
	unsigned int pinned = 0;
	int i = 0;

	/* Clear matrix from old vertex constraints */
	for(i = 0; i < S[0].vcount; i++)
		S[i].c = S[i].r = 0;

	/* Set new vertex constraints */
	for (i = 0; i < numverts; i++) {
		if (verts [i].flags & CLOTH_VERT_FLAG_PINNED) {
			S[pinned].c = S[pinned].r = i;
			pinned++;
		}
	}

	// S is special and needs specific vcount and scount
	S[0].vcount = pinned;
	S[0].scount = 0;
}
int implicit_init(Object *UNUSED(ob), ClothModifierData *clmd)
{
unsigned int i = 0;
unsigned int pinned = 0;
Cloth *cloth = NULL;
ClothVertex *verts = NULL;
ClothSpring *spring = NULL;
@@ -743,24 +765,19 @@ int implicit_init(Object *UNUSED(ob), ClothModifierData *clmd)
id->dV = create_lfvector(cloth->numverts);
id->z = create_lfvector(cloth->numverts);
for (i=0;i<cloth->numverts;i++) {
id->S[0].vcount = 0;
for (i = 0; i < cloth->numverts; i++) {
id->A[i].r = id->A[i].c = id->dFdV[i].r = id->dFdV[i].c = id->dFdX[i].r = id->dFdX[i].c = id->P[i].c = id->P[i].r = id->Pinv[i].c = id->Pinv[i].r = id->bigI[i].c = id->bigI[i].r = id->M[i].r = id->M[i].c = i;
if (verts [i].flags & CLOTH_VERT_FLAG_PINNED) {
id->S[pinned].pinned = 1;
id->S[pinned].c = id->S[pinned].r = i;
pinned++;
}
update_matrixS(verts, cloth->numverts, id->S);
initdiag_fmatrixS(id->M[i].m, verts[i].mass);
}
// S is special and needs specific vcount and scount
id->S[0].vcount = pinned; id->S[0].scount = 0;
// init springs
search = cloth->springs;
for (i=0;i<cloth->numsprings;i++) {
for (i = 0; i < cloth->numsprings; i++) {
spring = search->link;
// dFdV_start[i].r = big_I[i].r = big_zero[i].r =
@@ -784,6 +801,7 @@ int implicit_init(Object *UNUSED(ob), ClothModifierData *clmd)
return 1;
}
int implicit_free(ClothModifierData *clmd)
{
Implicit_Data *id;
@@ -1640,8 +1658,9 @@ static void cloth_calc_force(ClothModifierData *clmd, float UNUSED(frame), lfVec
search = cloth->springs;
while (search) {
// only handle active springs
// if (((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) && !(springs[i].flags & CSPRING_FLAG_DEACTIVATE))|| !(clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED)) {}
cloth_calc_spring_force(clmd, search->link, lF, lX, lV, dFdV, dFdX, time);
ClothSpring *spring = search->link;
if( !(spring->flags & CLOTH_SPRING_FLAG_DEACTIVATE))
cloth_calc_spring_force(clmd, search->link, lF, lX, lV, dFdV, dFdX, time);
search = search->next;
}
@@ -1650,8 +1669,9 @@ static void cloth_calc_force(ClothModifierData *clmd, float UNUSED(frame), lfVec
search = cloth->springs;
while (search) {
// only handle active springs
// if (((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) && !(springs[i].flags & CSPRING_FLAG_DEACTIVATE))|| !(clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED))
cloth_apply_spring_force(clmd, search->link, lF, lX, lV, dFdV, dFdX);
ClothSpring *spring = search->link;
if (!(spring->flags & CLOTH_SPRING_FLAG_DEACTIVATE))
cloth_apply_spring_force(clmd, search->link, lF, lX, lV, dFdV, dFdX);
search = search->next;
}
// printf("\n");
@@ -1781,6 +1801,10 @@ int implicit_solver(Object *ob, float frame, ClothModifierData *clmd, ListBase *
int do_extra_solve;
if (clmd->sim_parms->flags & CLOTH_SIMSETTINGS_FLAG_GOAL) { /* do goal stuff */
/* Update vertex constraints for pinned vertices */
update_matrixS(verts, cloth->numverts, id->S);
for (i = 0; i < numverts; i++) {
// update velocities with constrained velocities from pinned verts
if (verts [i].flags & CLOTH_VERT_FLAG_PINNED) {

View File

@@ -1665,7 +1665,7 @@ void quat_apply_track(float quat[4], short axis, short upflag)
{M_SQRT1_2, 0.0, 0.0, M_SQRT1_2}, /* pos-z90 */
{M_SQRT1_2, 0.0, M_SQRT1_2, 0.0}, /* neg-y90 */
{0.5, -0.5, -0.5, 0.5}, /* Quaternion((1,0,0), radians(-90)) * Quaternion((0,1,0), radians(-90)) */
{-3.0908619663705394e-08, M_SQRT1_2, M_SQRT1_2, 3.0908619663705394e-08} /* no rotation */
{0.0, M_SQRT1_2, M_SQRT1_2, 0.0} /* no rotation */
};
assert(axis >= 0 && axis <= 5);

View File

@@ -50,6 +50,21 @@ set(INC_SYS
)
# --- data file ---
# ... may make this a macro
list(APPEND INC
${CMAKE_CURRENT_BINARY_DIR}/operations
)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/operations/COM_OpenCLKernels.cl.h
COMMAND ${CMAKE_COMMAND}
-DFILE_FROM=${CMAKE_CURRENT_SOURCE_DIR}/operations/COM_OpenCLKernels.cl
-DFILE_TO=${CMAKE_CURRENT_BINARY_DIR}/operations/COM_OpenCLKernels.cl.h
-DVAR_NAME=clkernelstoh_COM_OpenCLKernels_cl
-P ${CMAKE_SOURCE_DIR}/build_files/cmake/data_to_c.cmake
DEPENDS operations/COM_OpenCLKernels.cl)
# --- end data file --
set(SRC
COM_compositor.h
COM_defines.h
@@ -638,6 +653,9 @@ set(SRC
operations/COM_MaskOperation.cpp
operations/COM_MaskOperation.h
# generated file
${CMAKE_CURRENT_BINARY_DIR}/operations/COM_OpenCLKernels.cl.h
)
blender_add_lib(bf_compositor "${SRC}" "${INC}" "${INC_SYS}")

View File

@@ -11,4 +11,7 @@ incs += '../opencl ../nodes ../nodes/intern ../nodes/composite '
if env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'linuxcross', 'win64-vc'):
incs += ' ' + env['BF_PTHREADS_INC']
# data files
incs += ' ' + env['DATA_HEADERS']
env.BlenderLib ( 'bf_composite', sources, Split(incs), defines=defs, libtype=['core'], priority = [164] )

View File

@@ -288,7 +288,8 @@ void WorkScheduler::initialize()
g_context = clCreateContext(NULL, numberOfDevices, cldevices, clContextError, NULL, &error);
if (error != CL_SUCCESS) { printf("CLERROR[%d]: %s\n", error, clewErrorString(error)); }
g_program = clCreateProgramWithSource(g_context, 1, &clkernelstoh_COM_OpenCLKernels_cl, 0, &error);
const char *cl_str[2] = {clkernelstoh_COM_OpenCLKernels_cl, NULL};
g_program = clCreateProgramWithSource(g_context, 1, cl_str, 0, &error);
error = clBuildProgram(g_program, numberOfDevices, cldevices, 0, 0, 0);
if (error != CL_SUCCESS) {
cl_int error2;

View File

@@ -1,250 +0,0 @@
/* clkernelstoh output of file <COM_OpenCLKernels_cl> */
const char * clkernelstoh_COM_OpenCLKernels_cl = "/*\n" \
" * Copyright 2011, Blender Foundation.\n" \
" *\n" \
" * This program is free software; you can redistribute it and/or\n" \
" * modify it under the terms of the GNU General Public License\n" \
" * as published by the Free Software Foundation; either version 2\n" \
" * of the License, or (at your option) any later version.\n" \
" *\n" \
" * This program is distributed in the hope that it will be useful,\n" \
" * but WITHOUT ANY WARRANTY; without even the implied warranty of\n" \
" * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n" \
" * GNU General Public License for more details.\n" \
" *\n" \
" * You should have received a copy of the GNU General Public License\n" \
" * along with this program; if not, write to the Free Software Foundation,\n" \
" * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n" \
" *\n" \
" * Contributor:\n" \
" * Jeroen Bakker\n" \
" * Monique Dewanchand\n" \
" */\n" \
"\n" \
"/// This file contains all opencl kernels for node-operation implementations\n" \
"\n" \
"// Global SAMPLERS\n" \
"const sampler_t SAMPLER_NEAREST = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST;\n" \
"const sampler_t SAMPLER_NEAREST_CLAMP = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;\n" \
"\n" \
"__constant const int2 zero = {0,0};\n" \
"\n" \
"// KERNEL --- BOKEH BLUR ---\n" \
"__kernel void bokehBlurKernel(__read_only image2d_t boundingBox, __read_only image2d_t inputImage,\n" \
" __read_only image2d_t bokehImage, __write_only image2d_t output,\n" \
" int2 offsetInput, int2 offsetOutput, int radius, int step, int2 dimension, int2 offset)\n" \
"{\n" \
" int2 coords = {get_global_id(0), get_global_id(1)};\n" \
" coords += offset;\n" \
" float tempBoundingBox;\n" \
" float4 color = {0.0f,0.0f,0.0f,0.0f};\n" \
" float4 multiplyer = {0.0f,0.0f,0.0f,0.0f};\n" \
" float4 bokeh;\n" \
" const float radius2 = radius*2.0f;\n" \
" const int2 realCoordinate = coords + offsetOutput;\n" \
"\n" \
" tempBoundingBox = read_imagef(boundingBox, SAMPLER_NEAREST, coords).s0;\n" \
"\n" \
" if (tempBoundingBox > 0.0f && radius > 0 ) {\n" \
" const int2 bokehImageDim = get_image_dim(bokehImage);\n" \
" const int2 bokehImageCenter = bokehImageDim/2;\n" \
" const int2 minXY = max(realCoordinate - radius, zero);\n" \
" const int2 maxXY = min(realCoordinate + radius, dimension);\n" \
" int nx, ny;\n" \
"\n" \
" float2 uv;\n" \
" int2 inputXy;\n" \
"\n" \
" for (ny = minXY.y, inputXy.y = ny - offsetInput.y ; ny < maxXY.y ; ny +=step, inputXy.y+=step) {\n" \
" uv.y = ((realCoordinate.y-ny)/radius2)*bokehImageDim.y+bokehImageCenter.y;\n" \
"\n" \
" for (nx = minXY.x, inputXy.x = nx - offsetInput.x; nx < maxXY.x ; nx +=step, inputXy.x+=step) {\n" \
" uv.x = ((realCoordinate.x-nx)/radius2)*bokehImageDim.x+bokehImageCenter.x;\n" \
" bokeh = read_imagef(bokehImage, SAMPLER_NEAREST, uv);\n" \
" color += bokeh * read_imagef(inputImage, SAMPLER_NEAREST, inputXy);\n" \
" multiplyer += bokeh;\n" \
" }\n" \
" }\n" \
" color /= multiplyer;\n" \
"\n" \
" } else {\n" \
" int2 imageCoordinates = realCoordinate - offsetInput;\n" \
" color = read_imagef(inputImage, SAMPLER_NEAREST, imageCoordinates);\n" \
" }\n" \
"\n" \
" write_imagef(output, coords, color);\n" \
"}\n" \
"\n" \
"//KERNEL --- DEFOCUS /VARIABLESIZEBOKEHBLUR ---\n" \
"__kernel void defocusKernel(__read_only image2d_t inputImage, __read_only image2d_t bokehImage,\n" \
" __read_only image2d_t inputSize,\n" \
" __write_only image2d_t output, int2 offsetInput, int2 offsetOutput,\n" \
" int step, int maxBlur, float threshold, int2 dimension, int2 offset)\n" \
"{\n" \
" float4 color = {1.0f, 0.0f, 0.0f, 1.0f};\n" \
" int2 coords = {get_global_id(0), get_global_id(1)};\n" \
" coords += offset;\n" \
" const int2 realCoordinate = coords + offsetOutput;\n" \
"\n" \
" float4 readColor;\n" \
" float4 tempColor;\n" \
" float4 bokeh;\n" \
" float size;\n" \
" float4 multiplier_accum = {1.0f, 1.0f, 1.0f, 1.0f};\n" \
" float4 color_accum;\n" \
"\n" \
" int minx = max(realCoordinate.s0 - maxBlur, 0);\n" \
" int miny = max(realCoordinate.s1 - maxBlur, 0);\n" \
" int maxx = min(realCoordinate.s0 + maxBlur, dimension.s0);\n" \
" int maxy = min(realCoordinate.s1 + maxBlur, dimension.s1);\n" \
"\n" \
" {\n" \
" int2 inputCoordinate = realCoordinate - offsetInput;\n" \
" float size_center = read_imagef(inputSize, SAMPLER_NEAREST, inputCoordinate).s0;\n" \
" color_accum = read_imagef(inputImage, SAMPLER_NEAREST, inputCoordinate);\n" \
" readColor = color_accum;\n" \
"\n" \
" if (size_center > threshold) {\n" \
" for (int ny = miny; ny < maxy; ny += step) {\n" \
" inputCoordinate.s1 = ny - offsetInput.s1;\n" \
" float dy = ny - realCoordinate.s1;\n" \
" for (int nx = minx; nx < maxx; nx += step) {\n" \
" float dx = nx - realCoordinate.s0;\n" \
" if (dx != 0 || dy != 0) {\n" \
" inputCoordinate.s0 = nx - offsetInput.s0;\n" \
" size = read_imagef(inputSize, SAMPLER_NEAREST, inputCoordinate).s0;\n" \
" if (size > threshold) {\n" \
" if (size >= fabs(dx) && size >= fabs(dy)) {\n" \
" float2 uv = {256.0f + dx * 255.0f / size,\n" \
" 256.0f + dy * 255.0f / size};\n" \
" bokeh = read_imagef(bokehImage, SAMPLER_NEAREST, uv);\n" \
" tempColor = read_imagef(inputImage, SAMPLER_NEAREST, inputCoordinate);\n" \
" color_accum += bokeh * tempColor;\n" \
" multiplier_accum += bokeh;\n" \
" }\n" \
" }\n" \
" }\n" \
" }\n" \
" }\n" \
" }\n" \
"\n" \
" color = color_accum * (1.0f / multiplier_accum);\n" \
"\n" \
" /* blend in out values over the threshold, otherwise we get sharp, ugly transitions */\n" \
" if ((size_center > threshold) &&\n" \
" (size_center < threshold * 2.0f))\n" \
" {\n" \
" /* factor from 0-1 */\n" \
" float fac = (size_center - threshold) / threshold;\n" \
" color = (readColor * (1.0f - fac)) + (color * fac);\n" \
" }\n" \
"\n" \
" write_imagef(output, coords, color);\n" \
" }\n" \
"}\n" \
"\n" \
"\n" \
"// KERNEL --- DILATE ---\n" \
"__kernel void dilateKernel(__read_only image2d_t inputImage, __write_only image2d_t output,\n" \
" int2 offsetInput, int2 offsetOutput, int scope, int distanceSquared, int2 dimension,\n" \
" int2 offset)\n" \
"{\n" \
" int2 coords = {get_global_id(0), get_global_id(1)};\n" \
" coords += offset;\n" \
" const int2 realCoordinate = coords + offsetOutput;\n" \
"\n" \
" const int2 minXY = max(realCoordinate - scope, zero);\n" \
" const int2 maxXY = min(realCoordinate + scope, dimension);\n" \
"\n" \
" float value = 0.0f;\n" \
" int nx, ny;\n" \
" int2 inputXy;\n" \
"\n" \
" for (ny = minXY.y, inputXy.y = ny - offsetInput.y ; ny < maxXY.y ; ny ++, inputXy.y++) {\n" \
" const float deltaY = (realCoordinate.y - ny);\n" \
" for (nx = minXY.x, inputXy.x = nx - offsetInput.x; nx < maxXY.x ; nx ++, inputXy.x++) {\n" \
" const float deltaX = (realCoordinate.x - nx);\n" \
" const float measuredDistance = deltaX * deltaX + deltaY * deltaY;\n" \
" if (measuredDistance <= distanceSquared) {\n" \
" value = max(value, read_imagef(inputImage, SAMPLER_NEAREST, inputXy).s0);\n" \
" }\n" \
" }\n" \
" }\n" \
"\n" \
" float4 color = {value,0.0f,0.0f,0.0f};\n" \
" write_imagef(output, coords, color);\n" \
"}\n" \
"\n" \
"// KERNEL --- DILATE ---\n" \
"__kernel void erodeKernel(__read_only image2d_t inputImage, __write_only image2d_t output,\n" \
" int2 offsetInput, int2 offsetOutput, int scope, int distanceSquared, int2 dimension,\n" \
" int2 offset)\n" \
"{\n" \
" int2 coords = {get_global_id(0), get_global_id(1)};\n" \
" coords += offset;\n" \
" const int2 realCoordinate = coords + offsetOutput;\n" \
"\n" \
" const int2 minXY = max(realCoordinate - scope, zero);\n" \
" const int2 maxXY = min(realCoordinate + scope, dimension);\n" \
"\n" \
" float value = 1.0f;\n" \
" int nx, ny;\n" \
" int2 inputXy;\n" \
"\n" \
" for (ny = minXY.y, inputXy.y = ny - offsetInput.y ; ny < maxXY.y ; ny ++, inputXy.y++) {\n" \
" for (nx = minXY.x, inputXy.x = nx - offsetInput.x; nx < maxXY.x ; nx ++, inputXy.x++) {\n" \
" const float deltaX = (realCoordinate.x - nx);\n" \
" const float deltaY = (realCoordinate.y - ny);\n" \
" const float measuredDistance = deltaX * deltaX+deltaY * deltaY;\n" \
" if (measuredDistance <= distanceSquared) {\n" \
" value = min(value, read_imagef(inputImage, SAMPLER_NEAREST, inputXy).s0);\n" \
" }\n" \
" }\n" \
" }\n" \
"\n" \
" float4 color = {value,0.0f,0.0f,0.0f};\n" \
" write_imagef(output, coords, color);\n" \
"}\n" \
"\n" \
"// KERNEL --- DIRECTIONAL BLUR ---\n" \
"__kernel void directionalBlurKernel(__read_only image2d_t inputImage, __write_only image2d_t output,\n" \
" int2 offsetOutput, int iterations, float scale, float rotation, float2 translate,\n" \
" float2 center, int2 offset)\n" \
"{\n" \
" int2 coords = {get_global_id(0), get_global_id(1)};\n" \
" coords += offset;\n" \
" const int2 realCoordinate = coords + offsetOutput;\n" \
"\n" \
" float4 col;\n" \
" float2 ltxy = translate;\n" \
" float lsc = scale;\n" \
" float lrot = rotation;\n" \
"\n" \
" col = read_imagef(inputImage, SAMPLER_NEAREST, realCoordinate);\n" \
"\n" \
" /* blur the image */\n" \
" for (int i = 0; i < iterations; ++i) {\n" \
" const float cs = cos(lrot), ss = sin(lrot);\n" \
" const float isc = 1.0f / (1.0f + lsc);\n" \
"\n" \
" const float v = isc * (realCoordinate.s1 - center.s1) + ltxy.s1;\n" \
" const float u = isc * (realCoordinate.s0 - center.s0) + ltxy.s0;\n" \
" float2 uv = {\n" \
" cs * u + ss * v + center.s0,\n" \
" cs * v - ss * u + center.s1\n" \
" };\n" \
"\n" \
" col += read_imagef(inputImage, SAMPLER_NEAREST_CLAMP, uv);\n" \
"\n" \
" /* double transformations */\n" \
" ltxy += translate;\n" \
" lrot += rotation;\n" \
" lsc += scale;\n" \
" }\n" \
"\n" \
" col *= (1.0f/(iterations+1));\n" \
"\n" \
" write_imagef(output, coords, col);\n" \
"}\n" \
"\0";

View File

@@ -2218,6 +2218,88 @@ static void UV_OT_select_linked_pick(wmOperatorType *ot)
"Location", "Mouse location in normalized coordinates, 0.0 to 1.0 is within the image bounds", -100.0f, 100.0f);
}
/* note: this is based on similar use case to MESH_OT_split(), which has a similar effect
 * but in this case they are not joined to begin with (only having the behavior of being joined)
 * so its best to call this select_split() instead of just split(), but assigned to the same key
 * as MESH_OT_split - Campbell */
/* Deselect every UV face with mixed (partial) selection, leaving only
 * entirely-selected faces selected.  Fails in UV sync-selection mode. */
static int select_split_exec(bContext *C, wmOperator *op)
{
	Scene *scene = CTX_data_scene(C);
	ToolSettings *ts = scene->toolsettings;
	Image *ima = CTX_data_edit_image(C);
	Object *obedit = CTX_data_edit_object(C);
	BMesh *bm = BMEdit_FromObject(obedit)->bm;

	BMFace *efa;
	BMLoop *l;
	BMIter iter, liter;
	MTexPoly *tf;
	MLoopUV *luv;
	short change = FALSE;

	/* Sync selection shares state with the mesh, where this has no meaning. */
	if (ts->uv_flag & UV_SYNC_SELECTION) {
		BKE_report(op->reports, RPT_ERROR, "Can't split selection when sync selection is enabled");
		return OPERATOR_CANCELLED;
	}

	BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
		int is_sel = FALSE;
		int is_unsel = FALSE;

		tf = CustomData_bmesh_get(&bm->pdata, efa->head.data, CD_MTEXPOLY);
		if (!uvedit_face_visible_test(scene, ima, efa, tf))
			continue;

		/* are we all selected? */
		BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) {
			luv = CustomData_bmesh_get(&bm->ldata, l->head.data, CD_MLOOPUV);
			if (luv->flag & MLOOPUV_VERTSEL) {
				is_sel = TRUE;
			}
			else {
				is_unsel = TRUE;
			}

			/* we have mixed selection, bail out */
			if (is_sel && is_unsel) {
				break;
			}
		}

		/* Mixed selection: clear the whole face's UV vert selection. */
		if (is_sel && is_unsel) {
			BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) {
				luv = CustomData_bmesh_get(&bm->ldata, l->head.data, CD_MLOOPUV);
				luv->flag &= ~MLOOPUV_VERTSEL;
			}

			change = TRUE;
		}
	}

	/* NOTE(review): no WM notifier is sent when a change is made — confirm
	 * the UV editor still redraws after this operator runs. */
	if (change) {
		return OPERATOR_FINISHED;
	}
	else {
		return OPERATOR_CANCELLED;
	}
}
/* Operator registration for UV_OT_select_split (see select_split_exec). */
static void UV_OT_select_split(wmOperatorType *ot)
{
	/* identifiers */
	ot->name = "Select Split";
	ot->description = "Select only entirely selected faces";
	ot->idname = "UV_OT_select_split";
	ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;

	/* api callbacks */
	ot->exec = select_split_exec;
	ot->poll = ED_operator_uvedit; /* requires space image */
}
/* ******************** unlink selection operator **************** */
static int unlink_selection_exec(bContext *C, wmOperator *op)
@@ -3680,6 +3762,7 @@ void ED_operatortypes_uvedit(void)
WM_operatortype_append(UV_OT_select_loop);
WM_operatortype_append(UV_OT_select_linked);
WM_operatortype_append(UV_OT_select_linked_pick);
WM_operatortype_append(UV_OT_select_split);
WM_operatortype_append(UV_OT_unlink_selected);
WM_operatortype_append(UV_OT_select_pinned);
WM_operatortype_append(UV_OT_select_border);
@@ -3735,6 +3818,7 @@ void ED_keymap_uvedit(wmKeyConfig *keyconf)
RNA_boolean_set(WM_keymap_add_item(keymap, "UV_OT_select", SELECTMOUSE, KM_PRESS, KM_SHIFT, 0)->ptr, "extend", TRUE);
RNA_boolean_set(WM_keymap_add_item(keymap, "UV_OT_select_loop", SELECTMOUSE, KM_PRESS, KM_ALT, 0)->ptr, "extend", FALSE);
RNA_boolean_set(WM_keymap_add_item(keymap, "UV_OT_select_loop", SELECTMOUSE, KM_PRESS, KM_SHIFT | KM_ALT, 0)->ptr, "extend", TRUE);
WM_keymap_add_item(keymap, "UV_OT_select_split", YKEY, KM_PRESS, 0, 0);
/* border/circle selection */
kmi = WM_keymap_add_item(keymap, "UV_OT_select_border", BKEY, KM_PRESS, 0, 0);

View File

@@ -439,6 +439,15 @@ static void rna_NodeGroup_update(Main *bmain, Scene *scene, PointerRNA *ptr)
node_update(bmain, scene, ntree, node);
}
/* Poll callback for the group node's node_tree pointer property:
 * accept a candidate group tree only when it matches the owning tree's type.
 * NOTE(review): assumes value.data is never NULL when RNA polls — confirm. */
static int rna_NodeGroup_node_tree_poll(PointerRNA *ptr, const PointerRNA value)
{
	bNodeTree *ntree = (bNodeTree *)ptr->id.data;
	bNodeTree *ngroup = (bNodeTree *)value.data;

	/* only allow node trees of the same type as the group node's tree */
	return (ngroup->type == ntree->type);
}
static void rna_Node_name_set(PointerRNA *ptr, const char *value)
{
bNodeTree *ntree = (bNodeTree *)ptr->id.data;
@@ -1129,6 +1138,7 @@ static void def_group(StructRNA *srna)
prop = RNA_def_property(srna, "node_tree", PROP_POINTER, PROP_NONE);
RNA_def_property_pointer_sdna(prop, NULL, "id");
RNA_def_property_struct_type(prop, "NodeTree");
RNA_def_property_pointer_funcs(prop, NULL, NULL, NULL, "rna_NodeGroup_node_tree_poll");
RNA_def_property_flag(prop, PROP_EDITABLE);
RNA_def_property_ui_text(prop, "Node Tree", "");
RNA_def_property_update(prop, NC_NODE | NA_EDITED, "rna_NodeGroup_update");