Merge with trunk r38281
@@ -58,11 +58,39 @@ suffix_relpaths(SRC_NEW "${SRC}" "../../guardedalloc/")
include_directories(${INC_NEW})
add_library(guardedalloc_lib ${SRC_NEW})

# blenfont
include(${CMAKE_SOURCE_DIR}/../../../source/blender/blenfont/CMakeLists.txt)
suffix_relpaths(INC_NEW "${INC}" "../../../source/blender/blenfont/")
suffix_relpaths(SRC_NEW "${SRC}" "../../../source/blender/blenfont/")
include_directories(${INC_NEW})
add_library(blenfont_lib ${SRC_NEW})

# grr, blenfont needs BLI
include_directories(
"../../../source/blender/blenlib"
"../../../source/blender/blenloader"
)
add_library(bli_lib
"../../../source/blender/blenlib/intern/fileops.c"
"../../../source/blender/blenlib/intern/rct.c"
"../../../source/blender/blenlib/intern/string.c"
"../../../source/blender/blenlib/intern/listbase.c"
"../../../source/blender/blenlib/intern/storage.c"
"../../../source/blender/blenlib/intern/path_util.c"
"../../../source/blender/blenlib/intern/BLI_dynstr.c"
"../../../source/blender/blenlib/intern/BLI_linklist.c"
"../../../source/blender/blenlib/intern/BLI_memarena.c"
)


find_package(OpenGL REQUIRED)

find_package(Freetype REQUIRED)

include_directories(${CMAKE_SOURCE_DIR}/../)
include_directories(${OPENGL_INCLUDE_DIR})
include_directories(${FREETYPE_INCLUDE_DIRS})
include_directories(${CMAKE_SOURCE_DIR}/../../../source/blender/blenfont)

if(UNIX AND NOT APPLE)
find_package(X11 REQUIRED)
@@ -105,6 +133,7 @@ target_link_libraries(gears_cpp

# MultiTest (C)
add_executable(multitest_c
${CMAKE_SOURCE_DIR}/../../../source/blender/editors/datafiles/bfont.ttf.c
${CMAKE_SOURCE_DIR}/multitest/Basic.c
${CMAKE_SOURCE_DIR}/multitest/EventToBuf.c
${CMAKE_SOURCE_DIR}/multitest/MultiTest.c
@@ -114,10 +143,13 @@ add_executable(multitest_c
)

target_link_libraries(multitest_c
blenfont_lib
bli_lib
ghost_lib
string_lib
guardedalloc_lib
${OPENGL_gl_LIBRARY}
${OPENGL_glu_LIBRARY}
${FREETYPE_LIBRARY}
${PLATFORM_LINKLIBS}
)

@@ -42,7 +42,18 @@
#include "MEM_guardedalloc.h"

#include "GHOST_C-api.h"
#include "BMF_Api.h"

#ifdef USE_BMF
# include "BMF_Api.h"
#else
# include "BLF_api.h"
extern int datatoc_bfont_ttf_size;
extern char datatoc_bfont_ttf[];

// XXX, bad, but BLI uses these
char bprogname[160]= "";
char U[1024]= {0};
#endif

#include "Util.h"
#include "Basic.h"
@@ -291,7 +302,7 @@ MainWindow *mainwindow_new(MultiTestApp *app) {

win= GHOST_CreateWindow(sys, "MultiTest:Main", 40, 40, 400, 400,
GHOST_kWindowStateNormal, GHOST_kDrawingContextTypeOpenGL,
FALSE);
FALSE, FALSE);

if (win) {
MainWindow *mw= MEM_callocN(sizeof(*mw), "mainwindow_new");
@@ -324,8 +335,12 @@ struct _LoggerWindow {
MultiTestApp *app;

GHOST_WindowHandle win;


#ifdef USE_BMF
BMF_Font *font;
#else
int font;
#endif
int fonttexid;
int fontheight;

@@ -429,18 +444,26 @@ static void loggerwindow_do_draw(LoggerWindow *lw) {
char *line= lw->loglines[(lw->nloglines-1)-(i+startline)];
int x_pos= lw->textarea[0][0] + 4;
int y_pos= lw->textarea[0][1] + 4 + i*lw->fontheight;


#ifdef USE_BMF
if (lw->fonttexid==-1) {
glRasterPos2i(x_pos, y_pos);
BMF_DrawString(lw->font, line);
} else {
BMF_DrawStringTexture(lw->font, line, x_pos, y_pos, 0.0);
}
#else
BLF_position(lw->font, x_pos, y_pos, 0.0);
BLF_draw(lw->font, line, 256); // XXX
#endif
}

#ifdef USE_BMF
if (lw->fonttexid!=-1) {
glDisable(GL_TEXTURE_2D);
glDisable(GL_BLEND);
}
#endif

GHOST_SwapWindowBuffers(lw->win);
}
@@ -531,19 +554,25 @@ LoggerWindow *loggerwindow_new(MultiTestApp *app) {
GHOST_GetMainDisplayDimensions(sys, &screensize[0], &screensize[1]);
win= GHOST_CreateWindow(sys, "MultiTest:Logger", 40, screensize[1]-432,
800, 300, GHOST_kWindowStateNormal,
GHOST_kDrawingContextTypeOpenGL, FALSE);
GHOST_kDrawingContextTypeOpenGL, FALSE, FALSE);

if (win) {
LoggerWindow *lw= MEM_callocN(sizeof(*lw), "loggerwindow_new");
int bbox[2][2];
lw->app= app;
lw->win= win;


#ifdef USE_BMF
lw->font= BMF_GetFont(BMF_kScreen12);
lw->fonttexid= BMF_GetFontTexture(lw->font);

BMF_GetBoundingBox(lw->font, &bbox[0][0], &bbox[0][1], &bbox[1][0], &bbox[1][1]);
lw->fontheight= rect_height(bbox);
#else
lw->font= BLF_load_mem("default", (unsigned char*)datatoc_bfont_ttf, datatoc_bfont_ttf_size);
BLF_size(lw->font, 11, 72);
lw->fontheight= BLF_height(lw->font, "A_");
#endif

lw->nloglines= lw->logsize= 0;
lw->loglines= MEM_mallocN(sizeof(*lw->loglines)*lw->nloglines, "loglines");
@@ -711,7 +740,7 @@ ExtraWindow *extrawindow_new(MultiTestApp *app) {

win= GHOST_CreateWindow(sys, "MultiTest:Extra", 500, 40, 400, 400,
GHOST_kWindowStateNormal, GHOST_kDrawingContextTypeOpenGL,
FALSE);
FALSE, FALSE);

if (win) {
ExtraWindow *ew= MEM_callocN(sizeof(*ew), "mainwindow_new");
@@ -786,7 +815,7 @@ static int multitest_event_handler(GHOST_EventHandle evt, GHOST_TUserDataPtr dat
MultiTestApp *multitestapp_new(void) {
MultiTestApp *app= MEM_mallocN(sizeof(*app), "multitestapp_new");
GHOST_EventConsumerHandle consumer= GHOST_CreateEventConsumer(multitest_event_handler, app);


app->sys= GHOST_CreateSystem();
if (!app->sys)
fatal("Unable to create ghost system");
@@ -850,6 +879,10 @@ void multitestapp_free(MultiTestApp *app) {
/***/

int main(int argc, char **argv) {
#ifndef USE_BMF
BLF_init(11, 72);
#endif

MultiTestApp *app= multitestapp_new();

multitestapp_run(app);

@@ -59,6 +59,7 @@ void IK_QJacobian::ArmMatrices(int dof, int task_size)

m_d_theta.newsize(dof);
m_d_theta_tmp.newsize(dof);
m_d_norm_weight.newsize(dof);

m_norm.newsize(dof);
m_norm = 0.0;
@@ -111,11 +112,13 @@ void IK_QJacobian::SetBetas(int id, int, const MT_Vector3& v)
m_beta[id+2] = v.z();
}

void IK_QJacobian::SetDerivatives(int id, int dof_id, const MT_Vector3& v)
void IK_QJacobian::SetDerivatives(int id, int dof_id, const MT_Vector3& v, MT_Scalar norm_weight)
{
m_jacobian[id][dof_id] = v.x()*m_weight_sqrt[dof_id];
m_jacobian[id+1][dof_id] = v.y()*m_weight_sqrt[dof_id];
m_jacobian[id+2][dof_id] = v.z()*m_weight_sqrt[dof_id];

m_d_norm_weight[dof_id] = norm_weight;
}

void IK_QJacobian::Invert()
@@ -429,7 +432,7 @@ MT_Scalar IK_QJacobian::AngleUpdateNorm() const
MT_Scalar mx = 0.0, dtheta_abs;

for (i = 0; i < m_d_theta.size(); i++) {
dtheta_abs = MT_abs(m_d_theta[i]);
dtheta_abs = MT_abs(m_d_theta[i]*m_d_norm_weight[i]);
if (dtheta_abs > mx)
mx = dtheta_abs;
}

@@ -56,7 +56,7 @@ public:

// Iteratively called
void SetBetas(int id, int size, const MT_Vector3& v);
void SetDerivatives(int id, int dof_id, const MT_Vector3& v);
void SetDerivatives(int id, int dof_id, const MT_Vector3& v, MT_Scalar norm_weight);

void Invert();

@@ -89,6 +89,7 @@ private:

/// the vector of computed angle changes
TVector m_d_theta;
TVector m_d_norm_weight;

/// space required for SVD computation


@@ -95,10 +95,10 @@ void IK_QPositionTask::ComputeJacobian(IK_QJacobian& jacobian)
MT_Vector3 axis = seg->Axis(i)*m_weight;

if (seg->Translational())
jacobian.SetDerivatives(m_id, seg->DoFId()+i, axis);
jacobian.SetDerivatives(m_id, seg->DoFId()+i, axis, 1e2);
else {
MT_Vector3 pa = p.cross(axis);
jacobian.SetDerivatives(m_id, seg->DoFId()+i, pa);
jacobian.SetDerivatives(m_id, seg->DoFId()+i, pa, 1e0);
}
}
}
@@ -147,10 +147,10 @@ void IK_QOrientationTask::ComputeJacobian(IK_QJacobian& jacobian)
for (i = 0; i < seg->NumberOfDoF(); i++) {

if (seg->Translational())
jacobian.SetDerivatives(m_id, seg->DoFId()+i, MT_Vector3(0, 0, 0));
jacobian.SetDerivatives(m_id, seg->DoFId()+i, MT_Vector3(0, 0, 0), 1e2);
else {
MT_Vector3 axis = seg->Axis(i)*m_weight;
jacobian.SetDerivatives(m_id, seg->DoFId()+i, axis);
jacobian.SetDerivatives(m_id, seg->DoFId()+i, axis, 1e0);
}
}
}
@@ -202,10 +202,10 @@ void IK_QCenterOfMassTask::JacobianSegment(IK_QJacobian& jacobian, MT_Vector3& c
axis *= /*segment->Mass()**/m_total_mass_inv;

if (segment->Translational())
jacobian.SetDerivatives(m_id, segment->DoFId()+i, axis);
jacobian.SetDerivatives(m_id, segment->DoFId()+i, axis, 1e2);
else {
MT_Vector3 pa = axis.cross(p);
jacobian.SetDerivatives(m_id, segment->DoFId()+i, pa);
jacobian.SetDerivatives(m_id, segment->DoFId()+i, pa, 1e0);
}
}


@@ -48,8 +48,6 @@ def paths():

def modules(module_cache):
import os
import sys
import time

path_list = paths()

@@ -173,11 +171,9 @@ def enable(module_name, default_set=True):
:return: the loaded module or None on failier.
:rtype: module
"""
# note, this still gets added to _bpy_types.TypeMap

import os
import sys
import bpy_types as _bpy_types
import imp

def handle_error():
@@ -246,8 +242,6 @@ def disable(module_name, default_set=True):
:type module_name: string
"""
import sys
import bpy_types as _bpy_types

mod = sys.modules.get(module_name)

# possible this addon is from a previous session and didnt load a module this time.

@@ -22,7 +22,6 @@ __all__ = (
"region_2d_to_vector_3d",
"region_2d_to_location_3d",
"location_3d_to_region_2d",
"location_3d_to_region_2d",
)



@@ -241,7 +241,7 @@ class _GenericBone:
chain.append(child)
else:
if len(children_basename):
print("multiple basenames found, this is probably not what you want!", bone.name, children_basename)
print("multiple basenames found, this is probably not what you want!", self.name, children_basename)

break


@@ -22,7 +22,6 @@
import bpy as _bpy
import bpyml
from bpyml import TAG, ARGS, CHILDREN
from types import ModuleType

_uilayout_rna = _bpy.types.UILayout.bl_rna


@@ -114,7 +114,7 @@ def draw(layout, context, context_member, property_type, use_edit=True):
to_dict = getattr(val, "to_dict", None)
to_list = getattr(val, "to_list", None)

val_orig = val
# val_orig = val # UNUSED
if to_dict:
val = to_dict()
val_draw = str(val)

@@ -121,7 +121,6 @@ class ProjectEdit(bpy.types.Operator):

def execute(self, context):
import os
import subprocess

EXT = "png" # could be made an option but for now ok


@@ -87,8 +87,6 @@ class MeshMirrorUV(bpy.types.Operator):
def execute(self, context):
DIR = (self.direction == 'NEGATIVE')

from mathutils import Vector

ob = context.active_object
is_editmode = (ob.mode == 'EDIT')
if is_editmode:

@@ -98,8 +98,6 @@ def align_objects(align_x, align_y, align_z, align_mode, relative_to):
# Main Loop

for obj, bb_world in objs:

loc_world = obj.location
bb_world = [Vector(v[:]) * obj.matrix_world for v in obj.bound_box]

Left_Up_Front = bb_world[1]

@@ -78,7 +78,7 @@ class PlayRenderedAnim(bpy.types.Operator):

preset = prefs.filepaths.animation_player_preset
player_path = prefs.filepaths.animation_player
file_path = bpy.path.abspath(rd.filepath)
# file_path = bpy.path.abspath(rd.filepath) # UNUSED
is_movie = rd.is_movie_format

# try and guess a command line if it doesn't exist

@@ -42,7 +42,6 @@ def extend(obj, operator, EXTEND_MODE):
edge_average_lengths = {}

OTHER_INDEX = 2, 3, 0, 1
FAST_INDICIES = 0, 2, 1, 3 # order is faster

def extend_uvs(face_source, face_target, edge_key):
'''

@@ -444,7 +444,7 @@ def lightmap_uvpack(meshes,
del even_dict
del odd_dict

orig = len(pretty_faces)
# orig = len(pretty_faces)

pretty_faces = [pf for pf in pretty_faces if not pf.has_parent]

@@ -489,7 +489,10 @@ def lightmap_uvpack(meshes,

if PREF_APPLY_IMAGE:
if not PREF_PACK_IN_ONE:
image = Image.New("lightmap", PREF_IMG_PX_SIZE, PREF_IMG_PX_SIZE, 24)
image = bpy.data.images.new(name="lightmap",
width=PREF_IMG_PX_SIZE,
height=PREF_IMG_PX_SIZE,
)

for f in face_sel:
# f.image = image
@@ -530,7 +533,7 @@ def unwrap(operator, context, **kwargs):

return {'FINISHED'}

from bpy.props import BoolProperty, FloatProperty, IntProperty, EnumProperty
from bpy.props import BoolProperty, FloatProperty, IntProperty


class LightMapPack(bpy.types.Operator):

@@ -284,8 +284,10 @@ static void cdDM_drawVerts(DerivedMesh *dm)
|
||||
else { /* use OpenGL VBOs or Vertex Arrays instead for better, faster rendering */
|
||||
GPU_vertex_setup(dm);
|
||||
if( !GPU_buffer_legacy(dm) ) {
|
||||
if(dm->drawObject->nelements) glDrawArrays(GL_POINTS,0, dm->drawObject->nelements);
|
||||
else glDrawArrays(GL_POINTS,0, dm->drawObject->nlooseverts);
|
||||
if(dm->drawObject->tot_triangle_point)
|
||||
glDrawArrays(GL_POINTS,0, dm->drawObject->tot_triangle_point);
|
||||
else
|
||||
glDrawArrays(GL_POINTS,0, dm->drawObject->tot_loose_point);
|
||||
}
|
||||
GPU_buffer_unbind();
|
||||
}
|
||||
@@ -547,9 +549,10 @@ static void cdDM_drawFacesSolid(DerivedMesh *dm,
|
||||
GPU_normal_setup( dm );
|
||||
if( !GPU_buffer_legacy(dm) ) {
|
||||
glShadeModel(GL_SMOOTH);
|
||||
for( a = 0; a < dm->drawObject->nmaterials; a++ ) {
|
||||
for( a = 0; a < dm->drawObject->totmaterial; a++ ) {
|
||||
if( setMaterial(dm->drawObject->materials[a].mat_nr+1, NULL) )
|
||||
glDrawArrays(GL_TRIANGLES, dm->drawObject->materials[a].start, dm->drawObject->materials[a].end-dm->drawObject->materials[a].start);
|
||||
glDrawArrays(GL_TRIANGLES, dm->drawObject->materials[a].start,
|
||||
dm->drawObject->materials[a].totpoint);
|
||||
}
|
||||
}
|
||||
GPU_buffer_unbind( );
|
||||
@@ -629,13 +632,13 @@ static void cdDM_drawFacesColored(DerivedMesh *dm, int useTwoSided, unsigned cha
|
||||
GPU_color_setup(dm);
|
||||
if( !GPU_buffer_legacy(dm) ) {
|
||||
glShadeModel(GL_SMOOTH);
|
||||
glDrawArrays(GL_TRIANGLES, 0, dm->drawObject->nelements);
|
||||
glDrawArrays(GL_TRIANGLES, 0, dm->drawObject->tot_triangle_point);
|
||||
|
||||
if( useTwoSided ) {
|
||||
GPU_color4_upload(dm,cp2);
|
||||
GPU_color_setup(dm);
|
||||
glCullFace(GL_FRONT);
|
||||
glDrawArrays(GL_TRIANGLES, 0, dm->drawObject->nelements);
|
||||
glDrawArrays(GL_TRIANGLES, 0, dm->drawObject->tot_triangle_point);
|
||||
glCullFace(GL_BACK);
|
||||
}
|
||||
}
|
||||
@@ -787,8 +790,8 @@ static void cdDM_drawFacesTex_common(DerivedMesh *dm,
|
||||
|
||||
glShadeModel( GL_SMOOTH );
|
||||
lastFlag = 0;
|
||||
for(i = 0; i < dm->drawObject->nelements/3; i++) {
|
||||
int actualFace = dm->drawObject->faceRemap[i];
|
||||
for(i = 0; i < dm->drawObject->tot_triangle_point/3; i++) {
|
||||
int actualFace = dm->drawObject->triangle_to_mface[i];
|
||||
int flag = 1;
|
||||
|
||||
if(drawParams) {
|
||||
@@ -819,13 +822,13 @@ static void cdDM_drawFacesTex_common(DerivedMesh *dm,
|
||||
startFace = i;
|
||||
}
|
||||
}
|
||||
if( startFace < dm->drawObject->nelements/3 ) {
|
||||
if( startFace < dm->drawObject->tot_triangle_point/3 ) {
|
||||
if( lastFlag != 0 ) { /* if the flag is 0 it means the face is hidden or invisible */
|
||||
if (lastFlag==1 && col)
|
||||
GPU_color_switch(1);
|
||||
else
|
||||
GPU_color_switch(0);
|
||||
glDrawArrays(GL_TRIANGLES,startFace*3,dm->drawObject->nelements-startFace*3);
|
||||
glDrawArrays(GL_TRIANGLES, startFace*3, dm->drawObject->tot_triangle_point - startFace*3);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -937,7 +940,7 @@ static void cdDM_drawMappedFaces(DerivedMesh *dm, int (*setDrawOptions)(void *us
|
||||
if( useColors && mc )
|
||||
GPU_color_setup(dm);
|
||||
if( !GPU_buffer_legacy(dm) ) {
|
||||
int tottri = dm->drawObject->nelements/3;
|
||||
int tottri = dm->drawObject->tot_triangle_point/3;
|
||||
glShadeModel(GL_SMOOTH);
|
||||
|
||||
if(tottri == 0) {
|
||||
@@ -949,17 +952,17 @@ static void cdDM_drawMappedFaces(DerivedMesh *dm, int (*setDrawOptions)(void *us
|
||||
}
|
||||
else {
|
||||
/* we need to check if the next material changes */
|
||||
int next_actualFace= dm->drawObject->faceRemap[0];
|
||||
int next_actualFace= dm->drawObject->triangle_to_mface[0];
|
||||
|
||||
for( i = 0; i < tottri; i++ ) {
|
||||
//int actualFace = dm->drawObject->faceRemap[i];
|
||||
//int actualFace = dm->drawObject->triangle_to_mface[i];
|
||||
int actualFace = next_actualFace;
|
||||
MFace *mface= mf + actualFace;
|
||||
int drawSmooth= (mface->flag & ME_SMOOTH);
|
||||
int draw = 1;
|
||||
|
||||
if(i != tottri-1)
|
||||
next_actualFace= dm->drawObject->faceRemap[i+1];
|
||||
next_actualFace= dm->drawObject->triangle_to_mface[i+1];
|
||||
|
||||
orig= (index==NULL) ? actualFace : index[actualFace];
|
||||
|
||||
@@ -1131,9 +1134,9 @@ static void cdDM_drawMappedFacesGLSL(DerivedMesh *dm, int (*setMaterial)(int, vo
|
||||
GPU_normal_setup(dm);
|
||||
|
||||
if( !GPU_buffer_legacy(dm) ) {
|
||||
for( i = 0; i < dm->drawObject->nelements/3; i++ ) {
|
||||
for( i = 0; i < dm->drawObject->tot_triangle_point/3; i++ ) {
|
||||
|
||||
a = dm->drawObject->faceRemap[i];
|
||||
a = dm->drawObject->triangle_to_mface[i];
|
||||
|
||||
mface = mf + a;
|
||||
new_matnr = mface->mat_nr + 1;
|
||||
@@ -1155,7 +1158,7 @@ static void cdDM_drawMappedFacesGLSL(DerivedMesh *dm, int (*setMaterial)(int, vo
|
||||
|
||||
if( numdata != 0 ) {
|
||||
|
||||
GPU_buffer_free(buffer, NULL);
|
||||
GPU_buffer_free(buffer);
|
||||
|
||||
buffer = NULL;
|
||||
}
|
||||
@@ -1195,7 +1198,7 @@ static void cdDM_drawMappedFacesGLSL(DerivedMesh *dm, int (*setMaterial)(int, vo
|
||||
}
|
||||
if( numdata != 0 ) {
|
||||
elementsize = GPU_attrib_element_size( datatypes, numdata );
|
||||
buffer = GPU_buffer_alloc( elementsize*dm->drawObject->nelements, NULL );
|
||||
buffer = GPU_buffer_alloc( elementsize*dm->drawObject->tot_triangle_point);
|
||||
if( buffer == NULL ) {
|
||||
GPU_buffer_unbind();
|
||||
dm->drawObject->legacy = 1;
|
||||
@@ -1204,7 +1207,7 @@ static void cdDM_drawMappedFacesGLSL(DerivedMesh *dm, int (*setMaterial)(int, vo
|
||||
varray = GPU_buffer_lock_stream(buffer);
|
||||
if( varray == NULL ) {
|
||||
GPU_buffer_unbind();
|
||||
GPU_buffer_free(buffer, NULL);
|
||||
GPU_buffer_free(buffer);
|
||||
dm->drawObject->legacy = 1;
|
||||
return;
|
||||
}
|
||||
@@ -1343,7 +1346,7 @@ static void cdDM_drawMappedFacesGLSL(DerivedMesh *dm, int (*setMaterial)(int, vo
|
||||
}
|
||||
GPU_buffer_unbind();
|
||||
}
|
||||
GPU_buffer_free( buffer, NULL );
|
||||
GPU_buffer_free(buffer);
|
||||
}
|
||||
|
||||
glShadeModel(GL_FLAT);
|
||||
|
||||
@@ -2431,6 +2431,7 @@ void calchandleNurb(BezTriple *bezt, BezTriple *prev, BezTriple *next, int mode)
|
||||
{
|
||||
float *p1,*p2,*p3, pt[3];
|
||||
float dx1,dy1,dz1,dx,dy,dz,vx,vy,vz,len,len1,len2;
|
||||
const float eps= 1e-5;
|
||||
|
||||
if(bezt->h1==0 && bezt->h2==0) return;
|
||||
|
||||
@@ -2587,30 +2588,38 @@ void calchandleNurb(BezTriple *bezt, BezTriple *prev, BezTriple *next, int mode)
|
||||
|
||||
if(bezt->f1 & SELECT) { /* order of calculation */
|
||||
if(bezt->h2==HD_ALIGN) { /* aligned */
|
||||
len= len2/len1;
|
||||
p2[3]= p2[0]+len*(p2[0]-p2[-3]);
|
||||
p2[4]= p2[1]+len*(p2[1]-p2[-2]);
|
||||
p2[5]= p2[2]+len*(p2[2]-p2[-1]);
|
||||
if(len1>eps) {
|
||||
len= len2/len1;
|
||||
p2[3]= p2[0]+len*(p2[0]-p2[-3]);
|
||||
p2[4]= p2[1]+len*(p2[1]-p2[-2]);
|
||||
p2[5]= p2[2]+len*(p2[2]-p2[-1]);
|
||||
}
|
||||
}
|
||||
if(bezt->h1==HD_ALIGN) {
|
||||
len= len1/len2;
|
||||
p2[-3]= p2[0]+len*(p2[0]-p2[3]);
|
||||
p2[-2]= p2[1]+len*(p2[1]-p2[4]);
|
||||
p2[-1]= p2[2]+len*(p2[2]-p2[5]);
|
||||
if(len2>eps) {
|
||||
len= len1/len2;
|
||||
p2[-3]= p2[0]+len*(p2[0]-p2[3]);
|
||||
p2[-2]= p2[1]+len*(p2[1]-p2[4]);
|
||||
p2[-1]= p2[2]+len*(p2[2]-p2[5]);
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
if(bezt->h1==HD_ALIGN) {
|
||||
len= len1/len2;
|
||||
p2[-3]= p2[0]+len*(p2[0]-p2[3]);
|
||||
p2[-2]= p2[1]+len*(p2[1]-p2[4]);
|
||||
p2[-1]= p2[2]+len*(p2[2]-p2[5]);
|
||||
if(len2>eps) {
|
||||
len= len1/len2;
|
||||
p2[-3]= p2[0]+len*(p2[0]-p2[3]);
|
||||
p2[-2]= p2[1]+len*(p2[1]-p2[4]);
|
||||
p2[-1]= p2[2]+len*(p2[2]-p2[5]);
|
||||
}
|
||||
}
|
||||
if(bezt->h2==HD_ALIGN) { /* aligned */
|
||||
len= len2/len1;
|
||||
p2[3]= p2[0]+len*(p2[0]-p2[-3]);
|
||||
p2[4]= p2[1]+len*(p2[1]-p2[-2]);
|
||||
p2[5]= p2[2]+len*(p2[2]-p2[-1]);
|
||||
if(len1>eps) {
|
||||
len= len2/len1;
|
||||
p2[3]= p2[0]+len*(p2[0]-p2[-3]);
|
||||
p2[4]= p2[1]+len*(p2[1]-p2[-2]);
|
||||
p2[5]= p2[2]+len*(p2[2]-p2[-1]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2418,6 +2418,11 @@ void ntreeBeginExecTree(bNodeTree *ntree)
|
||||
|
||||
if(ntree->type==NTREE_COMPOSIT)
|
||||
composit_begin_exec(ntree, ntree->stack);
|
||||
|
||||
/* ensures only a single output node is enabled, texnode allows multiple though */
|
||||
if(ntree->type!=NTREE_TEXTURE)
|
||||
ntreeSetOutput(ntree);
|
||||
|
||||
}
|
||||
|
||||
ntree->init |= NTREE_EXEC_INIT;
|
||||
@@ -2765,9 +2770,6 @@ void ntreeCompositExecTree(bNodeTree *ntree, RenderData *rd, int do_preview)
|
||||
/* fixed seed, for example noise texture */
|
||||
BLI_srandom(rd->cfra);
|
||||
|
||||
/* ensures only a single output node is enabled */
|
||||
ntreeSetOutput(ntree);
|
||||
|
||||
/* sets need_exec tags in nodes */
|
||||
curnode = totnode= setExecutableNodes(ntree, &thdata);
|
||||
|
||||
|
||||
@@ -75,6 +75,7 @@ variables on the UI for now
|
||||
#include "BKE_curve.h"
|
||||
#include "BKE_effect.h"
|
||||
#include "BKE_global.h"
|
||||
#include "BKE_modifier.h"
|
||||
#include "BKE_softbody.h"
|
||||
#include "BKE_DerivedMesh.h"
|
||||
#include "BKE_pointcache.h"
|
||||
@@ -289,21 +290,24 @@ typedef struct ccd_Mesh {
|
||||
|
||||
|
||||
|
||||
static ccd_Mesh *ccd_mesh_make(Object *ob, DerivedMesh *dm)
|
||||
static ccd_Mesh *ccd_mesh_make(Object *ob)
|
||||
{
|
||||
CollisionModifierData *cmd;
|
||||
ccd_Mesh *pccd_M = NULL;
|
||||
ccdf_minmax *mima =NULL;
|
||||
MFace *mface=NULL;
|
||||
float v[3],hull;
|
||||
int i;
|
||||
|
||||
cmd =(CollisionModifierData *)modifiers_findByType(ob, eModifierType_Collision);
|
||||
|
||||
/* first some paranoia checks */
|
||||
if (!dm) return NULL;
|
||||
if (!dm->getNumVerts(dm) || !dm->getNumFaces(dm)) return NULL;
|
||||
if (!cmd) return NULL;
|
||||
if (!cmd->numverts || !cmd->numfaces) return NULL;
|
||||
|
||||
pccd_M = MEM_mallocN(sizeof(ccd_Mesh),"ccd_Mesh");
|
||||
pccd_M->totvert = dm->getNumVerts(dm);
|
||||
pccd_M->totface = dm->getNumFaces(dm);
|
||||
pccd_M->totvert = cmd->numverts;
|
||||
pccd_M->totface = cmd->numfaces;
|
||||
pccd_M->savety = CCD_SAVETY;
|
||||
pccd_M->bbmin[0]=pccd_M->bbmin[1]=pccd_M->bbmin[2]=1e30f;
|
||||
pccd_M->bbmax[0]=pccd_M->bbmax[1]=pccd_M->bbmax[2]=-1e30f;
|
||||
@@ -314,12 +318,10 @@ static ccd_Mesh *ccd_mesh_make(Object *ob, DerivedMesh *dm)
|
||||
hull = MAX2(ob->pd->pdef_sbift,ob->pd->pdef_sboft);
|
||||
|
||||
/* alloc and copy verts*/
|
||||
pccd_M->mvert = dm->dupVertArray(dm);
|
||||
/* ah yeah, put the verices to global coords once */
|
||||
/* and determine the ortho BB on the fly */
|
||||
pccd_M->mvert = MEM_dupallocN(cmd->xnew);
|
||||
/* note that xnew coords are already in global space, */
|
||||
/* determine the ortho BB */
|
||||
for(i=0; i < pccd_M->totvert; i++){
|
||||
mul_m4_v3(ob->obmat, pccd_M->mvert[i].co);
|
||||
|
||||
/* evaluate limits */
|
||||
VECCOPY(v,pccd_M->mvert[i].co);
|
||||
pccd_M->bbmin[0] = MIN2(pccd_M->bbmin[0],v[0]-hull);
|
||||
@@ -332,7 +334,7 @@ static ccd_Mesh *ccd_mesh_make(Object *ob, DerivedMesh *dm)
|
||||
|
||||
}
|
||||
/* alloc and copy faces*/
|
||||
pccd_M->mface = dm->dupFaceArray(dm);
|
||||
pccd_M->mface = MEM_dupallocN(cmd->mfaces);
|
||||
|
||||
/* OBBs for idea1 */
|
||||
pccd_M->mima = MEM_mallocN(sizeof(ccdf_minmax)*pccd_M->totface,"ccd_Mesh_Faces_mima");
|
||||
@@ -386,19 +388,22 @@ static ccd_Mesh *ccd_mesh_make(Object *ob, DerivedMesh *dm)
|
||||
}
|
||||
return pccd_M;
|
||||
}
|
||||
static void ccd_mesh_update(Object *ob,ccd_Mesh *pccd_M, DerivedMesh *dm)
|
||||
static void ccd_mesh_update(Object *ob,ccd_Mesh *pccd_M)
|
||||
{
|
||||
ccdf_minmax *mima =NULL;
|
||||
CollisionModifierData *cmd;
|
||||
ccdf_minmax *mima =NULL;
|
||||
MFace *mface=NULL;
|
||||
float v[3],hull;
|
||||
int i;
|
||||
|
||||
/* first some paranoia checks */
|
||||
if (!dm) return ;
|
||||
if (!dm->getNumVerts(dm) || !dm->getNumFaces(dm)) return ;
|
||||
cmd =(CollisionModifierData *)modifiers_findByType(ob, eModifierType_Collision);
|
||||
|
||||
if ((pccd_M->totvert != dm->getNumVerts(dm)) ||
|
||||
(pccd_M->totface != dm->getNumFaces(dm))) return;
|
||||
/* first some paranoia checks */
|
||||
if (!cmd) return ;
|
||||
if (!cmd->numverts || !cmd->numfaces) return ;
|
||||
|
||||
if ((pccd_M->totvert != cmd->numverts) ||
|
||||
(pccd_M->totface != cmd->numfaces)) return;
|
||||
|
||||
pccd_M->bbmin[0]=pccd_M->bbmin[1]=pccd_M->bbmin[2]=1e30f;
|
||||
pccd_M->bbmax[0]=pccd_M->bbmax[1]=pccd_M->bbmax[2]=-1e30f;
|
||||
@@ -411,12 +416,10 @@ static void ccd_mesh_update(Object *ob,ccd_Mesh *pccd_M, DerivedMesh *dm)
|
||||
if(pccd_M->mprevvert) MEM_freeN(pccd_M->mprevvert);
|
||||
pccd_M->mprevvert = pccd_M->mvert;
|
||||
/* alloc and copy verts*/
|
||||
pccd_M->mvert = dm->dupVertArray(dm);
|
||||
/* ah yeah, put the verices to global coords once */
|
||||
/* and determine the ortho BB on the fly */
|
||||
pccd_M->mvert = MEM_dupallocN(cmd->xnew);
|
||||
/* note that xnew coords are already in global space, */
|
||||
/* determine the ortho BB */
|
||||
for(i=0; i < pccd_M->totvert; i++){
|
||||
mul_m4_v3(ob->obmat, pccd_M->mvert[i].co);
|
||||
|
||||
/* evaluate limits */
|
||||
VECCOPY(v,pccd_M->mvert[i].co);
|
||||
pccd_M->bbmin[0] = MIN2(pccd_M->bbmin[0],v[0]-hull);
|
||||
@@ -555,21 +558,8 @@ static void ccd_build_deflector_hash(Scene *scene, Object *vertexowner, GHash *h
|
||||
|
||||
/*+++ only with deflecting set */
|
||||
if(ob->pd && ob->pd->deflect && BLI_ghash_lookup(hash, ob) == NULL) {
|
||||
DerivedMesh *dm= NULL;
|
||||
|
||||
if(ob->softflag & OB_SB_COLLFINAL) /* so maybe someone wants overkill to collide with subsurfed */
|
||||
dm = mesh_get_derived_final(scene, ob, CD_MASK_BAREMESH);
|
||||
else
|
||||
dm = mesh_get_derived_deform(scene, ob, CD_MASK_BAREMESH);
|
||||
|
||||
if(dm){
|
||||
ccd_Mesh *ccdmesh = ccd_mesh_make(ob, dm);
|
||||
BLI_ghash_insert(hash, ob, ccdmesh);
|
||||
|
||||
/* we did copy & modify all we need so give 'em away again */
|
||||
dm->release(dm);
|
||||
|
||||
}
|
||||
ccd_Mesh *ccdmesh = ccd_mesh_make(ob);
|
||||
BLI_ghash_insert(hash, ob, ccdmesh);
|
||||
}/*--- only with deflecting set */
|
||||
|
||||
}/* mesh && layer*/
|
||||
@@ -595,21 +585,9 @@ static void ccd_update_deflector_hash(Scene *scene, Object *vertexowner, GHash *
|
||||
|
||||
/*+++ only with deflecting set */
|
||||
if(ob->pd && ob->pd->deflect) {
|
||||
DerivedMesh *dm= NULL;
|
||||
|
||||
if(ob->softflag & OB_SB_COLLFINAL) { /* so maybe someone wants overkill to collide with subsurfed */
|
||||
dm = mesh_get_derived_final(scene, ob, CD_MASK_BAREMESH);
|
||||
} else {
|
||||
dm = mesh_get_derived_deform(scene, ob, CD_MASK_BAREMESH);
|
||||
}
|
||||
if(dm){
|
||||
ccd_Mesh *ccdmesh = BLI_ghash_lookup(hash,ob);
|
||||
if (ccdmesh)
|
||||
ccd_mesh_update(ob,ccdmesh,dm);
|
||||
|
||||
/* we did copy & modify all we need so give 'em away again */
|
||||
dm->release(dm);
|
||||
}
|
||||
ccd_Mesh *ccdmesh = BLI_ghash_lookup(hash,ob);
|
||||
if (ccdmesh)
|
||||
ccd_mesh_update(ob,ccdmesh);
|
||||
}/*--- only with deflecting set */
|
||||
|
||||
}/* mesh && layer*/
|
||||
|
||||
@@ -1189,8 +1189,9 @@ void BLI_pbvh_node_get_verts(PBVH *bvh, PBVHNode *node, int **vert_indices, MVer
|
||||
void BLI_pbvh_node_num_verts(PBVH *bvh, PBVHNode *node, int *uniquevert, int *totvert)
|
||||
{
|
||||
if(bvh->grids) {
|
||||
if(totvert) *totvert= node->totprim*bvh->gridsize*bvh->gridsize;
|
||||
if(uniquevert) *uniquevert= *totvert;
|
||||
const int tot= node->totprim*bvh->gridsize*bvh->gridsize;
|
||||
if(totvert) *totvert= tot;
|
||||
if(uniquevert) *uniquevert= tot;
|
||||
}
|
||||
else {
|
||||
if(totvert) *totvert= node->uniq_verts + node->face_verts;
|
||||
|
||||
@@ -4874,7 +4874,6 @@ static void lib_link_screen(FileData *fd, Main *main)
|
||||
else if(sl->spacetype==SPACE_FILE) {
|
||||
SpaceFile *sfile= (SpaceFile *)sl;
|
||||
sfile->files= NULL;
|
||||
sfile->params= NULL;
|
||||
sfile->op= NULL;
|
||||
sfile->layout= NULL;
|
||||
sfile->folders_prev= NULL;
|
||||
@@ -5487,7 +5486,7 @@ static void direct_link_screen(FileData *fd, bScreen *sc)
|
||||
sfile->files= NULL;
|
||||
sfile->layout= NULL;
|
||||
sfile->op= NULL;
|
||||
sfile->params= NULL;
|
||||
sfile->params= newdataadr(fd, sfile->params);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2129,7 +2129,11 @@ static void write_screens(WriteData *wd, ListBase *scrbase)
|
||||
writestruct(wd, DATA, "SpaceButs", 1, sl);
|
||||
}
|
||||
else if(sl->spacetype==SPACE_FILE) {
|
||||
SpaceFile *sfile= (SpaceFile *)sl;
|
||||
|
||||
writestruct(wd, DATA, "SpaceFile", 1, sl);
|
||||
if(sfile->params)
|
||||
writestruct(wd, DATA, "FileSelectParams", 1, sfile->params);
|
||||
}
|
||||
else if(sl->spacetype==SPACE_SEQ) {
|
||||
writestruct(wd, DATA, "SpaceSeq", 1, sl);
|
||||
|
||||
@@ -121,7 +121,7 @@ static void fcurves_to_pchan_links_get (ListBase *pfLinks, Object *ob, bAction *
|
||||
pfl->oldangle = pchan->rotAngle;
|
||||
|
||||
/* make copy of custom properties */
|
||||
if (transFlags & ACT_TRANS_PROP)
|
||||
if (pchan->prop && (transFlags & ACT_TRANS_PROP))
|
||||
pfl->oldprops = IDP_CopyProperty(pchan->prop);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1319,7 +1319,7 @@ void OBJECT_OT_make_links_scene(wmOperatorType *ot)
|
||||
|
||||
/* identifiers */
|
||||
ot->name= "Link Objects to Scene";
|
||||
ot->description = "Make linked data local to each object";
|
||||
ot->description = "Link selection to another scene";
|
||||
ot->idname= "OBJECT_OT_make_links_scene";
|
||||
|
||||
/* api callbacks */
|
||||
|
||||
@@ -468,8 +468,9 @@ void VIEW3D_OT_object_as_camera(wmOperatorType *ot)
|
||||
|
||||
void ED_view3d_calc_clipping(BoundBox *bb, float planes[4][4], bglMats *mats, rcti *rect)
|
||||
{
|
||||
float modelview[4][4];
|
||||
double xs, ys, p[3];
|
||||
short val;
|
||||
int val, flip_sign, a;
|
||||
|
||||
/* near zero floating point values can give issues with gluUnProject
|
||||
in side view on some implementations */
|
||||
@@ -493,11 +494,21 @@ void ED_view3d_calc_clipping(BoundBox *bb, float planes[4][4], bglMats *mats, rc
|
||||
VECCOPY(bb->vec[4+val], p);
|
||||
}
|
||||
|
||||
/* verify if we have negative scale. doing the transform before cross
|
||||
product flips the sign of the vector compared to doing cross product
|
||||
before transform then, so we correct for that. */
|
||||
for(a=0; a<16; a++)
|
||||
((float*)modelview)[a] = mats->modelview[a];
|
||||
flip_sign = is_negative_m4(modelview);
|
||||
|
||||
/* then plane equations */
|
||||
for(val=0; val<4; val++) {
|
||||
|
||||
normal_tri_v3(planes[val], bb->vec[val], bb->vec[val==3?0:val+1], bb->vec[val+4]);
|
||||
|
||||
if(flip_sign)
|
||||
negate_v3(planes[val]);
|
||||
|
||||
planes[val][3]= - planes[val][0]*bb->vec[val][0]
|
||||
- planes[val][1]*bb->vec[val][1]
|
||||
- planes[val][2]*bb->vec[val][2];
|
||||
|
||||
@@ -4995,6 +4995,8 @@ void special_aftertrans_update(bContext *C, TransInfo *t)
|
||||
|
||||
/* automatic inserting of keys and unkeyed tagging - only if transform wasn't cancelled (or TFM_DUMMY) */
|
||||
if (!cancelled && (t->mode != TFM_DUMMY)) {
|
||||
/* set BONE_TRANSFORM flags, they get changed by manipulator draw */
|
||||
count_set_pose_transflags(&t->mode, t->around, ob);
|
||||
autokeyframe_pose_cb_func(C, t->scene, (View3D *)t->view, ob, t->mode, targetless_ik);
|
||||
DAG_id_tag_update(&ob->id, OB_RECALC_DATA);
|
||||
}
|
||||
|
||||
@@ -37,8 +37,6 @@
|
||||
#ifndef __GPU_BUFFERS_H__
|
||||
#define __GPU_BUFFERS_H__
|
||||
|
||||
#define MAX_FREE_GPU_BUFFERS 8
|
||||
|
||||
#ifdef _DEBUG
|
||||
/*#define DEBUG_VBO(X) printf(X)*/
|
||||
#define DEBUG_VBO(X)
|
||||
@@ -46,112 +44,92 @@
|
||||
#define DEBUG_VBO(X)
|
||||
#endif
|
||||
|
||||
#ifdef _DEBUG
|
||||
#define ERROR_VBO(X) printf(X)
|
||||
#else
|
||||
#define ERROR_VBO(X)
|
||||
#endif
|
||||
|
||||
struct DerivedMesh;
|
||||
struct DMGridData;
|
||||
struct GHash;
|
||||
struct DMGridData;
|
||||
struct GPUVertPointLink;
|
||||
|
||||
/* V - vertex, N - normal, T - uv, C - color
|
||||
F - float, UB - unsigned byte */
|
||||
#define GPU_BUFFER_INTER_V3F 1
|
||||
#define GPU_BUFFER_INTER_N3F 2
|
||||
#define GPU_BUFFER_INTER_T2F 3
|
||||
#define GPU_BUFFER_INTER_C3UB 4
|
||||
#define GPU_BUFFER_INTER_C4UB 5
|
||||
#define GPU_BUFFER_INTER_END -1
|
||||
|
||||
typedef struct GPUBuffer
|
||||
{
|
||||
typedef struct GPUBuffer {
|
||||
int size; /* in bytes */
|
||||
void *pointer; /* used with vertex arrays */
|
||||
unsigned int id; /* used with vertex buffer objects */
|
||||
} GPUBuffer;
|
||||
|
||||
/* stores deleted buffers so that new buffers wouldn't have to
|
||||
be recreated that often. */
|
||||
typedef struct GPUBufferPool
|
||||
{
|
||||
int size; /* number of allocated buffers stored */
|
||||
int maxsize; /* size of the array */
|
||||
GPUBuffer **buffers;
|
||||
} GPUBufferPool;
|
||||
typedef struct GPUBufferMaterial {
|
||||
/* range of points used for this material */
|
||||
int start;
|
||||
int totpoint;
|
||||
|
||||
typedef struct GPUBufferMaterial
|
||||
{
|
||||
int start; /* at which vertex in the buffer the material starts */
|
||||
int end; /* at which vertex it ends */
|
||||
char mat_nr;
|
||||
/* original material index */
|
||||
short mat_nr;
|
||||
} GPUBufferMaterial;
|
||||
|
||||
typedef struct IndexLink {
|
||||
int element;
|
||||
struct IndexLink *next;
|
||||
} IndexLink;
|
||||
/* meshes are split up by material since changing materials requires
|
||||
GL state changes that can't occur in the middle of drawing an
|
||||
array.
|
||||
|
||||
typedef struct GPUDrawObject
|
||||
{
|
||||
GPUBuffer *vertices;
|
||||
some simplifying assumptions are made:
|
||||
* all quads are treated as two triangles.
|
||||
* no vertex sharing is used; each triangle gets its own copy of the
|
||||
vertices it uses (this makes it easy to deal with a vertex used
|
||||
by faces with different properties, such as smooth/solid shading,
|
||||
different MCols, etc.)
|
||||
|
||||
to avoid confusion between the original MVert vertices and the
|
||||
arrays of OpenGL vertices, the latter are referred to here and in
|
||||
the source as `points'. similarly, the OpenGL triangles generated
|
||||
for MFaces are referred to as triangles rather than faces.
|
||||
*/
|
||||
typedef struct GPUDrawObject {
|
||||
GPUBuffer *points;
|
||||
GPUBuffer *normals;
|
||||
GPUBuffer *uv;
|
||||
GPUBuffer *colors;
|
||||
GPUBuffer *edges;
|
||||
GPUBuffer *uvedges;
|
||||
|
||||
int *faceRemap; /* at what index was the face originally in DerivedMesh */
|
||||
IndexLink *indices; /* given an index, find all elements using it */
|
||||
IndexLink *indexMem; /* for faster memory allocation/freeing */
|
||||
int indexMemUsage; /* how many are already allocated */
|
||||
/* for each triangle, the original MFace index */
|
||||
int *triangle_to_mface;
|
||||
|
||||
/* for each original vertex, the list of related points */
|
||||
struct GPUVertPointLink *vert_points;
|
||||
/* storage for the vert_points lists */
|
||||
struct GPUVertPointLink *vert_points_mem;
|
||||
int vert_points_usage;
|
||||
|
||||
int colType;
|
||||
|
||||
GPUBufferMaterial *materials;
|
||||
int totmaterial;
|
||||
|
||||
int tot_triangle_point;
|
||||
int tot_loose_point;
|
||||
|
||||
/* caches of the original DerivedMesh values */
|
||||
int totvert;
|
||||
int totedge;
|
||||
|
||||
int nmaterials;
|
||||
int nelements; /* (number of faces) * 3 */
|
||||
int nlooseverts;
|
||||
int nedges;
|
||||
int nindices;
|
||||
int legacy; /* if there was a failure allocating some buffer, use old rendering code */
|
||||
|
||||
/* if there was a failure allocating some buffer, use old
|
||||
rendering code */
|
||||
int legacy;
|
||||
} GPUDrawObject;
|
||||
|
||||
/* used for GLSL materials */
|
||||
typedef struct GPUAttrib
|
||||
{
|
||||
typedef struct GPUAttrib {
|
||||
int index;
|
||||
int size;
|
||||
int type;
|
||||
} GPUAttrib;
|
||||
|
||||
GPUBufferPool *GPU_buffer_pool_new(void);
|
||||
void GPU_buffer_pool_free( GPUBufferPool *pool );
|
||||
void GPU_buffer_pool_free_unused( GPUBufferPool *pool );
|
||||
void GPU_global_buffer_pool_free(void);
|
||||
|
||||
GPUBuffer *GPU_buffer_alloc( int size, GPUBufferPool *pool );
|
||||
void GPU_buffer_free( GPUBuffer *buffer, GPUBufferPool *pool );
|
||||
GPUBuffer *GPU_buffer_alloc(int size);
|
||||
void GPU_buffer_free(GPUBuffer *buffer);
|
||||
|
||||
GPUDrawObject *GPU_drawobject_new( struct DerivedMesh *dm );
|
||||
void GPU_drawobject_free( struct DerivedMesh *dm );
|
||||
|
||||
/* Buffers for non-DerivedMesh drawing */
|
||||
void *GPU_build_mesh_buffers(struct GHash *map, struct MVert *mvert,
|
||||
struct MFace *mface, int *face_indices,
|
||||
int totface, int *vert_indices, int uniq_verts,
|
||||
int totvert);
|
||||
void GPU_update_mesh_buffers(void *buffers, struct MVert *mvert,
|
||||
int *vert_indices, int totvert);
|
||||
void *GPU_build_grid_buffers(struct DMGridData **grids,
|
||||
int *grid_indices, int totgrid, int gridsize);
|
||||
void GPU_update_grid_buffers(void *buffers_v, struct DMGridData **grids,
|
||||
int *grid_indices, int totgrid, int gridsize, int smooth);
|
||||
void GPU_draw_buffers(void *buffers);
|
||||
void GPU_free_buffers(void *buffers);
|
||||
|
||||
/* called before drawing */
|
||||
void GPU_vertex_setup( struct DerivedMesh *dm );
|
||||
void GPU_normal_setup( struct DerivedMesh *dm );
|
||||
@@ -175,6 +153,7 @@ void GPU_color4_upload( struct DerivedMesh *dm, unsigned char *data );
|
||||
/* switch color rendering on=1/off=0 */
|
||||
void GPU_color_switch( int mode );
|
||||
|
||||
/* used for drawing edges */
|
||||
void GPU_buffer_draw_elements( GPUBuffer *elements, unsigned int mode, int start, int count );
|
||||
|
||||
/* called after drawing */
|
||||
@@ -183,4 +162,18 @@ void GPU_buffer_unbind(void);
|
||||
/* used to check whether to use the old (without buffers) code */
|
||||
int GPU_buffer_legacy( struct DerivedMesh *dm );
|
||||
|
||||
/* Buffers for non-DerivedMesh drawing */
|
||||
void *GPU_build_mesh_buffers(struct GHash *map, struct MVert *mvert,
|
||||
struct MFace *mface, int *face_indices,
|
||||
int totface, int *vert_indices, int uniq_verts,
|
||||
int totvert);
|
||||
void GPU_update_mesh_buffers(void *buffers, struct MVert *mvert,
|
||||
int *vert_indices, int totvert);
|
||||
void *GPU_build_grid_buffers(struct DMGridData **grids,
|
||||
int *grid_indices, int totgrid, int gridsize);
|
||||
void GPU_update_grid_buffers(void *buffers_v, struct DMGridData **grids,
|
||||
int *grid_indices, int totgrid, int gridsize, int smooth);
|
||||
void GPU_draw_buffers(void *buffers);
|
||||
void GPU_free_buffers(void *buffers);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -56,11 +56,13 @@
|
||||
|
||||
#include "GPU_buffers.h"
|
||||
|
||||
#define GPU_BUFFER_VERTEX_STATE 1
|
||||
#define GPU_BUFFER_NORMAL_STATE 2
|
||||
#define GPU_BUFFER_TEXCOORD_STATE 4
|
||||
#define GPU_BUFFER_COLOR_STATE 8
|
||||
#define GPU_BUFFER_ELEMENT_STATE 16
|
||||
typedef enum {
|
||||
GPU_BUFFER_VERTEX_STATE = 1,
|
||||
GPU_BUFFER_NORMAL_STATE = 2,
|
||||
GPU_BUFFER_TEXCOORD_STATE = 4,
|
||||
GPU_BUFFER_COLOR_STATE = 8,
|
||||
GPU_BUFFER_ELEMENT_STATE = 16,
|
||||
} GPUBufferState;
|
||||
|
||||
#define MAX_GPU_ATTRIB_DATA 32
|
||||
|
||||
@@ -69,342 +71,1207 @@
|
||||
|
||||
/* -1 - undefined, 0 - vertex arrays, 1 - VBOs */
|
||||
static int useVBOs = -1;
|
||||
static GPUBufferPool *globalPool = 0;
|
||||
static int GLStates = 0;
|
||||
static GPUBufferState GLStates = 0;
|
||||
static GPUAttrib attribData[MAX_GPU_ATTRIB_DATA] = { { -1, 0, 0 } };
|
||||
|
||||
GPUBufferPool *GPU_buffer_pool_new(void)
|
||||
/* stores recently-deleted buffers so that new buffers won't have to
|
||||
be recreated as often
|
||||
|
||||
only one instance of this pool is created, stored in
|
||||
gpu_buffer_pool
|
||||
|
||||
note that the number of buffers in the pool is usually limited to
|
||||
MAX_FREE_GPU_BUFFERS, but this limit may be exceeded temporarily
|
||||
when a GPUBuffer is released outside the main thread; due to OpenGL
|
||||
restrictions it cannot be immediately released
|
||||
*/
|
||||
typedef struct GPUBufferPool {
|
||||
/* number of allocated buffers stored */
|
||||
int totbuf;
|
||||
/* actual allocated length of the array */
|
||||
int maxsize;
|
||||
GPUBuffer **buffers;
|
||||
} GPUBufferPool;
|
||||
#define MAX_FREE_GPU_BUFFERS 8
|
||||
|
||||
/* create a new GPUBufferPool */
|
||||
static GPUBufferPool *gpu_buffer_pool_new(void)
|
||||
{
|
||||
GPUBufferPool *pool;
|
||||
|
||||
DEBUG_VBO("GPU_buffer_pool_new\n");
|
||||
/* enable VBOs if supported */
|
||||
if(useVBOs == -1)
|
||||
useVBOs = (GLEW_ARB_vertex_buffer_object ? 1 : 0);
|
||||
|
||||
if( useVBOs < 0 ) {
|
||||
if( GLEW_ARB_vertex_buffer_object ) {
|
||||
DEBUG_VBO( "Vertex Buffer Objects supported.\n" );
|
||||
useVBOs = 1;
|
||||
}
|
||||
else {
|
||||
DEBUG_VBO( "Vertex Buffer Objects NOT supported.\n" );
|
||||
useVBOs = 0;
|
||||
}
|
||||
}
|
||||
pool = MEM_callocN(sizeof(GPUBufferPool), "GPUBuffer");
|
||||
|
||||
pool = MEM_callocN(sizeof(GPUBufferPool), "GPU_buffer_pool_new");
|
||||
pool->maxsize = MAX_FREE_GPU_BUFFERS;
|
||||
pool->buffers = MEM_callocN(sizeof(GPUBuffer*)*pool->maxsize, "GPU_buffer_pool_new buffers");
|
||||
pool->buffers = MEM_callocN(sizeof(GPUBuffer*)*pool->maxsize,
|
||||
"GPUBuffer.buffers");
|
||||
|
||||
return pool;
|
||||
}
|
||||
|
||||
static void GPU_buffer_pool_remove( int index, GPUBufferPool *pool )
|
||||
/* remove a GPUBuffer from the pool (does not free the GPUBuffer) */
|
||||
static void gpu_buffer_pool_remove_index(GPUBufferPool *pool, int index)
|
||||
{
|
||||
int i;
|
||||
|
||||
if( index >= pool->size || index < 0 ) {
|
||||
ERROR_VBO("Wrong index, out of bounds in call to GPU_buffer_pool_remove");
|
||||
if(!pool || index < 0 || index >= pool->totbuf)
|
||||
return;
|
||||
}
|
||||
DEBUG_VBO("GPU_buffer_pool_remove\n");
|
||||
|
||||
for( i = index; i < pool->size-1; i++ ) {
|
||||
/* shift entries down, overwriting the buffer at `index' */
|
||||
for(i = index; i < pool->totbuf - 1; i++)
|
||||
pool->buffers[i] = pool->buffers[i+1];
|
||||
}
|
||||
if( pool->size > 0 )
|
||||
pool->buffers[pool->size-1] = 0;
|
||||
|
||||
pool->size--;
|
||||
/* clear the last entry */
|
||||
if(pool->totbuf > 0)
|
||||
pool->buffers[pool->totbuf - 1] = NULL;
|
||||
|
||||
pool->totbuf--;
|
||||
}
|
||||
|
||||
static void GPU_buffer_pool_delete_last( GPUBufferPool *pool )
|
||||
/* delete the last entry in the pool */
|
||||
static void gpu_buffer_pool_delete_last(GPUBufferPool *pool)
|
||||
{
|
||||
int last;
|
||||
GPUBuffer *last;
|
||||
|
||||
DEBUG_VBO("GPU_buffer_pool_delete_last\n");
|
||||
|
||||
if( pool->size <= 0 )
|
||||
if(pool->totbuf <= 0)
|
||||
return;
|
||||
|
||||
last = pool->size-1;
|
||||
/* get the last entry */
|
||||
if(!(last = pool->buffers[pool->totbuf - 1]))
|
||||
return;
|
||||
|
||||
if( pool->buffers[last] != 0 ) {
|
||||
if( useVBOs ) {
|
||||
glDeleteBuffersARB(1,&pool->buffers[last]->id);
|
||||
MEM_freeN( pool->buffers[last] );
|
||||
}
|
||||
else {
|
||||
MEM_freeN( pool->buffers[last]->pointer );
|
||||
MEM_freeN( pool->buffers[last] );
|
||||
}
|
||||
pool->buffers[last] = 0;
|
||||
} else {
|
||||
DEBUG_VBO("Why are we accessing a null buffer?\n");
|
||||
}
|
||||
pool->size--;
|
||||
/* delete the buffer's data */
|
||||
if(useVBOs)
|
||||
glDeleteBuffersARB(1, &last->id);
|
||||
else
|
||||
MEM_freeN(last->pointer);
|
||||
|
||||
/* delete the buffer and remove from pool */
|
||||
MEM_freeN(last);
|
||||
pool->totbuf--;
|
||||
pool->buffers[pool->totbuf] = NULL;
|
||||
}
|
||||
|
||||
void GPU_buffer_pool_free(GPUBufferPool *pool)
|
||||
/* free a GPUBufferPool; also frees the data in the pool's
|
||||
GPUBuffers */
|
||||
static void gpu_buffer_pool_free(GPUBufferPool *pool)
|
||||
{
|
||||
DEBUG_VBO("GPU_buffer_pool_free\n");
|
||||
|
||||
if( pool == 0 )
|
||||
pool = globalPool;
|
||||
if( pool == 0 )
|
||||
if(!pool)
|
||||
return;
|
||||
|
||||
while( pool->size )
|
||||
GPU_buffer_pool_delete_last(pool);
|
||||
while(pool->totbuf)
|
||||
gpu_buffer_pool_delete_last(pool);
|
||||
|
||||
MEM_freeN(pool->buffers);
|
||||
MEM_freeN(pool);
|
||||
/* if we are releasing the global pool, stop keeping a reference to it */
|
||||
if (pool == globalPool)
|
||||
globalPool = NULL;
|
||||
}
|
||||
|
||||
void GPU_buffer_pool_free_unused(GPUBufferPool *pool)
|
||||
static GPUBufferPool *gpu_buffer_pool = NULL;
|
||||
static GPUBufferPool *gpu_get_global_buffer_pool(void)
|
||||
{
|
||||
DEBUG_VBO("GPU_buffer_pool_free_unused\n");
|
||||
/* initialize the pool */
|
||||
if(!gpu_buffer_pool)
|
||||
gpu_buffer_pool = gpu_buffer_pool_new();
|
||||
|
||||
if( pool == 0 )
|
||||
pool = globalPool;
|
||||
if( pool == 0 )
|
||||
return;
|
||||
|
||||
while( pool->size > MAX_FREE_GPU_BUFFERS )
|
||||
GPU_buffer_pool_delete_last(pool);
|
||||
return gpu_buffer_pool;
|
||||
}
|
||||
|
||||
GPUBuffer *GPU_buffer_alloc( int size, GPUBufferPool *pool )
|
||||
void GPU_global_buffer_pool_free(void)
|
||||
{
|
||||
char buffer[60];
|
||||
int i;
|
||||
int cursize;
|
||||
GPUBuffer *allocated;
|
||||
int bestfit = -1;
|
||||
gpu_buffer_pool_free(gpu_buffer_pool);
|
||||
gpu_buffer_pool = NULL;
|
||||
}
|
||||
|
||||
DEBUG_VBO("GPU_buffer_alloc\n");
|
||||
/* get a GPUBuffer of at least `size' bytes; uses one from the buffer
|
||||
pool if possible, otherwise creates a new one */
|
||||
GPUBuffer *GPU_buffer_alloc(int size)
|
||||
{
|
||||
GPUBufferPool *pool;
|
||||
GPUBuffer *buf;
|
||||
int i, bufsize, bestfit = -1;
|
||||
|
||||
if( pool == 0 ) {
|
||||
if( globalPool == 0 )
|
||||
globalPool = GPU_buffer_pool_new();
|
||||
pool = globalPool;
|
||||
}
|
||||
pool = gpu_get_global_buffer_pool();
|
||||
|
||||
for( i = 0; i < pool->size; i++ ) {
|
||||
cursize = pool->buffers[i]->size;
|
||||
if( cursize == size ) {
|
||||
allocated = pool->buffers[i];
|
||||
GPU_buffer_pool_remove(i,pool);
|
||||
DEBUG_VBO("free buffer of exact size found\n");
|
||||
return allocated;
|
||||
/* not sure if this buffer pool code has been profiled much,
|
||||
seems to me that the graphics driver and system memory
|
||||
management might do this stuff anyway. --nicholas
|
||||
*/
|
||||
|
||||
/* check the global buffer pool for a recently-deleted buffer
|
||||
that is at least as big as the request, but not more than
|
||||
twice as big */
|
||||
for(i = 0; i < pool->totbuf; i++) {
|
||||
bufsize = pool->buffers[i]->size;
|
||||
|
||||
/* check for an exact size match */
|
||||
if(bufsize == size) {
|
||||
bestfit = i;
|
||||
break;
|
||||
}
|
||||
/* smaller buffers won't fit data and buffers at least twice as big are a waste of memory */
|
||||
else if( cursize > size && size > cursize/2 ) {
|
||||
/* is it closer to the required size than the last appropriate buffer found. try to save memory */
|
||||
if( bestfit == -1 || pool->buffers[bestfit]->size > cursize ) {
|
||||
/* smaller buffers won't fit data and buffers at least
|
||||
twice as big are a waste of memory */
|
||||
else if(bufsize > size && size > (bufsize / 2)) {
|
||||
/* is it closer to the required size than the
|
||||
last appropriate buffer found. try to save
|
||||
memory */
|
||||
if(bestfit == -1 || pool->buffers[bestfit]->size > bufsize) {
|
||||
bestfit = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
if( bestfit == -1 ) {
|
||||
DEBUG_VBO("allocating a new buffer\n");
|
||||
|
||||
allocated = MEM_mallocN(sizeof(GPUBuffer), "GPU_buffer_alloc");
|
||||
allocated->size = size;
|
||||
if( useVBOs == 1 ) {
|
||||
glGenBuffersARB( 1, &allocated->id );
|
||||
glBindBufferARB( GL_ARRAY_BUFFER_ARB, allocated->id );
|
||||
glBufferDataARB( GL_ARRAY_BUFFER_ARB, size, 0, GL_STATIC_DRAW_ARB );
|
||||
glBindBufferARB( GL_ARRAY_BUFFER_ARB, 0 );
|
||||
}
|
||||
else {
|
||||
allocated->pointer = MEM_mallocN(size, "GPU_buffer_alloc_vertexarray");
|
||||
while( allocated->pointer == 0 && pool->size > 0 ) {
|
||||
GPU_buffer_pool_delete_last(pool);
|
||||
allocated->pointer = MEM_mallocN(size, "GPU_buffer_alloc_vertexarray");
|
||||
}
|
||||
if( allocated->pointer == 0 && pool->size == 0 ) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
/* if an acceptable buffer was found in the pool, remove it
|
||||
from the pool and return it */
|
||||
if(bestfit != -1) {
|
||||
buf = pool->buffers[bestfit];
|
||||
gpu_buffer_pool_remove_index(pool, bestfit);
|
||||
return buf;
|
||||
}
|
||||
|
||||
/* no acceptable buffer found in the pool, create a new one */
|
||||
buf = MEM_callocN(sizeof(GPUBuffer), "GPUBuffer");
|
||||
buf->size = size;
|
||||
|
||||
if(useVBOs == 1) {
|
||||
/* create a new VBO and initialize it to the requested
|
||||
size */
|
||||
glGenBuffersARB(1, &buf->id);
|
||||
glBindBufferARB(GL_ARRAY_BUFFER_ARB, buf->id);
|
||||
glBufferDataARB(GL_ARRAY_BUFFER_ARB, size, 0, GL_STATIC_DRAW_ARB);
|
||||
glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
|
||||
}
|
||||
else {
|
||||
sprintf(buffer,"free buffer found. Wasted %d bytes\n", pool->buffers[bestfit]->size-size);
|
||||
DEBUG_VBO(buffer);
|
||||
|
||||
allocated = pool->buffers[bestfit];
|
||||
GPU_buffer_pool_remove(bestfit,pool);
|
||||
buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
|
||||
|
||||
/* purpose of this seems to be dealing with
|
||||
out-of-memory errors? looks a bit iffy to me
|
||||
though, at least on Linux I expect malloc() would
|
||||
just overcommit. --nicholas */
|
||||
while(!buf->pointer && pool->totbuf > 0) {
|
||||
gpu_buffer_pool_delete_last(pool);
|
||||
buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
|
||||
}
|
||||
if(!buf->pointer)
|
||||
return NULL;
|
||||
}
|
||||
return allocated;
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
||||
void GPU_buffer_free( GPUBuffer *buffer, GPUBufferPool *pool )
|
||||
/* release a GPUBuffer; does not free the actual buffer or its data,
|
||||
but rather moves it to the pool of recently-free'd buffers for
|
||||
possible re-use*/
|
||||
void GPU_buffer_free(GPUBuffer *buffer)
|
||||
{
|
||||
GPUBufferPool *pool;
|
||||
int i;
|
||||
|
||||
DEBUG_VBO("GPU_buffer_free\n");
|
||||
|
||||
if( buffer == 0 )
|
||||
if(!buffer)
|
||||
return;
|
||||
if( pool == 0 )
|
||||
pool = globalPool;
|
||||
if( pool == 0 )
|
||||
pool = globalPool = GPU_buffer_pool_new();
|
||||
|
||||
pool = gpu_get_global_buffer_pool();
|
||||
|
||||
/* free the last used buffer in the queue if no more space, but only
|
||||
if we are in the main thread. for e.g. rendering or baking it can
|
||||
happen that we are in other thread and can't call OpenGL, in that
|
||||
case cleanup will be done GPU_buffer_pool_free_unused */
|
||||
if( BLI_thread_is_main() ) {
|
||||
while( pool->size >= MAX_FREE_GPU_BUFFERS )
|
||||
GPU_buffer_pool_delete_last( pool );
|
||||
if(BLI_thread_is_main()) {
|
||||
/* in main thread, safe to decrease size of pool back
|
||||
down to MAX_FREE_GPU_BUFFERS */
|
||||
while(pool->totbuf >= MAX_FREE_GPU_BUFFERS)
|
||||
gpu_buffer_pool_delete_last(pool);
|
||||
}
|
||||
else {
|
||||
if( pool->maxsize == pool->size ) {
|
||||
/* outside of main thread, can't safely delete the
|
||||
buffer, so increase pool size */
|
||||
if(pool->maxsize == pool->totbuf) {
|
||||
pool->maxsize += MAX_FREE_GPU_BUFFERS;
|
||||
pool->buffers = MEM_reallocN(pool->buffers, sizeof(GPUBuffer*)*pool->maxsize);
|
||||
pool->buffers = MEM_reallocN(pool->buffers,
|
||||
sizeof(GPUBuffer*) * pool->maxsize);
|
||||
}
|
||||
}
|
||||
|
||||
for( i =pool->size; i > 0; i-- ) {
|
||||
/* shift pool entries up by one */
|
||||
for(i = pool->totbuf; i > 0; i--)
|
||||
pool->buffers[i] = pool->buffers[i-1];
|
||||
}
|
||||
|
||||
/* insert the buffer into the beginning of the pool */
|
||||
pool->buffers[0] = buffer;
|
||||
pool->size++;
|
||||
pool->totbuf++;
|
||||
}
|
||||
|
||||
GPUDrawObject *GPU_drawobject_new( DerivedMesh *dm )
|
||||
typedef struct GPUVertPointLink {
|
||||
struct GPUVertPointLink *next;
|
||||
/* -1 means uninitialized */
|
||||
int point_index;
|
||||
} GPUVertPointLink;
|
||||
|
||||
/* add a new point to the list of points related to a particular
|
||||
vertex */
|
||||
static void gpu_drawobject_add_vert_point(GPUDrawObject *gdo, int vert_index, int point_index)
|
||||
{
|
||||
GPUDrawObject *object;
|
||||
MFace *mface;
|
||||
int numverts[MAX_MATERIALS];
|
||||
int redir[MAX_MATERIALS];
|
||||
int *index;
|
||||
int i;
|
||||
int curmat, curverts, numfaces;
|
||||
GPUVertPointLink *lnk;
|
||||
|
||||
DEBUG_VBO("GPU_drawobject_new\n");
|
||||
lnk = &gdo->vert_points[vert_index];
|
||||
|
||||
object = MEM_callocN(sizeof(GPUDrawObject),"GPU_drawobject_new_object");
|
||||
object->nindices = dm->getNumVerts(dm);
|
||||
object->indices = MEM_mallocN(sizeof(IndexLink)*object->nindices, "GPU_drawobject_new_indices");
|
||||
object->nedges = dm->getNumEdges(dm);
|
||||
/* if first link is in use, add a new link at the end */
|
||||
if(lnk->point_index != -1) {
|
||||
/* get last link */
|
||||
for(; lnk->next; lnk = lnk->next);
|
||||
|
||||
for( i = 0; i < object->nindices; i++ ) {
|
||||
object->indices[i].element = -1;
|
||||
object->indices[i].next = 0;
|
||||
}
|
||||
/*object->legacy = 1;*/
|
||||
memset(numverts,0,sizeof(int)*MAX_MATERIALS);
|
||||
|
||||
mface = dm->getFaceArray(dm);
|
||||
|
||||
numfaces= dm->getNumFaces(dm);
|
||||
for( i=0; i < numfaces; i++ ) {
|
||||
if( mface[i].v4 )
|
||||
numverts[mface[i].mat_nr] += 6; /* split every quad into two triangles */
|
||||
else
|
||||
numverts[mface[i].mat_nr] += 3;
|
||||
/* add a new link from the pool */
|
||||
lnk = lnk->next = &gdo->vert_points_mem[gdo->vert_points_usage];
|
||||
gdo->vert_points_usage++;
|
||||
}
|
||||
|
||||
for( i = 0; i < MAX_MATERIALS; i++ ) {
|
||||
if( numverts[i] > 0 ) {
|
||||
object->nmaterials++;
|
||||
object->nelements += numverts[i];
|
||||
lnk->point_index = point_index;
|
||||
}
|
||||
|
||||
/* update the vert_points and triangle_to_mface fields with a new
|
||||
triangle */
|
||||
static void gpu_drawobject_add_triangle(GPUDrawObject *gdo,
|
||||
int base_point_index,
|
||||
int face_index,
|
||||
int v1, int v2, int v3)
|
||||
{
|
||||
int i, v[3] = {v1, v2, v3};
|
||||
for(i = 0; i < 3; i++)
|
||||
gpu_drawobject_add_vert_point(gdo, v[i], base_point_index + i);
|
||||
gdo->triangle_to_mface[base_point_index / 3] = face_index;
|
||||
}
|
||||
|
||||
/* for each vertex, build a list of points related to it; these lists
|
||||
are stored in an array sized to the number of vertices */
|
||||
static void gpu_drawobject_init_vert_points(GPUDrawObject *gdo, MFace *f, int totface)
|
||||
{
|
||||
GPUBufferMaterial *mat;
|
||||
int i, mat_orig_to_new[MAX_MATERIALS];
|
||||
|
||||
/* allocate the array and space for links */
|
||||
gdo->vert_points = MEM_callocN(sizeof(GPUVertPointLink) * gdo->totvert,
|
||||
"GPUDrawObject.vert_points");
|
||||
gdo->vert_points_mem = MEM_callocN(sizeof(GPUVertPointLink) * gdo->tot_triangle_point,
|
||||
"GPUDrawObject.vert_points_mem");
|
||||
gdo->vert_points_usage = 0;
|
||||
|
||||
/* build a map from the original material indices to the new
|
||||
GPUBufferMaterial indices */
|
||||
for(i = 0; i < gdo->totmaterial; i++)
|
||||
mat_orig_to_new[gdo->materials[i].mat_nr] = i;
|
||||
|
||||
/* -1 indicates the link is not yet used */
|
||||
for(i = 0; i < gdo->totvert; i++)
|
||||
gdo->vert_points[i].point_index = -1;
|
||||
|
||||
for(i = 0; i < totface; i++, f++) {
|
||||
mat = &gdo->materials[mat_orig_to_new[f->mat_nr]];
|
||||
|
||||
/* add triangle */
|
||||
gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
|
||||
i, f->v1, f->v2, f->v3);
|
||||
mat->totpoint += 3;
|
||||
|
||||
/* add second triangle for quads */
|
||||
if(f->v4) {
|
||||
gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
|
||||
i, f->v3, f->v4, f->v1);
|
||||
mat->totpoint += 3;
|
||||
}
|
||||
}
|
||||
object->materials = MEM_mallocN(sizeof(GPUBufferMaterial)*object->nmaterials,"GPU_drawobject_new_materials");
|
||||
index = MEM_mallocN(sizeof(int)*object->nmaterials,"GPU_drawobject_new_index");
|
||||
|
||||
curmat = curverts = 0;
|
||||
for( i = 0; i < MAX_MATERIALS; i++ ) {
|
||||
if( numverts[i] > 0 ) {
|
||||
object->materials[curmat].mat_nr = i;
|
||||
object->materials[curmat].start = curverts;
|
||||
index[curmat] = curverts/3;
|
||||
object->materials[curmat].end = curverts+numverts[i];
|
||||
curverts += numverts[i];
|
||||
/* map any unused vertices to loose points */
|
||||
for(i = 0; i < gdo->totvert; i++) {
|
||||
if(gdo->vert_points[i].point_index == -1) {
|
||||
gdo->vert_points[i].point_index = gdo->tot_triangle_point + gdo->tot_loose_point;
|
||||
gdo->tot_loose_point++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* see GPUDrawObject's structure definition for a description of the
|
||||
data being initialized here */
|
||||
GPUDrawObject *GPU_drawobject_new( DerivedMesh *dm )
|
||||
{
|
||||
GPUDrawObject *gdo;
|
||||
MFace *mface;
|
||||
int points_per_mat[MAX_MATERIALS];
|
||||
int i, curmat, curpoint, totface;
|
||||
|
||||
mface = dm->getFaceArray(dm);
|
||||
totface= dm->getNumFaces(dm);
|
||||
|
||||
/* get the number of points used by each material, treating
|
||||
each quad as two triangles */
|
||||
memset(points_per_mat, 0, sizeof(int)*MAX_MATERIALS);
|
||||
for(i = 0; i < totface; i++)
|
||||
points_per_mat[mface[i].mat_nr] += mface[i].v4 ? 6 : 3;
|
||||
|
||||
/* create the GPUDrawObject */
|
||||
gdo = MEM_callocN(sizeof(GPUDrawObject),"GPUDrawObject");
|
||||
gdo->totvert = dm->getNumVerts(dm);
|
||||
gdo->totedge = dm->getNumEdges(dm);
|
||||
|
||||
/* count the number of materials used by this DerivedMesh */
|
||||
for(i = 0; i < MAX_MATERIALS; i++) {
|
||||
if(points_per_mat[i] > 0)
|
||||
gdo->totmaterial++;
|
||||
}
|
||||
|
||||
/* allocate an array of materials used by this DerivedMesh */
|
||||
gdo->materials = MEM_mallocN(sizeof(GPUBufferMaterial) * gdo->totmaterial,
|
||||
"GPUDrawObject.materials");
|
||||
|
||||
/* initialize the materials array */
|
||||
for(i = 0, curmat = 0, curpoint = 0; i < MAX_MATERIALS; i++) {
|
||||
if(points_per_mat[i] > 0) {
|
||||
gdo->materials[curmat].start = curpoint;
|
||||
gdo->materials[curmat].totpoint = 0;
|
||||
gdo->materials[curmat].mat_nr = i;
|
||||
|
||||
curpoint += points_per_mat[i];
|
||||
curmat++;
|
||||
}
|
||||
}
|
||||
object->faceRemap = MEM_mallocN(sizeof(int)*object->nelements/3,"GPU_drawobject_new_faceRemap");
|
||||
for( i = 0; i < object->nmaterials; i++ ) {
|
||||
redir[object->materials[i].mat_nr] = i; /* material number -> material index */
|
||||
|
||||
/* store total number of points used for triangles */
|
||||
gdo->tot_triangle_point = curpoint;
|
||||
|
||||
gdo->triangle_to_mface = MEM_mallocN(sizeof(int) * (gdo->tot_triangle_point / 3),
|
||||
"GPUDrawObject.triangle_to_mface");
|
||||
|
||||
gpu_drawobject_init_vert_points(gdo, mface, totface);
|
||||
|
||||
return gdo;
|
||||
}
|
||||
|
||||
void GPU_drawobject_free(DerivedMesh *dm)
|
||||
{
|
||||
GPUDrawObject *gdo;
|
||||
|
||||
if(!dm || !(gdo = dm->drawObject))
|
||||
return;
|
||||
|
||||
MEM_freeN(gdo->materials);
|
||||
MEM_freeN(gdo->triangle_to_mface);
|
||||
MEM_freeN(gdo->vert_points);
|
||||
MEM_freeN(gdo->vert_points_mem);
|
||||
GPU_buffer_free(gdo->points);
|
||||
GPU_buffer_free(gdo->normals);
|
||||
GPU_buffer_free(gdo->uv);
|
||||
GPU_buffer_free(gdo->colors);
|
||||
GPU_buffer_free(gdo->edges);
|
||||
GPU_buffer_free(gdo->uvedges);
|
||||
|
||||
MEM_freeN(gdo);
|
||||
dm->drawObject = NULL;
|
||||
}
|
||||
|
||||
typedef void (*GPUBufferCopyFunc)(DerivedMesh *dm, float *varray, int *index,
|
||||
int *mat_orig_to_new, void *user_data);
|
||||
|
||||
static GPUBuffer *gpu_buffer_setup(DerivedMesh *dm, GPUDrawObject *object,
|
||||
int vector_size, int size, GLenum target,
|
||||
void *user, GPUBufferCopyFunc copy_f)
|
||||
{
|
||||
GPUBufferPool *pool;
|
||||
GPUBuffer *buffer;
|
||||
float *varray;
|
||||
int mat_orig_to_new[MAX_MATERIALS];
|
||||
int *cur_index_per_mat;
|
||||
int i;
|
||||
int success;
|
||||
GLboolean uploaded;
|
||||
|
||||
pool = gpu_get_global_buffer_pool();
|
||||
|
||||
/* alloc a GPUBuffer; fall back to legacy mode on failure */
|
||||
if(!(buffer = GPU_buffer_alloc(size)))
|
||||
dm->drawObject->legacy = 1;
|
||||
|
||||
/* nothing to do for legacy mode */
|
||||
if(dm->drawObject->legacy)
|
||||
return 0;
|
||||
|
||||
cur_index_per_mat = MEM_mallocN(sizeof(int)*object->totmaterial,
|
||||
"GPU_buffer_setup.cur_index_per_mat");
|
||||
for(i = 0; i < object->totmaterial; i++) {
|
||||
/* for each material, the current index to copy data to */
|
||||
cur_index_per_mat[i] = object->materials[i].start * vector_size;
|
||||
|
||||
/* map from original material index to new
|
||||
GPUBufferMaterial index */
|
||||
mat_orig_to_new[object->materials[i].mat_nr] = i;
|
||||
}
|
||||
|
||||
object->indexMem = MEM_callocN(sizeof(IndexLink)*object->nelements,"GPU_drawobject_new_indexMem");
|
||||
object->indexMemUsage = 0;
|
||||
if(useVBOs) {
|
||||
success = 0;
|
||||
|
||||
#define ADDLINK( INDEX, ACTUAL ) \
|
||||
if( object->indices[INDEX].element == -1 ) { \
|
||||
object->indices[INDEX].element = ACTUAL; \
|
||||
} else { \
|
||||
IndexLink *lnk = &object->indices[INDEX]; \
|
||||
while( lnk->next != 0 ) lnk = lnk->next; \
|
||||
lnk->next = &object->indexMem[object->indexMemUsage]; \
|
||||
lnk->next->element = ACTUAL; \
|
||||
object->indexMemUsage++; \
|
||||
while(!success) {
|
||||
/* bind the buffer and discard previous data,
|
||||
avoids stalling gpu */
|
||||
glBindBufferARB(target, buffer->id);
|
||||
glBufferDataARB(target, buffer->size, 0, GL_STATIC_DRAW_ARB);
|
||||
|
||||
/* attempt to map the buffer */
|
||||
if(!(varray = glMapBufferARB(target, GL_WRITE_ONLY_ARB))) {
|
||||
/* failed to map the buffer; delete it */
|
||||
GPU_buffer_free(buffer);
|
||||
gpu_buffer_pool_delete_last(pool);
|
||||
buffer= NULL;
|
||||
|
||||
/* try freeing an entry from the pool
|
||||
and reallocating the buffer */
|
||||
if(pool->totbuf > 0) {
|
||||
gpu_buffer_pool_delete_last(pool);
|
||||
buffer = GPU_buffer_alloc(size);
|
||||
}
|
||||
|
||||
/* allocation still failed; fall back
|
||||
to legacy mode */
|
||||
if(!buffer) {
|
||||
dm->drawObject->legacy = 1;
|
||||
success = 1;
|
||||
}
|
||||
}
|
||||
else {
|
||||
success = 1;
|
||||
}
|
||||
}
|
||||
|
||||
for( i=0; i < numfaces; i++ ) {
|
||||
int curInd = index[redir[mface[i].mat_nr]];
|
||||
object->faceRemap[curInd] = i;
|
||||
ADDLINK( mface[i].v1, curInd*3 );
|
||||
ADDLINK( mface[i].v2, curInd*3+1 );
|
||||
ADDLINK( mface[i].v3, curInd*3+2 );
|
||||
if( mface[i].v4 ) {
|
||||
object->faceRemap[curInd+1] = i;
|
||||
ADDLINK( mface[i].v3, curInd*3+3 );
|
||||
ADDLINK( mface[i].v4, curInd*3+4 );
|
||||
ADDLINK( mface[i].v1, curInd*3+5 );
|
||||
|
||||
index[redir[mface[i].mat_nr]]+=2;
|
||||
/* check legacy fallback didn't happen */
|
||||
if(dm->drawObject->legacy == 0) {
|
||||
uploaded = GL_FALSE;
|
||||
/* attempt to upload the data to the VBO */
|
||||
while(uploaded == GL_FALSE) {
|
||||
(*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
|
||||
/* glUnmapBuffer returns GL_FALSE if
|
||||
the data store is corrupted; retry
|
||||
in that case */
|
||||
uploaded = glUnmapBufferARB(target);
|
||||
}
|
||||
}
|
||||
glBindBufferARB(target, 0);
|
||||
}
|
||||
else {
|
||||
/* VBO not supported, use vertex array fallback */
|
||||
if(buffer->pointer) {
|
||||
varray = buffer->pointer;
|
||||
(*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
|
||||
}
|
||||
else {
|
||||
index[redir[mface[i].mat_nr]]++;
|
||||
dm->drawObject->legacy = 1;
|
||||
}
|
||||
}
|
||||
|
||||
for( i = 0; i < object->nindices; i++ ) {
|
||||
if( object->indices[i].element == -1 ) {
|
||||
object->indices[i].element = object->nelements + object->nlooseverts;
|
||||
object->nlooseverts++;
|
||||
}
|
||||
}
|
||||
#undef ADDLINK
|
||||
MEM_freeN(cur_index_per_mat);
|
||||
|
||||
MEM_freeN(index);
|
||||
return object;
|
||||
return buffer;
|
||||
}
|
||||
|
||||
void GPU_drawobject_free( DerivedMesh *dm )
|
||||
static void GPU_buffer_copy_vertex(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
|
||||
{
|
||||
GPUDrawObject *object;
|
||||
MVert *mvert;
|
||||
MFace *f;
|
||||
int i, j, start, totface;
|
||||
|
||||
DEBUG_VBO("GPU_drawobject_free\n");
|
||||
mvert = dm->getVertArray(dm);
|
||||
f = dm->getFaceArray(dm);
|
||||
|
||||
if( dm == 0 )
|
||||
return;
|
||||
object = dm->drawObject;
|
||||
if( object == 0 )
|
||||
return;
|
||||
totface= dm->getNumFaces(dm);
|
||||
for(i = 0; i < totface; i++, f++) {
|
||||
start = index[mat_orig_to_new[f->mat_nr]];
|
||||
|
||||
MEM_freeN(object->materials);
|
||||
MEM_freeN(object->faceRemap);
|
||||
MEM_freeN(object->indices);
|
||||
MEM_freeN(object->indexMem);
|
||||
GPU_buffer_free( object->vertices, globalPool );
|
||||
GPU_buffer_free( object->normals, globalPool );
|
||||
GPU_buffer_free( object->uv, globalPool );
|
||||
GPU_buffer_free( object->colors, globalPool );
|
||||
GPU_buffer_free( object->edges, globalPool );
|
||||
GPU_buffer_free( object->uvedges, globalPool );
|
||||
/* v1 v2 v3 */
|
||||
copy_v3_v3(&varray[start], mvert[f->v1].co);
|
||||
copy_v3_v3(&varray[start+3], mvert[f->v2].co);
|
||||
copy_v3_v3(&varray[start+6], mvert[f->v3].co);
|
||||
index[mat_orig_to_new[f->mat_nr]] += 9;
|
||||
|
||||
MEM_freeN(object);
|
||||
dm->drawObject = 0;
|
||||
if(f->v4) {
|
||||
/* v3 v4 v1 */
|
||||
copy_v3_v3(&varray[start+9], mvert[f->v3].co);
|
||||
copy_v3_v3(&varray[start+12], mvert[f->v4].co);
|
||||
copy_v3_v3(&varray[start+15], mvert[f->v1].co);
|
||||
index[mat_orig_to_new[f->mat_nr]] += 9;
|
||||
}
|
||||
}
|
||||
|
||||
/* copy loose points */
|
||||
j = dm->drawObject->tot_triangle_point*3;
|
||||
for(i = 0; i < dm->drawObject->totvert; i++) {
|
||||
if(dm->drawObject->vert_points[i].point_index >= dm->drawObject->tot_triangle_point) {
|
||||
copy_v3_v3(&varray[j],mvert[i].co);
|
||||
j+=3;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void GPU_buffer_copy_normal(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
|
||||
{
|
||||
int i, totface;
|
||||
int start;
|
||||
float f_no[3];
|
||||
|
||||
float *nors= dm->getFaceDataArray(dm, CD_NORMAL);
|
||||
MVert *mvert = dm->getVertArray(dm);
|
||||
MFace *f = dm->getFaceArray(dm);
|
||||
|
||||
totface= dm->getNumFaces(dm);
|
||||
for(i = 0; i < totface; i++, f++) {
|
||||
const int smoothnormal = (f->flag & ME_SMOOTH);
|
||||
|
||||
start = index[mat_orig_to_new[f->mat_nr]];
|
||||
index[mat_orig_to_new[f->mat_nr]] += f->v4 ? 18 : 9;
|
||||
|
||||
if(smoothnormal) {
|
||||
/* copy vertex normal */
|
||||
normal_short_to_float_v3(&varray[start], mvert[f->v1].no);
|
||||
normal_short_to_float_v3(&varray[start+3], mvert[f->v2].no);
|
||||
normal_short_to_float_v3(&varray[start+6], mvert[f->v3].no);
|
||||
|
||||
if(f->v4) {
|
||||
normal_short_to_float_v3(&varray[start+9], mvert[f->v3].no);
|
||||
normal_short_to_float_v3(&varray[start+12], mvert[f->v4].no);
|
||||
normal_short_to_float_v3(&varray[start+15], mvert[f->v1].no);
|
||||
}
|
||||
}
|
||||
else if(nors) {
|
||||
/* copy cached face normal */
|
||||
copy_v3_v3(&varray[start], &nors[i*3]);
|
||||
copy_v3_v3(&varray[start+3], &nors[i*3]);
|
||||
copy_v3_v3(&varray[start+6], &nors[i*3]);
|
||||
|
||||
if(f->v4) {
|
||||
copy_v3_v3(&varray[start+9], &nors[i*3]);
|
||||
copy_v3_v3(&varray[start+12], &nors[i*3]);
|
||||
copy_v3_v3(&varray[start+15], &nors[i*3]);
|
||||
}
|
||||
}
|
||||
else {
|
||||
/* calculate face normal */
|
||||
if(f->v4)
|
||||
normal_quad_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co, mvert[f->v4].co);
|
||||
else
|
||||
normal_tri_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co);
|
||||
|
||||
copy_v3_v3(&varray[start], f_no);
|
||||
copy_v3_v3(&varray[start+3], f_no);
|
||||
copy_v3_v3(&varray[start+6], f_no);
|
||||
|
||||
if(f->v4) {
|
||||
copy_v3_v3(&varray[start+9], f_no);
|
||||
copy_v3_v3(&varray[start+12], f_no);
|
||||
copy_v3_v3(&varray[start+15], f_no);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void GPU_buffer_copy_uv(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
|
||||
{
|
||||
int start;
|
||||
int i, totface;
|
||||
|
||||
MTFace *mtface;
|
||||
MFace *f;
|
||||
|
||||
if(!(mtface = DM_get_face_data_layer(dm, CD_MTFACE)))
|
||||
return;
|
||||
f = dm->getFaceArray(dm);
|
||||
|
||||
totface = dm->getNumFaces(dm);
|
||||
for(i = 0; i < totface; i++, f++) {
|
||||
start = index[mat_orig_to_new[f->mat_nr]];
|
||||
|
||||
/* v1 v2 v3 */
|
||||
copy_v2_v2(&varray[start],mtface[i].uv[0]);
|
||||
copy_v2_v2(&varray[start+2],mtface[i].uv[1]);
|
||||
copy_v2_v2(&varray[start+4],mtface[i].uv[2]);
|
||||
index[mat_orig_to_new[f->mat_nr]] += 6;
|
||||
|
||||
if(f->v4) {
|
||||
/* v3 v4 v1 */
|
||||
copy_v2_v2(&varray[start+6],mtface[i].uv[2]);
|
||||
copy_v2_v2(&varray[start+8],mtface[i].uv[3]);
|
||||
copy_v2_v2(&varray[start+10],mtface[i].uv[0]);
|
||||
index[mat_orig_to_new[f->mat_nr]] += 6;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void GPU_buffer_copy_color3(DerivedMesh *dm, float *varray_, int *index, int *mat_orig_to_new, void *user)
|
||||
{
|
||||
int i, totface;
|
||||
unsigned char *varray = (unsigned char *)varray_;
|
||||
unsigned char *mcol = (unsigned char *)user;
|
||||
MFace *f = dm->getFaceArray(dm);
|
||||
|
||||
totface= dm->getNumFaces(dm);
|
||||
for(i=0; i < totface; i++, f++) {
|
||||
int start = index[mat_orig_to_new[f->mat_nr]];
|
||||
|
||||
/* v1 v2 v3 */
|
||||
VECCOPY(&varray[start], &mcol[i*12]);
|
||||
VECCOPY(&varray[start+3], &mcol[i*12+3]);
|
||||
VECCOPY(&varray[start+6], &mcol[i*12+6]);
|
||||
index[mat_orig_to_new[f->mat_nr]] += 9;
|
||||
|
||||
if(f->v4) {
|
||||
/* v3 v4 v1 */
|
||||
VECCOPY(&varray[start+9], &mcol[i*12+6]);
|
||||
VECCOPY(&varray[start+12], &mcol[i*12+9]);
|
||||
VECCOPY(&varray[start+15], &mcol[i*12]);
|
||||
index[mat_orig_to_new[f->mat_nr]] += 9;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void copy_mcol_uc3(unsigned char *v, unsigned char *col)
|
||||
{
|
||||
v[0] = col[3];
|
||||
v[1] = col[2];
|
||||
v[2] = col[1];
|
||||
}
|
||||
|
||||
/* treat varray_ as an array of MCol, four MCol's per face */
|
||||
static void GPU_buffer_copy_mcol(DerivedMesh *dm, float *varray_, int *index, int *mat_orig_to_new, void *user)
|
||||
{
|
||||
int i, totface;
|
||||
unsigned char *varray = (unsigned char *)varray_;
|
||||
unsigned char *mcol = (unsigned char *)user;
|
||||
MFace *f = dm->getFaceArray(dm);
|
||||
|
||||
totface= dm->getNumFaces(dm);
|
||||
for(i=0; i < totface; i++, f++) {
|
||||
int start = index[mat_orig_to_new[f->mat_nr]];
|
||||
|
||||
/* v1 v2 v3 */
|
||||
copy_mcol_uc3(&varray[start], &mcol[i*16]);
|
||||
copy_mcol_uc3(&varray[start+3], &mcol[i*16+4]);
|
||||
copy_mcol_uc3(&varray[start+6], &mcol[i*16+8]);
|
||||
index[mat_orig_to_new[f->mat_nr]] += 9;
|
||||
|
||||
if(f->v4) {
|
||||
/* v3 v4 v1 */
|
||||
copy_mcol_uc3(&varray[start+9], &mcol[i*16+8]);
|
||||
copy_mcol_uc3(&varray[start+12], &mcol[i*16+12]);
|
||||
copy_mcol_uc3(&varray[start+15], &mcol[i*16]);
|
||||
index[mat_orig_to_new[f->mat_nr]] += 9;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void GPU_buffer_copy_edge(DerivedMesh *dm, float *varray_, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
|
||||
{
|
||||
MEdge *medge;
|
||||
unsigned int *varray = (unsigned int *)varray_;
|
||||
int i, totedge;
|
||||
|
||||
medge = dm->getEdgeArray(dm);
|
||||
totedge = dm->getNumEdges(dm);
|
||||
|
||||
for(i = 0; i < totedge; i++, medge++) {
|
||||
varray[i*2] = dm->drawObject->vert_points[medge->v1].point_index;
|
||||
varray[i*2+1] = dm->drawObject->vert_points[medge->v2].point_index;
|
||||
}
|
||||
}
|
||||
|
||||
static void GPU_buffer_copy_uvedge(DerivedMesh *dm, float *varray, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
|
||||
{
|
||||
MTFace *tf = DM_get_face_data_layer(dm, CD_MTFACE);
|
||||
int i, j=0;
|
||||
|
||||
if(!tf)
|
||||
return;
|
||||
|
||||
for(i = 0; i < dm->numFaceData; i++, tf++) {
|
||||
MFace mf;
|
||||
dm->getFace(dm,i,&mf);
|
||||
|
||||
copy_v2_v2(&varray[j],tf->uv[0]);
|
||||
copy_v2_v2(&varray[j+2],tf->uv[1]);
|
||||
|
||||
copy_v2_v2(&varray[j+4],tf->uv[1]);
|
||||
copy_v2_v2(&varray[j+6],tf->uv[2]);
|
||||
|
||||
if(!mf.v4) {
|
||||
copy_v2_v2(&varray[j+8],tf->uv[2]);
|
||||
copy_v2_v2(&varray[j+10],tf->uv[0]);
|
||||
j+=12;
|
||||
} else {
|
||||
copy_v2_v2(&varray[j+8],tf->uv[2]);
|
||||
copy_v2_v2(&varray[j+10],tf->uv[3]);
|
||||
|
||||
copy_v2_v2(&varray[j+12],tf->uv[3]);
|
||||
copy_v2_v2(&varray[j+14],tf->uv[0]);
|
||||
j+=16;
|
||||
}
|
||||
}
|
||||
}

/* get the DerivedMesh's MCols; choose (in decreasing order of
   preference) from CD_ID_MCOL, CD_WEIGHT_MCOL, or CD_MCOL */
static MCol *gpu_buffer_color_type(DerivedMesh *dm)
{
	MCol *c;
	int type;

	type = CD_ID_MCOL;
	c = DM_get_face_data_layer(dm, type);
	if(!c) {
		type = CD_WEIGHT_MCOL;
		c = DM_get_face_data_layer(dm, type);
		if(!c) {
			type = CD_MCOL;
			c = DM_get_face_data_layer(dm, type);
		}
	}

	dm->drawObject->colType = type;
	return c;
}

typedef enum {
	GPU_BUFFER_VERTEX = 0,
	GPU_BUFFER_NORMAL,
	GPU_BUFFER_COLOR,
	GPU_BUFFER_UV,
	GPU_BUFFER_EDGE,
	GPU_BUFFER_UVEDGE,
} GPUBufferType;

typedef struct {
	GPUBufferCopyFunc copy;
	GLenum gl_buffer_type;
	int vector_size;
} GPUBufferTypeSettings;

const GPUBufferTypeSettings gpu_buffer_type_settings[] = {
	{GPU_buffer_copy_vertex, GL_ARRAY_BUFFER_ARB, 3},
	{GPU_buffer_copy_normal, GL_ARRAY_BUFFER_ARB, 3},
	{GPU_buffer_copy_mcol, GL_ARRAY_BUFFER_ARB, 3},
	{GPU_buffer_copy_uv, GL_ARRAY_BUFFER_ARB, 2},
	{GPU_buffer_copy_edge, GL_ELEMENT_ARRAY_BUFFER_ARB, 2},
	{GPU_buffer_copy_uvedge, GL_ELEMENT_ARRAY_BUFFER_ARB, 4}
};

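/* Illustrative sketch only (not part of this change): the settings table
 * above is indexed directly by GPUBufferType (see gpu_buffer_setup_type
 * below), so the enum order and the row order must stay in sync. The
 * function name below is hypothetical and only demonstrates the mapping. */
#if 0
static int example_check_type_settings(void)
{
	/* GPU_BUFFER_UV picks the UV row: copy_uv with 2 floats per point */
	const GPUBufferTypeSettings *ts = &gpu_buffer_type_settings[GPU_BUFFER_UV];

	return (ts->copy == GPU_buffer_copy_uv) && (ts->vector_size == 2);
}
#endif
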
/* get the GPUDrawObject buffer associated with a type */
static GPUBuffer **gpu_drawobject_buffer_from_type(GPUDrawObject *gdo, GPUBufferType type)
{
	switch(type) {
	case GPU_BUFFER_VERTEX:
		return &gdo->points;
	case GPU_BUFFER_NORMAL:
		return &gdo->normals;
	case GPU_BUFFER_COLOR:
		return &gdo->colors;
	case GPU_BUFFER_UV:
		return &gdo->uv;
	case GPU_BUFFER_EDGE:
		return &gdo->edges;
	case GPU_BUFFER_UVEDGE:
		return &gdo->uvedges;
	default:
		return NULL;
	}
}

/* get the amount of space to allocate for a buffer of a particular type */
static int gpu_buffer_size_from_type(DerivedMesh *dm, GPUBufferType type)
{
	switch(type) {
	case GPU_BUFFER_VERTEX:
		return sizeof(float)*3 * (dm->drawObject->tot_triangle_point + dm->drawObject->tot_loose_point);
	case GPU_BUFFER_NORMAL:
		return sizeof(float)*3*dm->drawObject->tot_triangle_point;
	case GPU_BUFFER_COLOR:
		return sizeof(char)*3*dm->drawObject->tot_triangle_point;
	case GPU_BUFFER_UV:
		return sizeof(float)*2*dm->drawObject->tot_triangle_point;
	case GPU_BUFFER_EDGE:
		return sizeof(int)*2*dm->drawObject->totedge;
	case GPU_BUFFER_UVEDGE:
		/* each face gets 3 points, 3 edges per triangle, and
		   each edge has its own, non-shared coords, so each
		   tri corner needs minimum of 4 floats, quads used
		   less so here we can over allocate and assume all
		   tris. */
		return sizeof(float) * dm->drawObject->tot_triangle_point;
	default:
		return -1;
	}
}

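/* Illustrative sketch only (not part of this change): rough arithmetic for
 * the sizes returned above. For a hypothetical mesh of 100 quads and no
 * loose vertices each quad is split into two triangles, so
 * tot_triangle_point = 100 * 6 = 600; the vertex VBO is then
 * sizeof(float)*3*600 = 7200 bytes and the UV VBO sizeof(float)*2*600 =
 * 4800 bytes. The function name below is hypothetical. */
#if 0
static void example_buffer_sizes(DerivedMesh *dm)
{
	/* assumes dm->drawObject has already been created */
	int vert_bytes = gpu_buffer_size_from_type(dm, GPU_BUFFER_VERTEX);
	int uv_bytes   = gpu_buffer_size_from_type(dm, GPU_BUFFER_UV);
	int edge_bytes = gpu_buffer_size_from_type(dm, GPU_BUFFER_EDGE);

	(void)vert_bytes; (void)uv_bytes; (void)edge_bytes;
}
#endif
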
/* call gpu_buffer_setup with settings for a particular type of buffer */
static GPUBuffer *gpu_buffer_setup_type(DerivedMesh *dm, GPUBufferType type)
{
	const GPUBufferTypeSettings *ts;
	void *user_data = NULL;
	GPUBuffer *buf;

	ts = &gpu_buffer_type_settings[type];

	/* special handling for MCol and UV buffers */
	if(type == GPU_BUFFER_COLOR) {
		if(!(user_data = gpu_buffer_color_type(dm)))
			return NULL;
	}
	else if(type == GPU_BUFFER_UV) {
		if(!DM_get_face_data_layer(dm, CD_MTFACE))
			return NULL;
	}

	buf = gpu_buffer_setup(dm, dm->drawObject, ts->vector_size,
			       gpu_buffer_size_from_type(dm, type),
			       ts->gl_buffer_type, user_data, ts->copy);

	return buf;
}

/* get the buffer of `type', initializing the GPUDrawObject and
   buffer if needed */
static GPUBuffer *gpu_buffer_setup_common(DerivedMesh *dm, GPUBufferType type)
{
	GPUBuffer **buf;

	if(!dm->drawObject)
		dm->drawObject = GPU_drawobject_new(dm);

	buf = gpu_drawobject_buffer_from_type(dm->drawObject, type);
	if(!(*buf))
		*buf = gpu_buffer_setup_type(dm, type);

	return *buf;
}

void GPU_vertex_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
		return;

	glEnableClientState(GL_VERTEX_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
		glVertexPointer(3, GL_FLOAT, 0, 0);
	}
	else {
		glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
	}

	GLStates |= GPU_BUFFER_VERTEX_STATE;
}

void GPU_normal_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_NORMAL))
		return;

	glEnableClientState(GL_NORMAL_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->normals->id);
		glNormalPointer(GL_FLOAT, 0, 0);
	}
	else {
		glNormalPointer(GL_FLOAT, 0, dm->drawObject->normals->pointer);
	}

	GLStates |= GPU_BUFFER_NORMAL_STATE;
}

void GPU_uv_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_UV))
		return;

	glEnableClientState(GL_TEXTURE_COORD_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uv->id);
		glTexCoordPointer(2, GL_FLOAT, 0, 0);
	}
	else {
		glTexCoordPointer(2, GL_FLOAT, 0, dm->drawObject->uv->pointer);
	}

	GLStates |= GPU_BUFFER_TEXCOORD_STATE;
}

void GPU_color_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_COLOR))
		return;

	glEnableClientState(GL_COLOR_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->colors->id);
		glColorPointer(3, GL_UNSIGNED_BYTE, 0, 0);
	}
	else {
		glColorPointer(3, GL_UNSIGNED_BYTE, 0, dm->drawObject->colors->pointer);
	}

	GLStates |= GPU_BUFFER_COLOR_STATE;
}

void GPU_edge_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_EDGE))
		return;

	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
		return;

	glEnableClientState(GL_VERTEX_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
		glVertexPointer(3, GL_FLOAT, 0, 0);
	}
	else {
		glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
	}

	GLStates |= GPU_BUFFER_VERTEX_STATE;

	if(useVBOs)
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, dm->drawObject->edges->id);

	GLStates |= GPU_BUFFER_ELEMENT_STATE;
}

void GPU_uvedge_setup(DerivedMesh *dm)
{
	if(!gpu_buffer_setup_common(dm, GPU_BUFFER_UVEDGE))
		return;

	glEnableClientState(GL_VERTEX_ARRAY);
	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uvedges->id);
		glVertexPointer(2, GL_FLOAT, 0, 0);
	}
	else {
		glVertexPointer(2, GL_FLOAT, 0, dm->drawObject->uvedges->pointer);
	}

	GLStates |= GPU_BUFFER_VERTEX_STATE;
}

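/* Illustrative sketch only (not part of this change): how a hypothetical
 * draw function in a caller such as cdderivedmesh would typically combine
 * the setup calls above with GPU_buffer_unbind() below, falling back when
 * GPU_buffer_legacy() reports that VBO setup failed or was disabled. */
#if 0
static void example_draw_points(DerivedMesh *dm)
{
	GPU_vertex_setup(dm);
	if(!GPU_buffer_legacy(dm)) {
		/* triangle points first, loose points appended after them */
		glDrawArrays(GL_POINTS, 0,
			     dm->drawObject->tot_triangle_point +
			     dm->drawObject->tot_loose_point);
	}
	GPU_buffer_unbind();
}
#endif
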
static int GPU_typesize(int type) {
	switch(type) {
	case GL_FLOAT:
		return sizeof(float);
	case GL_INT:
		return sizeof(int);
	case GL_UNSIGNED_INT:
		return sizeof(unsigned int);
	case GL_BYTE:
		return sizeof(char);
	case GL_UNSIGNED_BYTE:
		return sizeof(unsigned char);
	default:
		return 0;
	}
}

int GPU_attrib_element_size(GPUAttrib data[], int numdata) {
	int i, elementsize = 0;

	for(i = 0; i < numdata; i++) {
		int typesize = GPU_typesize(data[i].type);
		if(typesize != 0)
			elementsize += typesize*data[i].size;
	}
	return elementsize;
}

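/* Illustrative sketch only (not part of this change): for a hypothetical
 * interleaved layout of a 3-float attribute followed by a 2-float
 * attribute, GPU_attrib_element_size() returns 3*4 + 2*4 = 20 bytes; that
 * value becomes the stride handed to glVertexAttribPointerARB in
 * GPU_interleaved_attrib_setup() below. The function name is made up. */
#if 0
static int example_attrib_stride(void)
{
	GPUAttrib attribs[2];

	attribs[0].index = 0; attribs[0].size = 3; attribs[0].type = GL_FLOAT;
	attribs[1].index = 1; attribs[1].size = 2; attribs[1].type = GL_FLOAT;

	return GPU_attrib_element_size(attribs, 2);	/* 20 with 4-byte floats */
}
#endif
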
void GPU_interleaved_attrib_setup(GPUBuffer *buffer, GPUAttrib data[], int numdata) {
	int i;
	int elementsize;
	intptr_t offset = 0;

	for(i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
		if(attribData[i].index != -1) {
			glDisableVertexAttribArrayARB(attribData[i].index);
		}
		else
			break;
	}
	elementsize = GPU_attrib_element_size(data, numdata);

	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
		for(i = 0; i < numdata; i++) {
			glEnableVertexAttribArrayARB(data[i].index);
			glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
						 GL_FALSE, elementsize, (void *)offset);
			offset += data[i].size*GPU_typesize(data[i].type);

			attribData[i].index = data[i].index;
			attribData[i].size = data[i].size;
			attribData[i].type = data[i].type;
		}
		attribData[numdata].index = -1;
	}
	else {
		for(i = 0; i < numdata; i++) {
			glEnableVertexAttribArrayARB(data[i].index);
			glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
						 GL_FALSE, elementsize, (char *)buffer->pointer + offset);
			offset += data[i].size*GPU_typesize(data[i].type);
		}
	}
}


void GPU_buffer_unbind(void)
{
	int i;

	if(GLStates & GPU_BUFFER_VERTEX_STATE)
		glDisableClientState(GL_VERTEX_ARRAY);
	if(GLStates & GPU_BUFFER_NORMAL_STATE)
		glDisableClientState(GL_NORMAL_ARRAY);
	if(GLStates & GPU_BUFFER_TEXCOORD_STATE)
		glDisableClientState(GL_TEXTURE_COORD_ARRAY);
	if(GLStates & GPU_BUFFER_COLOR_STATE)
		glDisableClientState(GL_COLOR_ARRAY);
	if(GLStates & GPU_BUFFER_ELEMENT_STATE) {
		if(useVBOs) {
			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
		}
	}
	GLStates &= !(GPU_BUFFER_VERTEX_STATE | GPU_BUFFER_NORMAL_STATE |
		      GPU_BUFFER_TEXCOORD_STATE | GPU_BUFFER_COLOR_STATE |
		      GPU_BUFFER_ELEMENT_STATE);

	for(i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
		if(attribData[i].index != -1) {
			glDisableVertexAttribArrayARB(attribData[i].index);
		}
		else
			break;
	}

	if(useVBOs)
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
}

/* confusion: code in cdderivedmesh calls both GPU_color_setup and
   GPU_color3_upload; both of these set the `colors' buffer, so seems
   like it will just needlessly overwrite? --nicholas */
void GPU_color3_upload(DerivedMesh *dm, unsigned char *data)
{
	if(dm->drawObject == 0)
		dm->drawObject = GPU_drawobject_new(dm);
	GPU_buffer_free(dm->drawObject->colors);

	dm->drawObject->colors = gpu_buffer_setup(dm, dm->drawObject, 3,
						  sizeof(char)*3*dm->drawObject->tot_triangle_point,
						  GL_ARRAY_BUFFER_ARB, data, GPU_buffer_copy_color3);
}

/* this is used only in cdDM_drawFacesColored, which I think is no
   longer used, so can probably remove this --nicholas */
void GPU_color4_upload(DerivedMesh *UNUSED(dm), unsigned char *UNUSED(data))
{
	/*if(dm->drawObject == 0)
		dm->drawObject = GPU_drawobject_new(dm);
	GPU_buffer_free(dm->drawObject->colors);
	dm->drawObject->colors = gpu_buffer_setup(dm, dm->drawObject, 3,
						  sizeof(char)*3*dm->drawObject->tot_triangle_point,
						  GL_ARRAY_BUFFER_ARB, data, GPU_buffer_copy_color4);*/
}

void GPU_color_switch(int mode)
{
	if(mode) {
		if(!(GLStates & GPU_BUFFER_COLOR_STATE))
			glEnableClientState(GL_COLOR_ARRAY);
		GLStates |= GPU_BUFFER_COLOR_STATE;
	}
	else {
		if(GLStates & GPU_BUFFER_COLOR_STATE)
			glDisableClientState(GL_COLOR_ARRAY);
		GLStates &= (!GPU_BUFFER_COLOR_STATE);
	}
}

/* return 1 if drawing should be done using old immediate-mode
   code, 0 otherwise */
int GPU_buffer_legacy(DerivedMesh *dm)
{
	int test= (U.gameflags & USER_DISABLE_VBO);
	if(test)
		return 1;

	if(dm->drawObject == 0)
		dm->drawObject = GPU_drawobject_new(dm);
	return dm->drawObject->legacy;
}

void *GPU_buffer_lock(GPUBuffer *buffer)
{
	float *varray;

	if(!buffer)
		return 0;

	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
		varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
		return varray;
	}
	else {
		return buffer->pointer;
	}
}

void *GPU_buffer_lock_stream(GPUBuffer *buffer)
{
	float *varray;

	if(!buffer)
		return 0;

	if(useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
		/* discard previous data, avoid stalling gpu */
		glBufferDataARB(GL_ARRAY_BUFFER_ARB, buffer->size, 0, GL_STREAM_DRAW_ARB);
		varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
		return varray;
	}
	else {
		return buffer->pointer;
	}
}

void GPU_buffer_unlock(GPUBuffer *buffer)
{
	if(useVBOs) {
		if(buffer) {
			/* note: this operation can fail, could return
			   an error code from this function? */
			glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
		}
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
	}
}

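/* Illustrative sketch only (not part of this change): the intended
 * lock/unlock pairing. A hypothetical caller maps the buffer, writes its
 * data into the returned array (or into the fallback pointer when VBOs
 * are unavailable) and unlocks it again before drawing. */
#if 0
static void example_fill_buffer(GPUBuffer *buffer, const float *data, int totfloat)
{
	float *varray = GPU_buffer_lock(buffer);

	if(varray) {
		memcpy(varray, data, sizeof(float) * totfloat);
		GPU_buffer_unlock(buffer);
	}
}
#endif
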
/* used for drawing edges */
void GPU_buffer_draw_elements(GPUBuffer *elements, unsigned int mode, int start, int count)
{
	glDrawElements(mode, count, GL_UNSIGNED_INT,
		       (useVBOs ?
			(void*)(start * sizeof(unsigned int)) :
			((int*)elements->pointer) + start));
}

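/* Illustrative sketch only (not part of this change): edge drawing as a
 * hypothetical caller would do it, pairing GPU_edge_setup (which binds
 * both the vertex VBO and the edge index VBO) with the helper above. */
#if 0
static void example_draw_edges(DerivedMesh *dm)
{
	GPU_edge_setup(dm);
	if(!GPU_buffer_legacy(dm))
		GPU_buffer_draw_elements(dm->drawObject->edges, GL_LINES, 0,
					 dm->drawObject->totedge * 2);
	GPU_buffer_unbind();
}
#endif
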

/* XXX: the rest of the code in this file is used for optimized PBVH
   drawing and doesn't interact at all with the buffer code above */

/* Convenience struct for building the VBO. */
typedef struct {
	float co[3];
@@ -781,887 +1648,3 @@ void GPU_free_buffers(void *buffers_v)
|
||||
}
|
||||
}
|
||||
|
||||
static GPUBuffer *GPU_buffer_setup( DerivedMesh *dm, GPUDrawObject *object, int vector_size, int size, GLenum target, void *user, void (*copy_f)(DerivedMesh *, float *, int *, int *, void *) )
|
||||
{
|
||||
GPUBuffer *buffer;
|
||||
float *varray;
|
||||
int redir[MAX_MATERIALS];
|
||||
int *index;
|
||||
int i;
|
||||
int success;
|
||||
GLboolean uploaded;
|
||||
|
||||
DEBUG_VBO("GPU_buffer_setup\n");
|
||||
|
||||
if( globalPool == 0 )
|
||||
globalPool = GPU_buffer_pool_new();
|
||||
|
||||
buffer = GPU_buffer_alloc(size,globalPool);
|
||||
if( buffer == 0 ) {
|
||||
dm->drawObject->legacy = 1;
|
||||
}
|
||||
if( dm->drawObject->legacy ) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
index = MEM_mallocN(sizeof(int)*object->nmaterials,"GPU_buffer_setup");
|
||||
for( i = 0; i < object->nmaterials; i++ ) {
|
||||
index[i] = object->materials[i].start*vector_size;
|
||||
redir[object->materials[i].mat_nr] = i;
|
||||
}
|
||||
|
||||
if( useVBOs ) {
|
||||
success = 0;
|
||||
while( success == 0 ) {
|
||||
glBindBufferARB( target, buffer->id );
|
||||
glBufferDataARB( target, buffer->size, 0, GL_STATIC_DRAW_ARB ); /* discard previous data, avoid stalling gpu */
|
||||
varray = glMapBufferARB( target, GL_WRITE_ONLY_ARB );
|
||||
if( varray == 0 ) {
|
||||
DEBUG_VBO( "Failed to map buffer to client address space\n" );
|
||||
GPU_buffer_free( buffer, globalPool );
|
||||
GPU_buffer_pool_delete_last( globalPool );
|
||||
buffer= NULL;
|
||||
if( globalPool->size > 0 ) {
|
||||
GPU_buffer_pool_delete_last( globalPool );
|
||||
buffer = GPU_buffer_alloc( size, globalPool );
|
||||
if( buffer == 0 ) {
|
||||
dm->drawObject->legacy = 1;
|
||||
success = 1;
|
||||
}
|
||||
}
|
||||
else {
|
||||
dm->drawObject->legacy = 1;
|
||||
success = 1;
|
||||
}
|
||||
}
|
||||
else {
|
||||
success = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if( dm->drawObject->legacy == 0 ) {
|
||||
uploaded = GL_FALSE;
|
||||
while( !uploaded ) {
|
||||
(*copy_f)( dm, varray, index, redir, user );
|
||||
uploaded = glUnmapBufferARB( target ); /* returns false if data got corruped during transfer */
|
||||
}
|
||||
}
|
||||
glBindBufferARB(target, 0);
|
||||
}
|
||||
else {
|
||||
if( buffer->pointer != 0 ) {
|
||||
varray = buffer->pointer;
|
||||
(*copy_f)( dm, varray, index, redir, user );
|
||||
}
|
||||
else {
|
||||
dm->drawObject->legacy = 1;
|
||||
}
|
||||
}
|
||||
|
||||
MEM_freeN(index);
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
static void GPU_buffer_copy_vertex(DerivedMesh *dm, float *varray, int *index, int *redir, void *UNUSED(user))
|
||||
{
|
||||
int start;
|
||||
int i, j, numfaces;
|
||||
|
||||
MVert *mvert;
|
||||
MFace *mface;
|
||||
|
||||
DEBUG_VBO("GPU_buffer_copy_vertex\n");
|
||||
|
||||
mvert = dm->getVertArray(dm);
|
||||
mface = dm->getFaceArray(dm);
|
||||
|
||||
numfaces= dm->getNumFaces(dm);
|
||||
for( i=0; i < numfaces; i++ ) {
|
||||
start = index[redir[mface[i].mat_nr]];
|
||||
if( mface[i].v4 )
|
||||
index[redir[mface[i].mat_nr]] += 18;
|
||||
else
|
||||
index[redir[mface[i].mat_nr]] += 9;
|
||||
|
||||
/* v1 v2 v3 */
|
||||
VECCOPY(&varray[start],mvert[mface[i].v1].co);
|
||||
VECCOPY(&varray[start+3],mvert[mface[i].v2].co);
|
||||
VECCOPY(&varray[start+6],mvert[mface[i].v3].co);
|
||||
|
||||
if( mface[i].v4 ) {
|
||||
/* v3 v4 v1 */
|
||||
VECCOPY(&varray[start+9],mvert[mface[i].v3].co);
|
||||
VECCOPY(&varray[start+12],mvert[mface[i].v4].co);
|
||||
VECCOPY(&varray[start+15],mvert[mface[i].v1].co);
|
||||
}
|
||||
}
|
||||
j = dm->drawObject->nelements*3;
|
||||
for( i = 0; i < dm->drawObject->nindices; i++ ) {
|
||||
if( dm->drawObject->indices[i].element >= dm->drawObject->nelements ) {
|
||||
VECCOPY(&varray[j],mvert[i].co);
|
||||
j+=3;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static GPUBuffer *GPU_buffer_vertex( DerivedMesh *dm )
|
||||
{
|
||||
DEBUG_VBO("GPU_buffer_vertex\n");
|
||||
|
||||
return GPU_buffer_setup( dm, dm->drawObject, 3, sizeof(float)*3*(dm->drawObject->nelements+dm->drawObject->nlooseverts), GL_ARRAY_BUFFER_ARB, 0, GPU_buffer_copy_vertex);
|
||||
}
|
||||
|
||||
static void GPU_buffer_copy_normal(DerivedMesh *dm, float *varray, int *index, int *redir, void *UNUSED(user))
|
||||
{
|
||||
int i, numfaces;
|
||||
int start;
|
||||
float norm[3];
|
||||
|
||||
float *nors= dm->getFaceDataArray(dm, CD_NORMAL);
|
||||
MVert *mvert = dm->getVertArray(dm);
|
||||
MFace *mface = dm->getFaceArray(dm);
|
||||
|
||||
DEBUG_VBO("GPU_buffer_copy_normal\n");
|
||||
|
||||
numfaces= dm->getNumFaces(dm);
|
||||
for( i=0; i < numfaces; i++ ) {
|
||||
const int smoothnormal = (mface[i].flag & ME_SMOOTH);
|
||||
|
||||
start = index[redir[mface[i].mat_nr]];
|
||||
if( mface[i].v4 )
|
||||
index[redir[mface[i].mat_nr]] += 18;
|
||||
else
|
||||
index[redir[mface[i].mat_nr]] += 9;
|
||||
|
||||
/* v1 v2 v3 */
|
||||
if(smoothnormal) {
|
||||
VECCOPY(&varray[start],mvert[mface[i].v1].no);
|
||||
VECCOPY(&varray[start+3],mvert[mface[i].v2].no);
|
||||
VECCOPY(&varray[start+6],mvert[mface[i].v3].no);
|
||||
}
|
||||
else {
|
||||
if( nors ) {
|
||||
VECCOPY(&varray[start],&nors[i*3]);
|
||||
VECCOPY(&varray[start+3],&nors[i*3]);
|
||||
VECCOPY(&varray[start+6],&nors[i*3]);
|
||||
}
|
||||
if( mface[i].v4 )
|
||||
normal_quad_v3( norm,mvert[mface[i].v1].co, mvert[mface[i].v2].co, mvert[mface[i].v3].co, mvert[mface[i].v4].co);
|
||||
else
|
||||
normal_tri_v3( norm,mvert[mface[i].v1].co, mvert[mface[i].v2].co, mvert[mface[i].v3].co);
|
||||
VECCOPY(&varray[start],norm);
|
||||
VECCOPY(&varray[start+3],norm);
|
||||
VECCOPY(&varray[start+6],norm);
|
||||
}
|
||||
|
||||
if( mface[i].v4 ) {
|
||||
/* v3 v4 v1 */
|
||||
if(smoothnormal) {
|
||||
VECCOPY(&varray[start+9],mvert[mface[i].v3].no);
|
||||
VECCOPY(&varray[start+12],mvert[mface[i].v4].no);
|
||||
VECCOPY(&varray[start+15],mvert[mface[i].v1].no);
|
||||
}
|
||||
else {
|
||||
VECCOPY(&varray[start+9],norm);
|
||||
VECCOPY(&varray[start+12],norm);
|
||||
VECCOPY(&varray[start+15],norm);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static GPUBuffer *GPU_buffer_normal( DerivedMesh *dm )
|
||||
{
|
||||
DEBUG_VBO("GPU_buffer_normal\n");
|
||||
|
||||
return GPU_buffer_setup( dm, dm->drawObject, 3, sizeof(float)*3*dm->drawObject->nelements, GL_ARRAY_BUFFER_ARB, 0, GPU_buffer_copy_normal);
|
||||
}
|
||||
|
||||
static void GPU_buffer_copy_uv(DerivedMesh *dm, float *varray, int *index, int *redir, void *UNUSED(user))
|
||||
{
|
||||
int start;
|
||||
int i, numfaces;
|
||||
|
||||
MTFace *mtface;
|
||||
MFace *mface;
|
||||
|
||||
DEBUG_VBO("GPU_buffer_copy_uv\n");
|
||||
|
||||
mface = dm->getFaceArray(dm);
|
||||
mtface = DM_get_face_data_layer(dm, CD_MTFACE);
|
||||
|
||||
if( mtface == 0 ) {
|
||||
DEBUG_VBO("Texture coordinates do not exist for this mesh");
|
||||
return;
|
||||
}
|
||||
|
||||
numfaces= dm->getNumFaces(dm);
|
||||
for( i=0; i < numfaces; i++ ) {
|
||||
start = index[redir[mface[i].mat_nr]];
|
||||
if( mface[i].v4 )
|
||||
index[redir[mface[i].mat_nr]] += 12;
|
||||
else
|
||||
index[redir[mface[i].mat_nr]] += 6;
|
||||
|
||||
/* v1 v2 v3 */
|
||||
VECCOPY2D(&varray[start],mtface[i].uv[0]);
|
||||
VECCOPY2D(&varray[start+2],mtface[i].uv[1]);
|
||||
VECCOPY2D(&varray[start+4],mtface[i].uv[2]);
|
||||
|
||||
if( mface[i].v4 ) {
|
||||
/* v3 v4 v1 */
|
||||
VECCOPY2D(&varray[start+6],mtface[i].uv[2]);
|
||||
VECCOPY2D(&varray[start+8],mtface[i].uv[3]);
|
||||
VECCOPY2D(&varray[start+10],mtface[i].uv[0]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static GPUBuffer *GPU_buffer_uv( DerivedMesh *dm )
|
||||
{
|
||||
DEBUG_VBO("GPU_buffer_uv\n");
|
||||
if( DM_get_face_data_layer(dm, CD_MTFACE) != 0 )
|
||||
return GPU_buffer_setup( dm, dm->drawObject, 2, sizeof(float)*2*dm->drawObject->nelements, GL_ARRAY_BUFFER_ARB, 0, GPU_buffer_copy_uv);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void GPU_buffer_copy_color3( DerivedMesh *dm, float *varray_, int *index, int *redir, void *user )
|
||||
{
|
||||
int i, numfaces;
|
||||
unsigned char *varray = (unsigned char *)varray_;
|
||||
unsigned char *mcol = (unsigned char *)user;
|
||||
MFace *mface = dm->getFaceArray(dm);
|
||||
|
||||
DEBUG_VBO("GPU_buffer_copy_color3\n");
|
||||
|
||||
numfaces= dm->getNumFaces(dm);
|
||||
for( i=0; i < numfaces; i++ ) {
|
||||
int start = index[redir[mface[i].mat_nr]];
|
||||
if( mface[i].v4 )
|
||||
index[redir[mface[i].mat_nr]] += 18;
|
||||
else
|
||||
index[redir[mface[i].mat_nr]] += 9;
|
||||
|
||||
/* v1 v2 v3 */
|
||||
VECCOPY(&varray[start],&mcol[i*12]);
|
||||
VECCOPY(&varray[start+3],&mcol[i*12+3]);
|
||||
VECCOPY(&varray[start+6],&mcol[i*12+6]);
|
||||
if( mface[i].v4 ) {
|
||||
/* v3 v4 v1 */
|
||||
VECCOPY(&varray[start+9],&mcol[i*12+6]);
|
||||
VECCOPY(&varray[start+12],&mcol[i*12+9]);
|
||||
VECCOPY(&varray[start+15],&mcol[i*12]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void GPU_buffer_copy_color4( DerivedMesh *dm, float *varray_, int *index, int *redir, void *user )
|
||||
{
|
||||
int i, numfaces;
|
||||
unsigned char *varray = (unsigned char *)varray_;
|
||||
unsigned char *mcol = (unsigned char *)user;
|
||||
MFace *mface = dm->getFaceArray(dm);
|
||||
|
||||
DEBUG_VBO("GPU_buffer_copy_color4\n");
|
||||
|
||||
numfaces= dm->getNumFaces(dm);
|
||||
for( i=0; i < numfaces; i++ ) {
|
||||
int start = index[redir[mface[i].mat_nr]];
|
||||
if( mface[i].v4 )
|
||||
index[redir[mface[i].mat_nr]] += 18;
|
||||
else
|
||||
index[redir[mface[i].mat_nr]] += 9;
|
||||
|
||||
/* v1 v2 v3 */
|
||||
VECCOPY(&varray[start],&mcol[i*16]);
|
||||
VECCOPY(&varray[start+3],&mcol[i*16+4]);
|
||||
VECCOPY(&varray[start+6],&mcol[i*16+8]);
|
||||
if( mface[i].v4 ) {
|
||||
/* v3 v4 v1 */
|
||||
VECCOPY(&varray[start+9],&mcol[i*16+8]);
|
||||
VECCOPY(&varray[start+12],&mcol[i*16+12]);
|
||||
VECCOPY(&varray[start+15],&mcol[i*16]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static GPUBuffer *GPU_buffer_color( DerivedMesh *dm )
|
||||
{
|
||||
unsigned char *colors;
|
||||
int i, numfaces;
|
||||
MCol *mcol;
|
||||
GPUBuffer *result;
|
||||
DEBUG_VBO("GPU_buffer_color\n");
|
||||
|
||||
mcol = DM_get_face_data_layer(dm, CD_ID_MCOL);
|
||||
dm->drawObject->colType = CD_ID_MCOL;
|
||||
if(!mcol) {
|
||||
mcol = DM_get_face_data_layer(dm, CD_WEIGHT_MCOL);
|
||||
dm->drawObject->colType = CD_WEIGHT_MCOL;
|
||||
}
|
||||
if(!mcol) {
|
||||
mcol = DM_get_face_data_layer(dm, CD_MCOL);
|
||||
dm->drawObject->colType = CD_MCOL;
|
||||
}
|
||||
|
||||
numfaces= dm->getNumFaces(dm);
|
||||
colors = MEM_mallocN(numfaces*12*sizeof(unsigned char), "GPU_buffer_color");
|
||||
for( i=0; i < numfaces*4; i++ ) {
|
||||
colors[i*3] = mcol[i].b;
|
||||
colors[i*3+1] = mcol[i].g;
|
||||
colors[i*3+2] = mcol[i].r;
|
||||
}
|
||||
|
||||
result = GPU_buffer_setup( dm, dm->drawObject, 3, sizeof(char)*3*dm->drawObject->nelements, GL_ARRAY_BUFFER_ARB, colors, GPU_buffer_copy_color3 );
|
||||
|
||||
MEM_freeN(colors);
|
||||
return result;
|
||||
}
|
||||
|
||||
static void GPU_buffer_copy_edge(DerivedMesh *dm, float *varray, int *UNUSED(index), int *UNUSED(redir), void *UNUSED(user))
|
||||
{
|
||||
int i;
|
||||
|
||||
MEdge *medge;
|
||||
unsigned int *varray_ = (unsigned int *)varray;
|
||||
int numedges;
|
||||
|
||||
DEBUG_VBO("GPU_buffer_copy_edge\n");
|
||||
|
||||
medge = dm->getEdgeArray(dm);
|
||||
|
||||
numedges= dm->getNumEdges(dm);
|
||||
for(i = 0; i < numedges; i++) {
|
||||
varray_[i*2] = (unsigned int)dm->drawObject->indices[medge[i].v1].element;
|
||||
varray_[i*2+1] = (unsigned int)dm->drawObject->indices[medge[i].v2].element;
|
||||
}
|
||||
}
|
||||
|
||||
static GPUBuffer *GPU_buffer_edge( DerivedMesh *dm )
|
||||
{
|
||||
DEBUG_VBO("GPU_buffer_edge\n");
|
||||
|
||||
return GPU_buffer_setup( dm, dm->drawObject, 2, sizeof(int)*2*dm->drawObject->nedges, GL_ELEMENT_ARRAY_BUFFER_ARB, 0, GPU_buffer_copy_edge);
|
||||
}
|
||||
|
||||
static void GPU_buffer_copy_uvedge(DerivedMesh *dm, float *varray, int *UNUSED(index), int *UNUSED(redir), void *UNUSED(user))
|
||||
{
|
||||
MTFace *tf = DM_get_face_data_layer(dm, CD_MTFACE);
|
||||
int i, j=0;
|
||||
|
||||
DEBUG_VBO("GPU_buffer_copy_uvedge\n");
|
||||
|
||||
if(tf) {
|
||||
for(i = 0; i < dm->numFaceData; i++, tf++) {
|
||||
MFace mf;
|
||||
dm->getFace(dm,i,&mf);
|
||||
|
||||
VECCOPY2D(&varray[j],tf->uv[0]);
|
||||
VECCOPY2D(&varray[j+2],tf->uv[1]);
|
||||
|
||||
VECCOPY2D(&varray[j+4],tf->uv[1]);
|
||||
VECCOPY2D(&varray[j+6],tf->uv[2]);
|
||||
|
||||
if(!mf.v4) {
|
||||
VECCOPY2D(&varray[j+8],tf->uv[2]);
|
||||
VECCOPY2D(&varray[j+10],tf->uv[0]);
|
||||
j+=12;
|
||||
} else {
|
||||
VECCOPY2D(&varray[j+8],tf->uv[2]);
|
||||
VECCOPY2D(&varray[j+10],tf->uv[3]);
|
||||
|
||||
VECCOPY2D(&varray[j+12],tf->uv[3]);
|
||||
VECCOPY2D(&varray[j+14],tf->uv[0]);
|
||||
j+=16;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
DEBUG_VBO("Could not get MTFACE data layer");
|
||||
}
|
||||
}
|
||||
|
||||
static GPUBuffer *GPU_buffer_uvedge( DerivedMesh *dm )
|
||||
{
|
||||
DEBUG_VBO("GPU_buffer_uvedge\n");
|
||||
/* logic here:
|
||||
* ...each face gets 3 'nelements'
|
||||
* ...3 edges per triangle
|
||||
* ...each edge has its own, non-shared coords.
|
||||
* so each tri corner needs minimum of 4 floats, quads used less so here we can over allocate and assume all tris.
|
||||
* */
|
||||
return GPU_buffer_setup( dm, dm->drawObject, 4, 4 * sizeof(float) * dm->drawObject->nelements, GL_ARRAY_BUFFER_ARB, 0, GPU_buffer_copy_uvedge);
|
||||
}
|
||||
|
||||
|
||||
void GPU_vertex_setup( DerivedMesh *dm )
|
||||
{
|
||||
DEBUG_VBO("GPU_vertex_setup\n");
|
||||
if( dm->drawObject == 0 )
|
||||
dm->drawObject = GPU_drawobject_new( dm );
|
||||
if( dm->drawObject->vertices == 0 )
|
||||
dm->drawObject->vertices = GPU_buffer_vertex( dm );
|
||||
if( dm->drawObject->vertices == 0 ) {
|
||||
DEBUG_VBO( "Failed to setup vertices\n" );
|
||||
return;
|
||||
}
|
||||
|
||||
glEnableClientState( GL_VERTEX_ARRAY );
|
||||
if( useVBOs ) {
|
||||
glBindBufferARB( GL_ARRAY_BUFFER_ARB, dm->drawObject->vertices->id );
|
||||
glVertexPointer( 3, GL_FLOAT, 0, 0 );
|
||||
}
|
||||
else {
|
||||
glVertexPointer( 3, GL_FLOAT, 0, dm->drawObject->vertices->pointer );
|
||||
}
|
||||
|
||||
GLStates |= GPU_BUFFER_VERTEX_STATE;
|
||||
}
|
||||
|
||||
void GPU_normal_setup( DerivedMesh *dm )
|
||||
{
|
||||
DEBUG_VBO("GPU_normal_setup\n");
|
||||
if( dm->drawObject == 0 )
|
||||
dm->drawObject = GPU_drawobject_new( dm );
|
||||
if( dm->drawObject->normals == 0 )
|
||||
dm->drawObject->normals = GPU_buffer_normal( dm );
|
||||
if( dm->drawObject->normals == 0 ) {
|
||||
DEBUG_VBO( "Failed to setup normals\n" );
|
||||
return;
|
||||
}
|
||||
glEnableClientState( GL_NORMAL_ARRAY );
|
||||
if( useVBOs ) {
|
||||
glBindBufferARB( GL_ARRAY_BUFFER_ARB, dm->drawObject->normals->id );
|
||||
glNormalPointer( GL_FLOAT, 0, 0 );
|
||||
}
|
||||
else {
|
||||
glNormalPointer( GL_FLOAT, 0, dm->drawObject->normals->pointer );
|
||||
}
|
||||
|
||||
GLStates |= GPU_BUFFER_NORMAL_STATE;
|
||||
}
|
||||
|
||||
void GPU_uv_setup( DerivedMesh *dm )
|
||||
{
|
||||
DEBUG_VBO("GPU_uv_setup\n");
|
||||
if( dm->drawObject == 0 )
|
||||
dm->drawObject = GPU_drawobject_new( dm );
|
||||
if( dm->drawObject->uv == 0 )
|
||||
dm->drawObject->uv = GPU_buffer_uv( dm );
|
||||
|
||||
if( dm->drawObject->uv != 0 ) {
|
||||
glEnableClientState( GL_TEXTURE_COORD_ARRAY );
|
||||
if( useVBOs ) {
|
||||
glBindBufferARB( GL_ARRAY_BUFFER_ARB, dm->drawObject->uv->id );
|
||||
glTexCoordPointer( 2, GL_FLOAT, 0, 0 );
|
||||
}
|
||||
else {
|
||||
glTexCoordPointer( 2, GL_FLOAT, 0, dm->drawObject->uv->pointer );
|
||||
}
|
||||
|
||||
GLStates |= GPU_BUFFER_TEXCOORD_STATE;
|
||||
}
|
||||
}
|
||||
|
||||
void GPU_color_setup( DerivedMesh *dm )
|
||||
{
|
||||
DEBUG_VBO("GPU_color_setup\n");
|
||||
if( dm->drawObject == 0 )
|
||||
dm->drawObject = GPU_drawobject_new( dm );
|
||||
if( dm->drawObject->colors == 0 )
|
||||
dm->drawObject->colors = GPU_buffer_color( dm );
|
||||
if( dm->drawObject->colors == 0 ) {
|
||||
DEBUG_VBO( "Failed to setup colors\n" );
|
||||
return;
|
||||
}
|
||||
glEnableClientState( GL_COLOR_ARRAY );
|
||||
if( useVBOs ) {
|
||||
glBindBufferARB( GL_ARRAY_BUFFER_ARB, dm->drawObject->colors->id );
|
||||
glColorPointer( 3, GL_UNSIGNED_BYTE, 0, 0 );
|
||||
}
|
||||
else {
|
||||
glColorPointer( 3, GL_UNSIGNED_BYTE, 0, dm->drawObject->colors->pointer );
|
||||
}
|
||||
|
||||
GLStates |= GPU_BUFFER_COLOR_STATE;
|
||||
}
|
||||
|
||||
void GPU_edge_setup( DerivedMesh *dm )
|
||||
{
|
||||
DEBUG_VBO("GPU_edge_setup\n");
|
||||
if( dm->drawObject == 0 )
|
||||
dm->drawObject = GPU_drawobject_new( dm );
|
||||
if( dm->drawObject->edges == 0 )
|
||||
dm->drawObject->edges = GPU_buffer_edge( dm );
|
||||
if( dm->drawObject->edges == 0 ) {
|
||||
DEBUG_VBO( "Failed to setup edges\n" );
|
||||
return;
|
||||
}
|
||||
if( dm->drawObject->vertices == 0 )
|
||||
dm->drawObject->vertices = GPU_buffer_vertex( dm );
|
||||
if( dm->drawObject->vertices == 0 ) {
|
||||
DEBUG_VBO( "Failed to setup vertices\n" );
|
||||
return;
|
||||
}
|
||||
|
||||
glEnableClientState( GL_VERTEX_ARRAY );
|
||||
if( useVBOs ) {
|
||||
glBindBufferARB( GL_ARRAY_BUFFER_ARB, dm->drawObject->vertices->id );
|
||||
glVertexPointer( 3, GL_FLOAT, 0, 0 );
|
||||
}
|
||||
else {
|
||||
glVertexPointer( 3, GL_FLOAT, 0, dm->drawObject->vertices->pointer );
|
||||
}
|
||||
|
||||
GLStates |= GPU_BUFFER_VERTEX_STATE;
|
||||
|
||||
if( useVBOs ) {
|
||||
glBindBufferARB( GL_ELEMENT_ARRAY_BUFFER_ARB, dm->drawObject->edges->id );
|
||||
}
|
||||
|
||||
GLStates |= GPU_BUFFER_ELEMENT_STATE;
|
||||
}
|
||||
|
||||
void GPU_uvedge_setup( DerivedMesh *dm )
|
||||
{
|
||||
DEBUG_VBO("GPU_uvedge_setup\n");
|
||||
if( dm->drawObject == 0 )
|
||||
dm->drawObject = GPU_drawobject_new( dm );
|
||||
if( dm->drawObject->uvedges == 0 )
|
||||
dm->drawObject->uvedges = GPU_buffer_uvedge( dm );
|
||||
if( dm->drawObject->uvedges == 0 ) {
|
||||
DEBUG_VBO( "Failed to setup UV edges\n" );
|
||||
return;
|
||||
}
|
||||
|
||||
glEnableClientState( GL_VERTEX_ARRAY );
|
||||
if( useVBOs ) {
|
||||
glBindBufferARB( GL_ARRAY_BUFFER_ARB, dm->drawObject->uvedges->id );
|
||||
glVertexPointer( 2, GL_FLOAT, 0, 0 );
|
||||
}
|
||||
else {
|
||||
glVertexPointer( 2, GL_FLOAT, 0, dm->drawObject->uvedges->pointer );
|
||||
}
|
||||
|
||||
GLStates |= GPU_BUFFER_VERTEX_STATE;
|
||||
}
|
||||
|
||||
void GPU_interleaved_setup( GPUBuffer *buffer, int data[] ) {
|
||||
int i;
|
||||
int elementsize = 0;
|
||||
intptr_t offset = 0;
|
||||
|
||||
DEBUG_VBO("GPU_interleaved_setup\n");
|
||||
|
||||
for( i = 0; data[i] != GPU_BUFFER_INTER_END; i++ ) {
|
||||
switch( data[i] ) {
|
||||
case GPU_BUFFER_INTER_V3F:
|
||||
elementsize += 3*sizeof(float);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_N3F:
|
||||
elementsize += 3*sizeof(float);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_T2F:
|
||||
elementsize += 2*sizeof(float);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_C3UB:
|
||||
elementsize += 3*sizeof(unsigned char);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_C4UB:
|
||||
elementsize += 4*sizeof(unsigned char);
|
||||
break;
|
||||
default:
|
||||
DEBUG_VBO( "Unknown element in data type array in GPU_interleaved_setup\n" );
|
||||
}
|
||||
}
|
||||
|
||||
if( useVBOs ) {
|
||||
glBindBufferARB( GL_ARRAY_BUFFER_ARB, buffer->id );
|
||||
for( i = 0; data[i] != GPU_BUFFER_INTER_END; i++ ) {
|
||||
switch( data[i] ) {
|
||||
case GPU_BUFFER_INTER_V3F:
|
||||
glEnableClientState( GL_VERTEX_ARRAY );
|
||||
glVertexPointer( 3, GL_FLOAT, elementsize, (void *)offset );
|
||||
GLStates |= GPU_BUFFER_VERTEX_STATE;
|
||||
offset += 3*sizeof(float);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_N3F:
|
||||
glEnableClientState( GL_NORMAL_ARRAY );
|
||||
glNormalPointer( GL_FLOAT, elementsize, (void *)offset );
|
||||
GLStates |= GPU_BUFFER_NORMAL_STATE;
|
||||
offset += 3*sizeof(float);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_T2F:
|
||||
glEnableClientState( GL_TEXTURE_COORD_ARRAY );
|
||||
glTexCoordPointer( 2, GL_FLOAT, elementsize, (void *)offset );
|
||||
GLStates |= GPU_BUFFER_TEXCOORD_STATE;
|
||||
offset += 2*sizeof(float);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_C3UB:
|
||||
glEnableClientState( GL_COLOR_ARRAY );
|
||||
glColorPointer( 3, GL_UNSIGNED_BYTE, elementsize, (void *)offset );
|
||||
GLStates |= GPU_BUFFER_COLOR_STATE;
|
||||
offset += 3*sizeof(unsigned char);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_C4UB:
|
||||
glEnableClientState( GL_COLOR_ARRAY );
|
||||
glColorPointer( 4, GL_UNSIGNED_BYTE, elementsize, (void *)offset );
|
||||
GLStates |= GPU_BUFFER_COLOR_STATE;
|
||||
offset += 4*sizeof(unsigned char);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
for( i = 0; data[i] != GPU_BUFFER_INTER_END; i++ ) {
|
||||
switch( data[i] ) {
|
||||
case GPU_BUFFER_INTER_V3F:
|
||||
glEnableClientState( GL_VERTEX_ARRAY );
|
||||
glVertexPointer( 3, GL_FLOAT, elementsize, offset+(char *)buffer->pointer );
|
||||
GLStates |= GPU_BUFFER_VERTEX_STATE;
|
||||
offset += 3*sizeof(float);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_N3F:
|
||||
glEnableClientState( GL_NORMAL_ARRAY );
|
||||
glNormalPointer( GL_FLOAT, elementsize, offset+(char *)buffer->pointer );
|
||||
GLStates |= GPU_BUFFER_NORMAL_STATE;
|
||||
offset += 3*sizeof(float);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_T2F:
|
||||
glEnableClientState( GL_TEXTURE_COORD_ARRAY );
|
||||
glTexCoordPointer( 2, GL_FLOAT, elementsize, offset+(char *)buffer->pointer );
|
||||
GLStates |= GPU_BUFFER_TEXCOORD_STATE;
|
||||
offset += 2*sizeof(float);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_C3UB:
|
||||
glEnableClientState( GL_COLOR_ARRAY );
|
||||
glColorPointer( 3, GL_UNSIGNED_BYTE, elementsize, offset+(char *)buffer->pointer );
|
||||
GLStates |= GPU_BUFFER_COLOR_STATE;
|
||||
offset += 3*sizeof(unsigned char);
|
||||
break;
|
||||
case GPU_BUFFER_INTER_C4UB:
|
||||
glEnableClientState( GL_COLOR_ARRAY );
|
||||
glColorPointer( 4, GL_UNSIGNED_BYTE, elementsize, offset+(char *)buffer->pointer );
|
||||
GLStates |= GPU_BUFFER_COLOR_STATE;
|
||||
offset += 4*sizeof(unsigned char);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int GPU_typesize( int type ) {
|
||||
switch( type ) {
|
||||
case GL_FLOAT:
|
||||
return sizeof(float);
|
||||
case GL_INT:
|
||||
return sizeof(int);
|
||||
case GL_UNSIGNED_INT:
|
||||
return sizeof(unsigned int);
|
||||
case GL_BYTE:
|
||||
return sizeof(char);
|
||||
case GL_UNSIGNED_BYTE:
|
||||
return sizeof(unsigned char);
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
int GPU_attrib_element_size( GPUAttrib data[], int numdata ) {
|
||||
int i, elementsize = 0;
|
||||
|
||||
for( i = 0; i < numdata; i++ ) {
|
||||
int typesize = GPU_typesize(data[i].type);
|
||||
if( typesize == 0 )
|
||||
DEBUG_VBO( "Unknown element in data type array in GPU_attrib_element_size\n" );
|
||||
else {
|
||||
elementsize += typesize*data[i].size;
|
||||
}
|
||||
}
|
||||
return elementsize;
|
||||
}
|
||||
|
||||
void GPU_interleaved_attrib_setup( GPUBuffer *buffer, GPUAttrib data[], int numdata ) {
	int i;
	int elementsize;
	intptr_t offset = 0;

	DEBUG_VBO("GPU_interleaved_attrib_setup\n");

	for( i = 0; i < MAX_GPU_ATTRIB_DATA; i++ ) {
		if( attribData[i].index != -1 ) {
			glDisableVertexAttribArrayARB( attribData[i].index );
		}
		else
			break;
	}
	elementsize = GPU_attrib_element_size( data, numdata );

	if( useVBOs ) {
		glBindBufferARB( GL_ARRAY_BUFFER_ARB, buffer->id );
		for( i = 0; i < numdata; i++ ) {
			glEnableVertexAttribArrayARB( data[i].index );
			glVertexAttribPointerARB( data[i].index, data[i].size, data[i].type, GL_FALSE, elementsize, (void *)offset );
			offset += data[i].size*GPU_typesize(data[i].type);

			attribData[i].index = data[i].index;
			attribData[i].size = data[i].size;
			attribData[i].type = data[i].type;
		}
		attribData[numdata].index = -1;
	}
	else {
		for( i = 0; i < numdata; i++ ) {
			glEnableVertexAttribArrayARB( data[i].index );
			glVertexAttribPointerARB( data[i].index, data[i].size, data[i].type, GL_FALSE, elementsize, (char *)buffer->pointer + offset );
			offset += data[i].size*GPU_typesize(data[i].type);
		}
	}
}


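/* Drop every client state and generic attribute enabled by the setup calls
 * above and unbind the array/element VBOs, returning GL to its default
 * vertex-array state after drawing. */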
void GPU_buffer_unbind(void)
{
	int i;
	DEBUG_VBO("GPU_buffer_unbind\n");

	if( GLStates & GPU_BUFFER_VERTEX_STATE )
		glDisableClientState( GL_VERTEX_ARRAY );
	if( GLStates & GPU_BUFFER_NORMAL_STATE )
		glDisableClientState( GL_NORMAL_ARRAY );
	if( GLStates & GPU_BUFFER_TEXCOORD_STATE )
		glDisableClientState( GL_TEXTURE_COORD_ARRAY );
	if( GLStates & GPU_BUFFER_COLOR_STATE )
		glDisableClientState( GL_COLOR_ARRAY );
	if( GLStates & GPU_BUFFER_ELEMENT_STATE ) {
		if( useVBOs ) {
			glBindBufferARB( GL_ELEMENT_ARRAY_BUFFER_ARB, 0 );
		}
	}
	GLStates &= ~(GPU_BUFFER_VERTEX_STATE | GPU_BUFFER_NORMAL_STATE | GPU_BUFFER_TEXCOORD_STATE | GPU_BUFFER_COLOR_STATE | GPU_BUFFER_ELEMENT_STATE);

	for( i = 0; i < MAX_GPU_ATTRIB_DATA; i++ ) {
		if( attribData[i].index != -1 ) {
			glDisableVertexAttribArrayARB( attribData[i].index );
		}
		else
			break;
	}
	if( GLStates != 0 ) {
		DEBUG_VBO( "Some weird OpenGL state is still set. Why?" );
	}
	if( useVBOs )
		glBindBufferARB( GL_ARRAY_BUFFER_ARB, 0 );
}

void GPU_color3_upload( DerivedMesh *dm, unsigned char *data )
{
	if( dm->drawObject == 0 )
		dm->drawObject = GPU_drawobject_new(dm);
	GPU_buffer_free(dm->drawObject->colors,globalPool);
	dm->drawObject->colors = GPU_buffer_setup( dm, dm->drawObject, 3, sizeof(char)*3*dm->drawObject->nelements, GL_ARRAY_BUFFER_ARB, data, GPU_buffer_copy_color3 );
}
void GPU_color4_upload( DerivedMesh *dm, unsigned char *data )
{
	if( dm->drawObject == 0 )
		dm->drawObject = GPU_drawobject_new(dm);
	GPU_buffer_free(dm->drawObject->colors,globalPool);
	dm->drawObject->colors = GPU_buffer_setup( dm, dm->drawObject, 3, sizeof(char)*3*dm->drawObject->nelements, GL_ARRAY_BUFFER_ARB, data, GPU_buffer_copy_color4 );
}

void GPU_color_switch( int mode )
{
	if( mode ) {
		if( !(GLStates & GPU_BUFFER_COLOR_STATE) )
			glEnableClientState( GL_COLOR_ARRAY );
		GLStates |= GPU_BUFFER_COLOR_STATE;
	}
	else {
		if( GLStates & GPU_BUFFER_COLOR_STATE )
			glDisableClientState( GL_COLOR_ARRAY );
		GLStates &= ~GPU_BUFFER_COLOR_STATE;
	}
}

int GPU_buffer_legacy( DerivedMesh *dm )
{
	int test= (U.gameflags & USER_DISABLE_VBO);
	if( test )
		return 1;

	if( dm->drawObject == 0 )
		dm->drawObject = GPU_drawobject_new(dm);
	return dm->drawObject->legacy;
}

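/* Map a buffer for writing. With VBOs the buffer object is bound and mapped;
 * without them the CPU-side copy is returned directly. A minimal usage sketch,
 * error handling omitted and `buffer' assumed to come from the buffer pool:
 *
 *   float *varray = GPU_buffer_lock(buffer);
 *   if(varray) {
 *       // fill varray with vertex data
 *       GPU_buffer_unlock(buffer);
 *   }
 */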
void *GPU_buffer_lock( GPUBuffer *buffer )
{
	float *varray;

	DEBUG_VBO("GPU_buffer_lock\n");
	if( buffer == 0 ) {
		DEBUG_VBO( "Failed to lock NULL buffer\n" );
		return 0;
	}

	if( useVBOs ) {
		glBindBufferARB( GL_ARRAY_BUFFER_ARB, buffer->id );
		varray = glMapBufferARB( GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB );
		if( varray == 0 ) {
			DEBUG_VBO( "Failed to map buffer to client address space\n" );
		}
		return varray;
	}
	else {
		return buffer->pointer;
	}
}

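/* Same as GPU_buffer_lock(), but intended for per-frame streaming: the old
 * storage is re-specified with GL_STREAM_DRAW before mapping so the driver can
 * hand back fresh memory instead of waiting on pending draws (buffer
 * orphaning). */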
void *GPU_buffer_lock_stream( GPUBuffer *buffer )
{
	float *varray;

	DEBUG_VBO("GPU_buffer_lock_stream\n");
	if( buffer == 0 ) {
		DEBUG_VBO( "Failed to lock NULL buffer\n" );
		return 0;
	}

	if( useVBOs ) {
		glBindBufferARB( GL_ARRAY_BUFFER_ARB, buffer->id );
		glBufferDataARB( GL_ARRAY_BUFFER_ARB, buffer->size, 0, GL_STREAM_DRAW_ARB ); /* discard previous data, avoid stalling gpu */
		varray = glMapBufferARB( GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB );
		if( varray == 0 ) {
			DEBUG_VBO( "Failed to map buffer to client address space\n" );
		}
		return varray;
	}
	else {
		return buffer->pointer;
	}
}

void GPU_buffer_unlock( GPUBuffer *buffer )
{
	DEBUG_VBO( "GPU_buffer_unlock\n" );
	if( useVBOs ) {
		if( buffer != 0 ) {
			if( glUnmapBufferARB( GL_ARRAY_BUFFER_ARB ) == 0 ) {
				DEBUG_VBO( "Failed to copy new data\n" );
			}
		}
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
	}
}

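/* Draw from the currently bound index buffer. With VBOs the last argument to
 * glDrawElements is a byte offset into the bound element buffer; in legacy
 * mode it is a plain pointer into the buffer's client-memory copy. */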
void GPU_buffer_draw_elements( GPUBuffer *elements, unsigned int mode, int start, int count )
{
	if( useVBOs ) {
		glDrawElements( mode, count, GL_UNSIGNED_INT, (void *)(start*sizeof(unsigned int)) );
	}
	else {
		glDrawElements( mode, count, GL_UNSIGNED_INT, ((int *)elements->pointer)+start );
	}
}

@@ -421,7 +421,7 @@ typedef struct SoftBody {
#define OB_SB_SELF 512
#define OB_SB_FACECOLL 1024
#define OB_SB_EDGECOLL 2048
#define OB_SB_COLLFINAL 4096
#define OB_SB_COLLFINAL 4096 /* deprecated */
#define OB_SB_BIG_UI 8192
#define OB_SB_AERO_ANGLE 16384


@@ -926,13 +926,6 @@ static void rna_def_collision(BlenderRNA *brna)
	RNA_def_property_range(prop, 0.0f, 1.0f);
	RNA_def_property_ui_text(prop, "Damping", "Amount of damping during collision");
	RNA_def_property_update(prop, 0, "rna_CollisionSettings_update");

	/* Does this belong here?
	prop= RNA_def_property(srna, "collision_stack", PROP_BOOLEAN, PROP_NONE);
	RNA_def_property_boolean_sdna(prop, NULL, "softflag", OB_SB_COLLFINAL);
	RNA_def_property_ui_text(prop, "Collision from Stack", "Pick collision object from modifier stack (softbody only)");
	RNA_def_property_update(prop, 0, "rna_CollisionSettings_update");
	*/

	prop= RNA_def_property(srna, "absorption", PROP_FLOAT, PROP_FACTOR);
	RNA_def_property_range(prop, 0.0f, 1.0f);

@@ -226,7 +226,7 @@ static int gpu_shader_material(GPUMaterial *mat, bNode *node, GPUNodeStack *in,
	GPUShadeInput shi;
	GPUShadeResult shr;
	bNodeSocket *sock;
	char hasinput[NUM_MAT_IN];
	char hasinput[NUM_MAT_IN]= {'\0'};
	int i;

	/* note: cannot use the in[]->hasinput flags directly, as these are not necessarily

@@ -36,8 +36,6 @@
#include "BLI_path_util.h"
#endif

#define PYC_INTERPRETER_ACTIVE (((PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current)) != NULL)

/* array utility function */
int PyC_AsArray(void *array, PyObject *value, const int length, const PyTypeObject *type, const short is_double, const char *error_prefix)
{

@@ -50,4 +50,6 @@ void PyC_MainModule_Restore(PyObject *main_mod);

void PyC_SetHomePath(const char *py_path_bundle);

#define PYC_INTERPRETER_ACTIVE (((PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current)) != NULL)

#endif // PY_CAPI_UTILS_H

@@ -41,6 +41,8 @@

#include "bpy_driver.h"

#include "../generic/py_capi_utils.h"

/* for pydrivers (drivers using one-line Python expressions to express relationships between targets) */
PyObject *bpy_pydriver_Dict= NULL;

@@ -87,7 +89,7 @@ int bpy_pydriver_create_dict(void)
void BPY_driver_reset(void)
{
	PyGILState_STATE gilstate;
	int use_gil= 1; // (PyThreadState_Get()==NULL);
	int use_gil= !PYC_INTERPRETER_ACTIVE;

	if(use_gil)
		gilstate= PyGILState_Ensure();
@@ -120,7 +122,7 @@ static void pydriver_error(ChannelDriver *driver)
 *
 * note: PyGILState_Ensure() isnt always called because python can call the
 * bake operator which intern starts a thread which calls scene update which
 * does a driver update. to avoid a deadlock check PyThreadState_Get() if PyGILState_Ensure() is needed.
 * does a driver update. to avoid a deadlock check PYC_INTERPRETER_ACTIVE if PyGILState_Ensure() is needed.
 */
float BPY_driver_exec(ChannelDriver *driver)
{
@@ -147,7 +149,7 @@ float BPY_driver_exec(ChannelDriver *driver)
		return 0.0f;
	}

	use_gil= 1; //(PyThreadState_Get()==NULL);
	use_gil= !PYC_INTERPRETER_ACTIVE;

	if(use_gil)
		gilstate= PyGILState_Ensure();

@@ -3847,9 +3847,11 @@ static PyObject *foreach_getset(BPy_PropertyRNA *self, PyObject *args, int set)
			case PROP_RAW_DOUBLE:
				item= PyFloat_FromDouble((double) ((double *)array)[i]);
				break;
			case PROP_RAW_UNSET:
			default: /* PROP_RAW_UNSET */
				/* should never happen */
				BLI_assert(!"Invalid array type - get");
				item= Py_None;
				Py_INCREF(item);
				break;
		}


@@ -2932,8 +2932,10 @@ static void init_render_curve(Render *re, ObjectRen *obr, int timeoffset)
			vlr->v3= RE_findOrAddVert(obr, startvert+index[2]);
			vlr->v4= NULL;

			normal_tri_v3(tmp, vlr->v3->co, vlr->v2->co, vlr->v1->co);
			add_v3_v3(n, tmp);
			if(area_tri_v3(vlr->v3->co, vlr->v2->co, vlr->v1->co)>FLT_EPSILON) {
				normal_tri_v3(tmp, vlr->v3->co, vlr->v2->co, vlr->v1->co);
				add_v3_v3(n, tmp);
			}

			vlr->mat= matar[ dl->col ];
			vlr->flag= 0;

@@ -416,7 +416,7 @@ void WM_exit(bContext *C)
	BPY_python_end();
#endif

	GPU_buffer_pool_free(NULL);
	GPU_global_buffer_pool_free();
	GPU_free_unused_buffers();
	GPU_extensions_exit();


@@ -1147,7 +1147,7 @@ PyObject *PyObjectPlus::NewProxyPlus_Ext(PyObjectPlus *self, PyTypeObject *tp, v
	BGE_PROXY_REF(proxy) = NULL;
	BGE_PROXY_PTR(proxy) = ptr;
#ifdef USE_WEAKREFS
	BGE_PROXY_WKREF(self->m_proxy) = NULL;
	BGE_PROXY_WKREF(proxy) = NULL;
#endif
	return proxy;
}

@@ -1022,10 +1022,8 @@ KX_PYMETHODDEF_DOC_VARARGS(KX_Camera, getScreenRay,
		return NULL;

	PyObject* argValue = PyTuple_New(2);
	if (argValue) {
		PyTuple_SET_ITEM(argValue, 0, PyFloat_FromDouble(x));
		PyTuple_SET_ITEM(argValue, 1, PyFloat_FromDouble(y));
	}
	PyTuple_SET_ITEM(argValue, 0, PyFloat_FromDouble(x));
	PyTuple_SET_ITEM(argValue, 1, PyFloat_FromDouble(y));

	if(!PyVecTo(PygetScreenVect(argValue), vect))
	{

@@ -327,7 +327,7 @@ void KX_GameObject::RemoveParent(KX_Scene *scene)
			rootobj->m_pPhysicsController1->RemoveCompoundChild(m_pPhysicsController1);
		}
		m_pPhysicsController1->RestoreDynamics();
		if (m_pPhysicsController1->IsDyna() && rootobj->m_pPhysicsController1)
		if (m_pPhysicsController1->IsDyna() && (rootobj != NULL && rootobj->m_pPhysicsController1))
		{
			// dynamic object should remember the velocity they had while being parented
			MT_Point3 childPoint = GetSGNode()->GetWorldPosition();
