Manual merge of soc-2009-kazanbas branch:

* copied I/O scripts
* copied, modified rna_*_api.c and rna_*.c

I/O scripts not working yet due to slight BPY differences and RNA changes. Will fix them later.

Changes not merged:

* C unit testing integration, because it is clumsy
* scons cross-compiling, can be merged easily later
Arystanbek Dyussenov
2009-09-22 16:35:07 +00:00
33 changed files with 10936 additions and 132 deletions

@@ -281,7 +281,7 @@ def write_pov(filename, scene=None, info_callback = None):
me = ob.data
me_materials= me.materials
me = ob.create_render_mesh(scene)
me = ob.create_mesh(True, 'RENDER')
if not me:
continue

1128  release/io/export_3ds.py  Normal file

@@ -0,0 +1,1128 @@
#!BPY
# coding: utf-8
"""
Name: '3D Studio (.3ds)...'
Blender: 243
Group: 'Export'
Tooltip: 'Export to 3DS file format (.3ds).'
"""
__author__ = ["Campbell Barton", "Bob Holcomb", "Richard Lärkäng", "Damien McGinnes", "Mark Stijnman"]
__url__ = ("blenderartists.org", "www.blender.org", "www.gametutorials.com", "lib3ds.sourceforge.net/")
__version__ = "0.90a"
__bpydoc__ = """\
3ds Exporter
This script Exports a 3ds file.
Exporting is based on 3ds loader from www.gametutorials.com(Thanks DigiBen) and using information
from the lib3ds project (http://lib3ds.sourceforge.net/) sourcecode.
"""
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Script copyright (C) Bob Holcomb
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
######################################################
# Importing modules
######################################################
import struct
import os
import time
import bpy
# import Blender
# from BPyMesh import getMeshFromObject
# from BPyObject import getDerivedObjects
# try:
# import struct
# except:
# struct = None
# also used by X3D exporter
# return a tuple (free, object list), free is True if memory should be freed later with free_derived_objects()
def create_derived_objects(ob):
if ob.parent and ob.parent.dupli_type != 'NONE':
return False, None
if ob.dupli_type != 'NONE':
ob.create_dupli_list()
return True, [(dob.object, dob.matrix) for dob in ob.dupli_list]
else:
return False, [(ob, ob.matrix)]
# also used by X3D exporter
def free_derived_objects(ob):
ob.free_dupli_list()
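# Illustrative sketch (not part of the original script): how the pair above is
# meant to be used, mirroring what save_3ds() does further down. The helper name
# is hypothetical and it is never called.
def _derived_objects_example(ob):
    free, derived = create_derived_objects(ob)
    if derived is None:
        return []
    objects = [dob for dob, matrix in derived]
    if free:
        # free the dupli list only when create_derived_objects() built one
        free_derived_objects(ob)
    return objects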
# So 3ds max can open files, limit names to 12 in length
# this is very annoying for filenames!
name_unique = []
name_mapping = {}
def sane_name(name):
name_fixed = name_mapping.get(name)
if name_fixed != None:
return name_fixed
if len(name) > 12:
new_name = name[:12]
else:
new_name = name
i = 0
while new_name in name_unique:
new_name = new_name[:-4] + '.%.3d' % i
i+=1
name_unique.append(new_name)
name_mapping[name] = new_name
return new_name
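# Illustrative sketch (not part of the original script): what the 12-character
# truncation and uniquing above produces, assuming fresh name_unique/name_mapping
# state. The helper name is hypothetical and it is never called.
def _sane_name_example():
    return (sane_name('VeryLongMeshName'),    # -> 'VeryLongMesh'
            sane_name('VeryLongMeshName.1'),  # -> 'VeryLong.000' (truncated, then uniquified)
            sane_name('Cube'))                # -> 'Cube'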
######################################################
# Data Structures
######################################################
#Some of the chunks that we will export
#----- Primary Chunk, at the beginning of each file
PRIMARY= int("0x4D4D",16)
#------ Main Chunks
OBJECTINFO = int("0x3D3D",16); #This gives the version of the mesh and is found right before the material and object information
VERSION = int("0x0002",16); #This gives the version of the .3ds file
KFDATA = int("0xB000",16); #This is the header for all of the key frame info
#------ sub defines of OBJECTINFO
MATERIAL=45055 #0xAFFF // This stores the texture info
OBJECT=16384 #0x4000 // This stores the faces, vertices, etc...
#>------ sub defines of MATERIAL
MATNAME = int("0xA000",16); # This holds the material name
MATAMBIENT = int("0xA010",16); # Ambient color of the object/material
MATDIFFUSE = int("0xA020",16); # This holds the color of the object/material
MATSPECULAR = int("0xA030",16); # Specular color of the object/material
MATSHINESS = int("0xA040",16); # ??
MATMAP = int("0xA200",16); # This is a header for a new material
MATMAPFILE = int("0xA300",16); # This holds the file name of the texture
RGB1= int("0x0011",16)
RGB2= int("0x0012",16)
#>------ sub defines of OBJECT
OBJECT_MESH = int("0x4100",16); # This lets us know that we are reading a new object
OBJECT_LIGHT = int("0x4600",16); # This lets us know we are reading a light object
OBJECT_CAMERA= int("0x4700",16); # This lets us know we are reading a camera object
#>------ sub defines of CAMERA
OBJECT_CAM_RANGES= int("0x4720",16); # The camera range values
#>------ sub defines of OBJECT_MESH
OBJECT_VERTICES = int("0x4110",16); # The objects vertices
OBJECT_FACES = int("0x4120",16); # The objects faces
OBJECT_MATERIAL = int("0x4130",16); # This is found if the object has a material, either texture map or color
OBJECT_UV = int("0x4140",16); # The UV texture coordinates
OBJECT_TRANS_MATRIX = int("0x4160",16); # The Object Matrix
#>------ sub defines of KFDATA
KFDATA_KFHDR = int("0xB00A",16);
KFDATA_KFSEG = int("0xB008",16);
KFDATA_KFCURTIME = int("0xB009",16);
KFDATA_OBJECT_NODE_TAG = int("0xB002",16);
#>------ sub defines of OBJECT_NODE_TAG
OBJECT_NODE_ID = int("0xB030",16);
OBJECT_NODE_HDR = int("0xB010",16);
OBJECT_PIVOT = int("0xB013",16);
OBJECT_INSTANCE_NAME = int("0xB011",16);
POS_TRACK_TAG = int("0xB020",16);
ROT_TRACK_TAG = int("0xB021",16);
SCL_TRACK_TAG = int("0xB022",16);
def uv_key(uv):
return round(uv[0], 6), round(uv[1], 6)
# return round(uv.x, 6), round(uv.y, 6)
# size defines:
SZ_SHORT = 2
SZ_INT = 4
SZ_FLOAT = 4
class _3ds_short(object):
'''Class representing a short (2-byte integer) for a 3ds file.
*** This looks like an unsigned short; H is unsigned per the struct docs - Cam ***'''
__slots__ = 'value'
def __init__(self, val=0):
self.value=val
def get_size(self):
return SZ_SHORT
def write(self,file):
file.write(struct.pack("<H", self.value))
def __str__(self):
return str(self.value)
class _3ds_int(object):
'''Class representing an int (4-byte integer) for a 3ds file.'''
__slots__ = 'value'
def __init__(self, val=0):
self.value=val
def get_size(self):
return SZ_INT
def write(self,file):
file.write(struct.pack("<I", self.value))
def __str__(self):
return str(self.value)
class _3ds_float(object):
'''Class representing a 4-byte IEEE floating point number for a 3ds file.'''
__slots__ = 'value'
def __init__(self, val=0.0):
self.value=val
def get_size(self):
return SZ_FLOAT
def write(self,file):
file.write(struct.pack("<f", self.value))
def __str__(self):
return str(self.value)
class _3ds_string(object):
'''Class representing a zero-terminated string for a 3ds file.'''
__slots__ = 'value'
def __init__(self, val=""):
self.value=val
def get_size(self):
return (len(self.value)+1)
def write(self,file):
binary_format = "<%ds" % (len(self.value)+1)
file.write(struct.pack(binary_format, self.value))
def __str__(self):
return self.value
class _3ds_point_3d(object):
'''Class representing a three-dimensional point for a 3ds file.'''
__slots__ = 'x','y','z'
def __init__(self, point=(0.0,0.0,0.0)):
self.x, self.y, self.z = point
def get_size(self):
return 3*SZ_FLOAT
def write(self,file):
file.write(struct.pack('<3f', self.x, self.y, self.z))
def __str__(self):
return '(%f, %f, %f)' % (self.x, self.y, self.z)
# Used for writing a track
"""
class _3ds_point_4d(object):
'''Class representing a four-dimensional point for a 3ds file, for instance a quaternion.'''
__slots__ = 'x','y','z','w'
def __init__(self, point=(0.0,0.0,0.0,0.0)):
self.x, self.y, self.z, self.w = point
def get_size(self):
return 4*SZ_FLOAT
def write(self,file):
data=struct.pack('<4f', self.x, self.y, self.z, self.w)
file.write(data)
def __str__(self):
return '(%f, %f, %f, %f)' % (self.x, self.y, self.z, self.w)
"""
class _3ds_point_uv(object):
'''Class representing a UV-coordinate for a 3ds file.'''
__slots__ = 'uv'
def __init__(self, point=(0.0,0.0)):
self.uv = point
def __cmp__(self, other):
return cmp(self.uv,other.uv)
def get_size(self):
return 2*SZ_FLOAT
def write(self,file):
data=struct.pack('<2f', self.uv[0], self.uv[1])
file.write(data)
def __str__(self):
return '(%g, %g)' % self.uv
class _3ds_rgb_color(object):
'''Class representing a (24-bit) rgb color for a 3ds file.'''
__slots__ = 'r','g','b'
def __init__(self, col=(0,0,0)):
self.r, self.g, self.b = col
def get_size(self):
return 3
def write(self,file):
file.write( struct.pack('<3B', int(255*self.r), int(255*self.g), int(255*self.b) ) )
# file.write( struct.pack('<3c', chr(int(255*self.r)), chr(int(255*self.g)), chr(int(255*self.b)) ) )
def __str__(self):
return '{%f, %f, %f}' % (self.r, self.g, self.b)
class _3ds_face(object):
'''Class representing a face for a 3ds file.'''
__slots__ = 'vindex'
def __init__(self, vindex):
self.vindex = vindex
def get_size(self):
return 4*SZ_SHORT
def write(self,file):
# The last zero is only used by 3d studio
file.write(struct.pack("<4H", self.vindex[0],self.vindex[1], self.vindex[2], 0))
def __str__(self):
return '[%d %d %d]' % (self.vindex[0],self.vindex[1], self.vindex[2])
class _3ds_array(object):
'''Class representing an array of variables for a 3ds file.
Consists of a _3ds_short to indicate the number of items, followed by the items themselves.
'''
__slots__ = 'values', 'size'
def __init__(self):
self.values=[]
self.size=SZ_SHORT
# add an item:
def add(self,item):
self.values.append(item)
self.size+=item.get_size()
def get_size(self):
return self.size
def write(self,file):
_3ds_short(len(self.values)).write(file)
#_3ds_int(len(self.values)).write(file)
for value in self.values:
value.write(file)
# To not overwhelm the output in a dump, a _3ds_array only
# outputs the number of items, not all of the actual items.
def __str__(self):
return '(%d items)' % len(self.values)
class _3ds_named_variable(object):
'''Convenience class for named variables.'''
__slots__ = 'value', 'name'
def __init__(self, name, val=None):
self.name=name
self.value=val
def get_size(self):
if (self.value==None):
return 0
else:
return self.value.get_size()
def write(self, file):
if (self.value!=None):
self.value.write(file)
def dump(self,indent):
if (self.value!=None):
spaces=""
for i in range(indent):
spaces+=" ";
if (self.name!=""):
print(spaces, self.name, " = ", self.value)
else:
print(spaces, "[unnamed]", " = ", self.value)
#the chunk class
class _3ds_chunk(object):
'''Class representing a chunk in a 3ds file.
Chunks contain zero or more variables, followed by zero or more subchunks.
'''
__slots__ = 'ID', 'size', 'variables', 'subchunks'
def __init__(self, id=0):
self.ID=_3ds_short(id)
self.size=_3ds_int(0)
self.variables=[]
self.subchunks=[]
def set_ID(self, id):
self.ID=_3ds_short(id)
def add_variable(self, name, var):
'''Add a named variable.
The name is mostly for debugging purposes.'''
self.variables.append(_3ds_named_variable(name,var))
def add_subchunk(self, chunk):
'''Add a subchunk.'''
self.subchunks.append(chunk)
def get_size(self):
'''Calculate the size of the chunk and return it.
The sizes of the variables and subchunks are used to determine this chunk\'s size.'''
tmpsize=self.ID.get_size()+self.size.get_size()
for variable in self.variables:
tmpsize+=variable.get_size()
for subchunk in self.subchunks:
tmpsize+=subchunk.get_size()
self.size.value=tmpsize
return self.size.value
def write(self, file):
'''Write the chunk to a file.
Uses the write function of the variables and the subchunks to do the actual work.'''
#write header
self.ID.write(file)
self.size.write(file)
for variable in self.variables:
variable.write(file)
for subchunk in self.subchunks:
subchunk.write(file)
def dump(self, indent=0):
'''Write the chunk to a file.
Dump is used for debugging purposes, to dump the contents of a chunk to the standard output.
Uses the dump function of the named variables and the subchunks to do the actual work.'''
spaces=""
for i in range(indent):
spaces+=" ";
print(spaces, "ID=", hex(self.ID.value), "size=", self.get_size())
for variable in self.variables:
variable.dump(indent+1)
for subchunk in self.subchunks:
subchunk.dump(indent+1)
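# Illustrative sketch (not part of the original script): building a minimal chunk
# tree with the classes above, the same way save_3ds() starts its PRIMARY chunk.
# get_size() fills in the size fields recursively, so it is called before write().
# The helper name is hypothetical and it is never called.
def _chunk_example(file):
    version_chunk = _3ds_chunk(VERSION)
    version_chunk.add_variable("version", _3ds_int(3))
    primary = _3ds_chunk(PRIMARY)
    primary.add_subchunk(version_chunk)
    primary.get_size()  # 6 byte header + (6 byte subchunk header + 4 byte int) = 16
    primary.write(file)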
######################################################
# EXPORT
######################################################
def get_material_images(material):
# blender utility func.
if material:
return [s.texture.image for s in material.textures if s and s.texture.type == 'IMAGE' and s.texture.image]
return []
# images = []
# if material:
# for mtex in material.getTextures():
# if mtex and mtex.tex.type == Blender.Texture.Types.IMAGE:
# image = mtex.tex.image
# if image:
# images.append(image) # maye want to include info like diffuse, spec here.
# return images
def make_material_subchunk(id, color):
'''Make a material subchunk.
Used for color subchunks, such as diffuse color or ambient color subchunks.'''
mat_sub = _3ds_chunk(id)
col1 = _3ds_chunk(RGB1)
col1.add_variable("color1", _3ds_rgb_color(color));
mat_sub.add_subchunk(col1)
# optional:
# col2 = _3ds_chunk(RGB1)
# col2.add_variable("color2", _3ds_rgb_color(color));
# mat_sub.add_subchunk(col2)
return mat_sub
def make_material_texture_chunk(id, images):
"""Make Material Map texture chunk
"""
mat_sub = _3ds_chunk(id)
def add_image(img):
filename = os.path.basename(img.filename)
# filename = image.filename.split('\\')[-1].split('/')[-1]
mat_sub_file = _3ds_chunk(MATMAPFILE)
mat_sub_file.add_variable("mapfile", _3ds_string(sane_name(filename)))
mat_sub.add_subchunk(mat_sub_file)
for image in images:
add_image(image)
return mat_sub
def make_material_chunk(material, image):
'''Make a material chunk out of a blender material.'''
material_chunk = _3ds_chunk(MATERIAL)
name = _3ds_chunk(MATNAME)
if material: name_str = material.name
else: name_str = 'None'
if image: name_str += image.name
name.add_variable("name", _3ds_string(sane_name(name_str)))
material_chunk.add_subchunk(name)
if not material:
material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, (0,0,0) ))
material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, (.8, .8, .8) ))
material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, (1,1,1) ))
else:
material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, [a*material.ambient for a in material.diffuse_color] ))
# material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, [a*material.amb for a in material.rgbCol] ))
material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, material.diffuse_color))
# material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, material.rgbCol))
material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specular_color))
# material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specCol))
images = get_material_images(material) # can be None
if image: images.append(image)
if images:
material_chunk.add_subchunk(make_material_texture_chunk(MATMAP, images))
return material_chunk
class tri_wrapper(object):
'''Class representing a triangle.
Used when converting faces to triangles'''
__slots__ = 'vertex_index', 'mat', 'image', 'faceuvs', 'offset'
def __init__(self, vindex=(0,0,0), mat=None, image=None, faceuvs=None):
self.vertex_index= vindex
self.mat= mat
self.image= image
self.faceuvs= faceuvs
self.offset= [0, 0, 0] # offset indices
def extract_triangles(mesh):
'''Extract triangles from a mesh.
If the mesh contains quads, they will be split into triangles.'''
tri_list = []
do_uv = len(mesh.uv_textures)
# do_uv = mesh.faceUV
# if not do_uv:
# face_uv = None
img = None
for i, face in enumerate(mesh.faces):
f_v = face.verts
# f_v = face.v
uf = mesh.active_uv_texture.data[i] if do_uv else None
if do_uv:
f_uv = (uf.uv1, uf.uv2, uf.uv3, uf.uv4) if face.verts[3] else (uf.uv1, uf.uv2, uf.uv3)
# f_uv = face.uv
img = uf.image if uf else None
# img = face.image
if img: img = img.name
if f_v[3] == 0:
# if len(f_v)==3:
new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img)
# new_tri = tri_wrapper((f_v[0].index, f_v[1].index, f_v[2].index), face.mat, img)
if (do_uv): new_tri.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])
tri_list.append(new_tri)
else: #it's a quad
new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img)
# new_tri = tri_wrapper((f_v[0].index, f_v[1].index, f_v[2].index), face.mat, img)
new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), face.material_index, img)
# new_tri_2 = tri_wrapper((f_v[0].index, f_v[2].index, f_v[3].index), face.mat, img)
if (do_uv):
new_tri.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])
new_tri_2.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])
tri_list.append( new_tri )
tri_list.append( new_tri_2 )
return tri_list
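# Illustrative sketch (not part of the original script): the quad fan split used
# above, on plain vertex indices. A quad (v0, v1, v2, v3) becomes the triangles
# (v0, v1, v2) and (v0, v2, v3); face UVs are split the same way. Never called.
def _quad_split_example(quad=(10, 11, 12, 13)):
    return [(quad[0], quad[1], quad[2]), (quad[0], quad[2], quad[3])]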
def remove_face_uv(verts, tri_list):
'''Remove face UV coordinates from a list of triangles.
Since 3ds files only support one pair of uv coordinates for each vertex, face uv coordinates
need to be converted to vertex uv coordinates. That means that vertices need to be duplicated when
there are multiple uv coordinates per vertex.'''
# initialize a list of UniqueLists, one per vertex:
#uv_list = [UniqueList() for i in xrange(len(verts))]
unique_uvs= [{} for i in range(len(verts))]
# for each face uv coordinate, add it to the UniqueList of the vertex
for tri in tri_list:
for i in range(3):
# store the index into the UniqueList for future reference:
# offset.append(uv_list[tri.vertex_index[i]].add(_3ds_point_uv(tri.faceuvs[i])))
context_uv_vert= unique_uvs[tri.vertex_index[i]]
uvkey= tri.faceuvs[i]
offset_index__uv_3ds = context_uv_vert.get(uvkey)
if not offset_index__uv_3ds:
offset_index__uv_3ds = context_uv_vert[uvkey] = len(context_uv_vert), _3ds_point_uv(uvkey)
tri.offset[i] = offset_index__uv_3ds[0]
# At this point, each vertex has a UniqueList containing every uv coordinate that is associated with it
# only once.
# Now we need to duplicate every vertex as many times as it has uv coordinates and make sure the
# faces refer to the new face indices:
vert_index = 0
vert_array = _3ds_array()
uv_array = _3ds_array()
index_list = []
for i,vert in enumerate(verts):
index_list.append(vert_index)
pt = _3ds_point_3d(vert.co) # reuse, should be ok
uvmap = [None] * len(unique_uvs[i])
for ii, uv_3ds in unique_uvs[i].values():
# add a vertex duplicate to the vertex_array for every uv associated with this vertex:
vert_array.add(pt)
# add the uv coordinate to the uv array:
# This for loop does not give uv's ordered by ii, so we create a new map
# and add the uv's later
# uv_array.add(uv_3ds)
uvmap[ii] = uv_3ds
# Add the uv's in the correct order
for uv_3ds in uvmap:
# add the uv coordinate to the uv array:
uv_array.add(uv_3ds)
vert_index += len(unique_uvs[i])
# Make sure the triangle vertex indices now refer to the new vertex list:
for tri in tri_list:
for i in range(3):
tri.offset[i]+=index_list[tri.vertex_index[i]]
tri.vertex_index= tri.offset
return vert_array, uv_array, tri_list
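# Illustrative sketch (not part of the original script): remove_face_uv() in
# isolation. The fake vertex class exists only for this example; with one UV per
# vertex no duplicates are created, so both arrays stay 3 items long. Never called.
def _remove_face_uv_example():
    class _FakeVert(object):
        def __init__(self, co):
            self.co = co
    verts = [_FakeVert((0.0, 0.0, 0.0)), _FakeVert((1.0, 0.0, 0.0)), _FakeVert((0.0, 1.0, 0.0))]
    uvs = (uv_key((0.0, 0.0)), uv_key((1.0, 0.0)), uv_key((0.0, 1.0)))
    tri = tri_wrapper((0, 1, 2), 0, None, uvs)
    vert_array, uv_array, tris = remove_face_uv(verts, [tri])
    return len(vert_array.values), len(uv_array.values)  # -> (3, 3)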
def make_faces_chunk(tri_list, mesh, materialDict):
'''Make a chunk for the faces.
Also adds subchunks assigning materials to all faces.'''
materials = mesh.materials
if not materials:
mat = None
face_chunk = _3ds_chunk(OBJECT_FACES)
face_list = _3ds_array()
if len(mesh.uv_textures):
# if mesh.faceUV:
# Gather materials used in this mesh - mat/image pairs
unique_mats = {}
for i,tri in enumerate(tri_list):
face_list.add(_3ds_face(tri.vertex_index))
if materials:
mat = materials[tri.mat]
if mat: mat = mat.name
img = tri.image
try:
context_mat_face_array = unique_mats[mat, img][1]
except:
if mat: name_str = mat
else: name_str = 'None'
if img: name_str += img
context_mat_face_array = _3ds_array()
unique_mats[mat, img] = _3ds_string(sane_name(name_str)), context_mat_face_array
context_mat_face_array.add(_3ds_short(i))
# obj_material_faces[tri.mat].add(_3ds_short(i))
face_chunk.add_variable("faces", face_list)
for mat_name, mat_faces in unique_mats.values():
obj_material_chunk=_3ds_chunk(OBJECT_MATERIAL)
obj_material_chunk.add_variable("name", mat_name)
obj_material_chunk.add_variable("face_list", mat_faces)
face_chunk.add_subchunk(obj_material_chunk)
else:
obj_material_faces=[]
obj_material_names=[]
for m in materials:
if m:
obj_material_names.append(_3ds_string(sane_name(m.name)))
obj_material_faces.append(_3ds_array())
n_materials = len(obj_material_names)
for i,tri in enumerate(tri_list):
face_list.add(_3ds_face(tri.vertex_index))
if (tri.mat < n_materials):
obj_material_faces[tri.mat].add(_3ds_short(i))
face_chunk.add_variable("faces", face_list)
for i in range(n_materials):
obj_material_chunk=_3ds_chunk(OBJECT_MATERIAL)
obj_material_chunk.add_variable("name", obj_material_names[i])
obj_material_chunk.add_variable("face_list", obj_material_faces[i])
face_chunk.add_subchunk(obj_material_chunk)
return face_chunk
def make_vert_chunk(vert_array):
'''Make a vertex chunk out of an array of vertices.'''
vert_chunk = _3ds_chunk(OBJECT_VERTICES)
vert_chunk.add_variable("vertices",vert_array)
return vert_chunk
def make_uv_chunk(uv_array):
'''Make a UV chunk out of an array of UVs.'''
uv_chunk = _3ds_chunk(OBJECT_UV)
uv_chunk.add_variable("uv coords", uv_array)
return uv_chunk
def make_mesh_chunk(mesh, materialDict):
'''Make a chunk out of a Blender mesh.'''
# Extract the triangles from the mesh:
tri_list = extract_triangles(mesh)
if len(mesh.uv_textures):
# if mesh.faceUV:
# Remove the face UVs and convert it to vertex UV:
vert_array, uv_array, tri_list = remove_face_uv(mesh.verts, tri_list)
else:
# Add the vertices to the vertex array:
vert_array = _3ds_array()
for vert in mesh.verts:
vert_array.add(_3ds_point_3d(vert.co))
# If the mesh has vertex UVs, create an array of UVs:
if len(mesh.sticky):
# if mesh.vertexUV:
uv_array = _3ds_array()
for uv in mesh.sticky:
# for vert in mesh.verts:
uv_array.add(_3ds_point_uv(uv.co))
# uv_array.add(_3ds_point_uv(vert.uvco))
else:
# no UV at all:
uv_array = None
# create the chunk:
mesh_chunk = _3ds_chunk(OBJECT_MESH)
# add vertex chunk:
mesh_chunk.add_subchunk(make_vert_chunk(vert_array))
# add faces chunk:
mesh_chunk.add_subchunk(make_faces_chunk(tri_list, mesh, materialDict))
# if available, add uv chunk:
if uv_array:
mesh_chunk.add_subchunk(make_uv_chunk(uv_array))
return mesh_chunk
""" # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
def make_kfdata(start=0, stop=0, curtime=0):
'''Make the basic keyframe data chunk'''
kfdata = _3ds_chunk(KFDATA)
kfhdr = _3ds_chunk(KFDATA_KFHDR)
kfhdr.add_variable("revision", _3ds_short(0))
# Not really sure what filename is used for, but it seems it is usually used
# to identify the program that generated the .3ds:
kfhdr.add_variable("filename", _3ds_string("Blender"))
kfhdr.add_variable("animlen", _3ds_int(stop-start))
kfseg = _3ds_chunk(KFDATA_KFSEG)
kfseg.add_variable("start", _3ds_int(start))
kfseg.add_variable("stop", _3ds_int(stop))
kfcurtime = _3ds_chunk(KFDATA_KFCURTIME)
kfcurtime.add_variable("curtime", _3ds_int(curtime))
kfdata.add_subchunk(kfhdr)
kfdata.add_subchunk(kfseg)
kfdata.add_subchunk(kfcurtime)
return kfdata
"""
"""
def make_track_chunk(ID, obj):
'''Make a chunk for track data.
Depending on the ID, this will construct a position, rotation or scale track.'''
track_chunk = _3ds_chunk(ID)
track_chunk.add_variable("track_flags", _3ds_short())
track_chunk.add_variable("unknown", _3ds_int())
track_chunk.add_variable("unknown", _3ds_int())
track_chunk.add_variable("nkeys", _3ds_int(1))
# Next section should be repeated for every keyframe, but for now, animation is not actually supported.
track_chunk.add_variable("tcb_frame", _3ds_int(0))
track_chunk.add_variable("tcb_flags", _3ds_short())
if obj.type=='Empty':
if ID==POS_TRACK_TAG:
# position vector:
track_chunk.add_variable("position", _3ds_point_3d(obj.getLocation()))
elif ID==ROT_TRACK_TAG:
# rotation (quaternion, angle first, followed by axis):
q = obj.getEuler().toQuat()
track_chunk.add_variable("rotation", _3ds_point_4d((q.angle, q.axis[0], q.axis[1], q.axis[2])))
elif ID==SCL_TRACK_TAG:
# scale vector:
track_chunk.add_variable("scale", _3ds_point_3d(obj.getSize()))
else:
# meshes have their transformations applied before
# exporting, so write identity transforms here:
if ID==POS_TRACK_TAG:
# position vector:
track_chunk.add_variable("position", _3ds_point_3d((0.0,0.0,0.0)))
elif ID==ROT_TRACK_TAG:
# rotation (quaternion, angle first, followed by axis):
track_chunk.add_variable("rotation", _3ds_point_4d((0.0, 1.0, 0.0, 0.0)))
elif ID==SCL_TRACK_TAG:
# scale vector:
track_chunk.add_variable("scale", _3ds_point_3d((1.0, 1.0, 1.0)))
return track_chunk
"""
"""
def make_kf_obj_node(obj, name_to_id):
'''Make a node chunk for a Blender object.
Takes the Blender object as a parameter. Object id's are taken from the dictionary name_to_id.
Blender Empty objects are converted to dummy nodes.'''
name = obj.name
# main object node chunk:
kf_obj_node = _3ds_chunk(KFDATA_OBJECT_NODE_TAG)
# chunk for the object id:
obj_id_chunk = _3ds_chunk(OBJECT_NODE_ID)
# object id is from the name_to_id dictionary:
obj_id_chunk.add_variable("node_id", _3ds_short(name_to_id[name]))
# object node header:
obj_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR)
# object name:
if obj.type == 'Empty':
# Empties are called "$$$DUMMY" and use the OBJECT_INSTANCE_NAME chunk
# for their name (see below):
obj_node_header_chunk.add_variable("name", _3ds_string("$$$DUMMY"))
else:
# Add the name:
obj_node_header_chunk.add_variable("name", _3ds_string(sane_name(name)))
# Add Flag variables (not sure what they do):
obj_node_header_chunk.add_variable("flags1", _3ds_short(0))
obj_node_header_chunk.add_variable("flags2", _3ds_short(0))
# Check parent-child relationships:
parent = obj.parent
if (parent == None) or (parent.name not in name_to_id):
# If no parent, or the parents name is not in the name_to_id dictionary,
# parent id becomes -1:
obj_node_header_chunk.add_variable("parent", _3ds_short(-1))
else:
# Get the parent's id from the name_to_id dictionary:
obj_node_header_chunk.add_variable("parent", _3ds_short(name_to_id[parent.name]))
# Add pivot chunk:
obj_pivot_chunk = _3ds_chunk(OBJECT_PIVOT)
obj_pivot_chunk.add_variable("pivot", _3ds_point_3d(obj.getLocation()))
kf_obj_node.add_subchunk(obj_pivot_chunk)
# add subchunks for object id and node header:
kf_obj_node.add_subchunk(obj_id_chunk)
kf_obj_node.add_subchunk(obj_node_header_chunk)
# Empty objects need to have an extra chunk for the instance name:
if obj.type == 'Empty':
obj_instance_name_chunk = _3ds_chunk(OBJECT_INSTANCE_NAME)
obj_instance_name_chunk.add_variable("name", _3ds_string(sane_name(name)))
kf_obj_node.add_subchunk(obj_instance_name_chunk)
# Add track chunks for position, rotation and scale:
kf_obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, obj))
kf_obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, obj))
kf_obj_node.add_subchunk(make_track_chunk(SCL_TRACK_TAG, obj))
return kf_obj_node
"""
# import BPyMessages
def save_3ds(filename, context):
'''Save the Blender scene to a 3ds file.'''
# Time the export
if not filename.lower().endswith('.3ds'):
filename += '.3ds'
# XXX
# if not BPyMessages.Warning_SaveOver(filename):
# return
# XXX
time1 = time.clock()
# time1= Blender.sys.time()
# Blender.Window.WaitCursor(1)
sce = context.scene
# sce= bpy.data.scenes.active
# Initialize the main chunk (primary):
primary = _3ds_chunk(PRIMARY)
# Add version chunk:
version_chunk = _3ds_chunk(VERSION)
version_chunk.add_variable("version", _3ds_int(3))
primary.add_subchunk(version_chunk)
# init main object info chunk:
object_info = _3ds_chunk(OBJECTINFO)
''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
# init main key frame data chunk:
kfdata = make_kfdata()
'''
# Get all the supported objects selected in this scene:
# ob_sel= list(sce.objects.context)
# mesh_objects = [ (ob, me) for ob in ob_sel for me in (BPyMesh.getMeshFromObject(ob, None, True, False, sce),) if me ]
# empty_objects = [ ob for ob in ob_sel if ob.type == 'Empty' ]
# Make a list of all materials used in the selected meshes (use a dictionary,
# each material is added once):
materialDict = {}
mesh_objects = []
for ob in [ob for ob in context.scene.objects if ob.is_visible()]:
# for ob in sce.objects.context:
# get derived objects
free, derived = create_derived_objects(ob)
if derived == None: continue
for ob_derived, mat in derived:
# for ob_derived, mat in getDerivedObjects(ob, False):
if ob.type not in ('MESH', 'CURVE', 'SURFACE', 'TEXT', 'META'):
continue
data = ob_derived.create_mesh(True, 'PREVIEW')
# data = getMeshFromObject(ob_derived, None, True, False, sce)
if data:
data.transform(mat)
# data.transform(mat, recalc_normals=False)
mesh_objects.append((ob_derived, data))
mat_ls = data.materials
mat_ls_len = len(mat_ls)
# get material/image tuples.
if len(data.uv_textures):
# if data.faceUV:
if not mat_ls:
mat = mat_name = None
for f, uf in zip(data.faces, data.active_uv_texture.data):
if mat_ls:
mat_index = f.material_index
# mat_index = f.mat
if mat_index >= mat_ls_len:
mat_index = f.mat = 0
mat = mat_ls[mat_index]
if mat: mat_name = mat.name
else: mat_name = None
# else they're already set to None
img = uf.image
# img = f.image
if img: img_name = img.name
else: img_name = None
materialDict.setdefault((mat_name, img_name), (mat, img) )
else:
for mat in mat_ls:
if mat: # material may be None so check its not.
materialDict.setdefault((mat.name, None), (mat, None) )
# Why 0 Why!
for f in data.faces:
if f.material_index >= mat_ls_len:
# if f.mat >= mat_ls_len:
f.material_index = 0
# f.mat = 0
if free:
free_derived_objects(ob)
# Make material chunks for all materials used in the meshes:
for mat_and_image in materialDict.values():
object_info.add_subchunk(make_material_chunk(mat_and_image[0], mat_and_image[1]))
# Give all objects a unique ID and build a dictionary from object name to object id:
"""
name_to_id = {}
for ob, data in mesh_objects:
name_to_id[ob.name]= len(name_to_id)
#for ob in empty_objects:
# name_to_id[ob.name]= len(name_to_id)
"""
# Create object chunks for all meshes:
i = 0
for ob, blender_mesh in mesh_objects:
# create a new object chunk
object_chunk = _3ds_chunk(OBJECT)
# set the object name
object_chunk.add_variable("name", _3ds_string(sane_name(ob.name)))
# make a mesh chunk out of the mesh:
object_chunk.add_subchunk(make_mesh_chunk(blender_mesh, materialDict))
object_info.add_subchunk(object_chunk)
''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
# make a kf object node for the object:
kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
'''
# if not blender_mesh.users:
bpy.data.remove_mesh(blender_mesh)
# blender_mesh.verts = None
i += 1
# Create chunks for all empties:
''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
for ob in empty_objects:
# Empties only require a kf object node:
kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
pass
'''
# Add main object info chunk to primary chunk:
primary.add_subchunk(object_info)
''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
# Add main keyframe data chunk to primary chunk:
primary.add_subchunk(kfdata)
'''
# At this point, the chunk hierarchy is completely built.
# Check the size:
primary.get_size()
# Open the file for writing:
file = open( filename, 'wb' )
# Recursively write the chunks to file:
primary.write(file)
# Close the file:
file.close()
# Debugging only: report the exporting time:
# Blender.Window.WaitCursor(0)
print("3ds export time: %.2f" % (time.clock() - time1))
# print("3ds export time: %.2f" % (Blender.sys.time() - time1))
# Debugging only: dump the chunk hierarchy:
#primary.dump()
# if __name__=='__main__':
# if struct:
# Blender.Window.FileSelector(save_3ds, "Export 3DS", Blender.sys.makename(ext='.3ds'))
# else:
# Blender.Draw.PupMenu("Error%t|This script requires a full python installation")
# # save_3ds('/test_b.3ds')
class EXPORT_OT_3ds(bpy.types.Operator):
'''
3DS Exporter
'''
__idname__ = "export.3ds"
__label__ = 'Export 3DS'
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
__props__ = [
bpy.props.StringProperty(attr="filename", name="File Name", description="File name used for exporting the 3DS file", maxlen= 1024, default= ""),
]
def execute(self, context):
save_3ds(self.filename, context)
return ('FINISHED',)
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self.__operator__)
return ('RUNNING_MODAL',)
def poll(self, context): # Poll isn't working yet
print("Poll")
return context.active_object != None
bpy.ops.add(EXPORT_OT_3ds)

3457  release/io/export_fbx.py  Normal file

@@ -0,0 +1,3457 @@
#!BPY
"""
Name: 'Autodesk FBX (.fbx)...'
Blender: 249
Group: 'Export'
Tooltip: 'Selection to an ASCII Autodesk FBX '
"""
__author__ = "Campbell Barton"
__url__ = ['www.blender.org', 'blenderartists.org']
__version__ = "1.2"
__bpydoc__ = """\
This script is an exporter to the FBX file format.
http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx
"""
# --------------------------------------------------------------------------
# FBX Export v0.1 by Campbell Barton (AKA Ideasman)
# --------------------------------------------------------------------------
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
import os
import time
import math # math.pi
import shutil # for file copying
# try:
# import time
# # import os # only needed for batch export, not used yet
# except:
# time = None # use this to check if they have python modules installed
# for python 2.3 support
try:
set()
except:
try:
from sets import Set as set
except:
set = None # so it complains you dont have a !
# # os is only needed for batch 'own dir' option
# try:
# import os
# except:
# os = None
# import Blender
import bpy
import Mathutils
# from Blender.Mathutils import Matrix, Vector, RotationMatrix
# import BPyObject
# import BPyMesh
# import BPySys
# import BPyMessages
## This was used to make V, but faster not to do all that
##valid = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_,.()[]{}'
##v = range(255)
##for c in valid: v.remove(ord(c))
v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,42,43,47,58,59,60,61,62,63,64,92,94,96,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254]
invalid = ''.join([chr(i) for i in v])
def cleanName(name):
for ch in invalid: name = name.replace(ch, '_')
return name
# del v, i
def copy_file(source, dest):
file = open(source, 'rb')
data = file.read()
file.close()
file = open(dest, 'wb')
file.write(data)
file.close()
def copy_images(dest_dir, textures):
if not dest_dir.endswith(os.sep):
dest_dir += os.sep
image_paths = set()
for tex in textures:
image_paths.add(Blender.sys.expandpath(tex.filename))
# Now copy images
copyCount = 0
for image_path in image_paths:
if Blender.sys.exists(image_path):
# Make a name for the target path.
dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1]
if not Blender.sys.exists(dest_image_path): # Image isn't already there
print('\tCopying "%s" > "%s"' % (image_path, dest_image_path))
try:
copy_file(image_path, dest_image_path)
copyCount+=1
except:
print('\t\tWarning, file failed to copy, skipping.')
print('\tCopied %d images' % copyCount)
# I guess FBX uses degrees instead of radians (Arystan).
# Call this function just before writing to FBX.
def eulerRadToDeg(eul):
ret = Mathutils.Euler()
ret.x = 180 / math.pi * eul[0]
ret.y = 180 / math.pi * eul[1]
ret.z = 180 / math.pi * eul[2]
return ret
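# Illustrative sketch (not part of the original exporter): eulerRadToDeg() is
# applied right before rotations are written, e.g. an euler of (pi/2, 0, pi) in
# radians becomes roughly (90, 0, 180) degrees. The helper name is hypothetical
# and it is never called.
def _euler_deg_example():
    eul_deg = eulerRadToDeg((math.pi / 2, 0.0, math.pi))
    # eul_deg.x ~ 90.0, eul_deg.y ~ 0.0, eul_deg.z ~ 180.0
    return eul_deg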
mtx4_identity = Mathutils.Matrix()
# testing
mtx_x90 = Mathutils.RotationMatrix( math.pi/2, 3, 'x') # used
#mtx_x90n = RotationMatrix(-90, 3, 'x')
#mtx_y90 = RotationMatrix( 90, 3, 'y')
#mtx_y90n = RotationMatrix(-90, 3, 'y')
#mtx_z90 = RotationMatrix( 90, 3, 'z')
#mtx_z90n = RotationMatrix(-90, 3, 'z')
#mtx4_x90 = RotationMatrix( 90, 4, 'x')
mtx4_x90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'x') # used
#mtx4_y90 = RotationMatrix( 90, 4, 'y')
mtx4_y90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'y') # used
mtx4_z90 = Mathutils.RotationMatrix( math.pi/2, 4, 'z') # used
mtx4_z90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'z') # used
# def strip_path(p):
# return p.split('\\')[-1].split('/')[-1]
# Used to add the scene name into the filename without using odd chars
sane_name_mapping_ob = {}
sane_name_mapping_mat = {}
sane_name_mapping_tex = {}
sane_name_mapping_take = {}
sane_name_mapping_group = {}
# Make sure reserved names are not used
sane_name_mapping_ob['Scene'] = 'Scene_'
sane_name_mapping_ob['blend_root'] = 'blend_root_'
def increment_string(t):
name = t
num = ''
while name and name[-1].isdigit():
num = name[-1] + num
name = name[:-1]
if num: return '%s%d' % (name, int(num)+1)
else: return name + '_0'
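# Illustrative sketch (not part of the original exporter): the candidate names
# increment_string() produces when sane_name() below hits a collision. The helper
# name is hypothetical and it is never called.
def _increment_string_example():
    return (increment_string('Cube'),    # -> 'Cube_0'
            increment_string('Cube_0'),  # -> 'Cube_1'
            increment_string('Cube_9'))  # -> 'Cube_10'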
# todo - Disallow the name 'Scene' and 'blend_root' - it will bugger things up.
def sane_name(data, dct):
#if not data: return None
if type(data)==tuple: # materials are paired up with images
data, other = data
use_other = True
else:
other = None
use_other = False
if data: name = data.name
else: name = None
orig_name = name
if other:
orig_name_other = other.name
name = '%s #%s' % (name, orig_name_other)
else:
orig_name_other = None
# dont cache, only ever call once for each data type now,
# so as to avoid namespace collision between types - like with objects <-> bones
#try: return dct[name]
#except: pass
if not name:
name = 'unnamed' # blank string, ASKING FOR TROUBLE!
else:
#name = BPySys.cleanName(name)
name = cleanName(name) # use our own
while name in iter(dct.values()): name = increment_string(name)
if use_other: # even if other is None - orig_name_other will be a string or None
dct[orig_name, orig_name_other] = name
else:
dct[orig_name] = name
return name
def sane_obname(data): return sane_name(data, sane_name_mapping_ob)
def sane_matname(data): return sane_name(data, sane_name_mapping_mat)
def sane_texname(data): return sane_name(data, sane_name_mapping_tex)
def sane_takename(data): return sane_name(data, sane_name_mapping_take)
def sane_groupname(data): return sane_name(data, sane_name_mapping_group)
# def derived_paths(fname_orig, basepath, FORCE_CWD=False):
# '''
# fname_orig - blender path, can be relative
# basepath - fname_rel will be relative to this
# FORCE_CWD - dont use the basepath, just add a ./ to the filename.
# use when we know the file will be in the basepath.
# '''
# fname = bpy.sys.expandpath(fname_orig)
# # fname = Blender.sys.expandpath(fname_orig)
# fname_strip = os.path.basename(fname)
# # fname_strip = strip_path(fname)
# if FORCE_CWD:
# fname_rel = '.' + os.sep + fname_strip
# else:
# fname_rel = bpy.sys.relpath(fname, basepath)
# # fname_rel = Blender.sys.relpath(fname, basepath)
# if fname_rel.startswith('//'): fname_rel = '.' + os.sep + fname_rel[2:]
# return fname, fname_strip, fname_rel
def mat4x4str(mat):
return '%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f' % tuple([ f for v in mat for f in v ])
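# Illustrative sketch (not part of the original exporter): mat4x4str() flattens a
# 4x4 matrix row by row into the comma-separated float list the ASCII FBX format
# expects; the identity matrix gives sixteen values starting "1.000000000000000,0...".
# The helper name is hypothetical and it is never called.
def _mat4x4str_example():
    return mat4x4str(mtx4_identity)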
# XXX not used
# duplicated in OBJ exporter
def getVertsFromGroup(me, group_index):
ret = []
for i, v in enumerate(me.verts):
for g in v.groups:
if g.group == group_index:
ret.append((i, g.weight))
return ret
# ob must be OB_MESH
def BPyMesh_meshWeight2List(ob):
''' Takes a mesh object and returns its vertex group names and a list of lists, one list per vertex.
Each per-vertex list is aligned with the group names and holds the float weight for each group.
These 2 lists can be modified and then used with list2MeshWeight to apply the changes.
'''
me = ob.data
# Clear the vert group.
groupNames= [g.name for g in ob.vertex_groups]
len_groupNames= len(groupNames)
if not len_groupNames:
# no verts? return a vert aligned empty list
return [[] for i in range(len(me.verts))], []
else:
vWeightList= [[0.0]*len_groupNames for i in range(len(me.verts))]
for i, v in enumerate(me.verts):
for g in v.groups:
vWeightList[i][g.group] = g.weight
return groupNames, vWeightList
def meshNormalizedWeights(me):
try: # account for old bad BPyMesh
groupNames, vWeightList = BPyMesh_meshWeight2List(me)
# groupNames, vWeightList = BPyMesh.meshWeight2List(me)
except:
return [],[]
if not groupNames:
return [],[]
for i, vWeights in enumerate(vWeightList):
tot = 0.0
for w in vWeights:
tot+=w
if tot:
for j, w in enumerate(vWeights):
vWeights[j] = w/tot
return groupNames, vWeightList
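# Illustrative sketch (not part of the original exporter): the per-vertex
# normalisation performed above, on plain numbers. A vertex weighted [0.2, 0.6]
# over two groups ends up as [0.25, 0.75]. The helper name is hypothetical and
# it is never called.
def _normalize_weights_example(vWeights=(0.2, 0.6)):
    tot = sum(vWeights)
    return [w / tot for w in vWeights] if tot else list(vWeights)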
header_comment = \
'''; FBX 6.1.0 project file
; Created by Blender FBX Exporter
; for support mail: ideasman42@gmail.com
; ----------------------------------------------------
'''
# This func can be called with just the filename
def write(filename, batch_objects = None, \
context = None,
EXP_OBS_SELECTED = True,
EXP_MESH = True,
EXP_MESH_APPLY_MOD = True,
# EXP_MESH_HQ_NORMALS = False,
EXP_ARMATURE = True,
EXP_LAMP = True,
EXP_CAMERA = True,
EXP_EMPTY = True,
EXP_IMAGE_COPY = False,
GLOBAL_MATRIX = Mathutils.Matrix(),
ANIM_ENABLE = True,
ANIM_OPTIMIZE = True,
ANIM_OPTIMIZE_PRECISSION = 6,
ANIM_ACTION_ALL = False,
BATCH_ENABLE = False,
BATCH_GROUP = True,
BATCH_FILE_PREFIX = '',
BATCH_OWN_DIR = False
):
# ----------------- Batch support!
if BATCH_ENABLE:
if os == None: BATCH_OWN_DIR = False
fbxpath = filename
# get the path component of filename
tmp_exists = bpy.sys.exists(fbxpath)
# tmp_exists = Blender.sys.exists(fbxpath)
if tmp_exists != 2: # a file, we want a path
fbxpath = os.path.dirname(fbxpath)
# while fbxpath and fbxpath[-1] not in ('/', '\\'):
# fbxpath = fbxpath[:-1]
if not fbxpath:
# if not filename:
# XXX
print('Error%t|Directory does not exist!')
# Draw.PupMenu('Error%t|Directory does not exist!')
return
tmp_exists = bpy.sys.exists(fbxpath)
# tmp_exists = Blender.sys.exists(fbxpath)
if tmp_exists != 2:
# XXX
print('Error%t|Directory does not exist!')
# Draw.PupMenu('Error%t|Directory does not exist!')
return
if not fbxpath.endswith(os.sep):
fbxpath += os.sep
del tmp_exists
if BATCH_GROUP:
data_seq = bpy.data.groups
else:
data_seq = bpy.data.scenes
# call this function within a loop with BATCH_ENABLE == False
orig_sce = context.scene
# orig_sce = bpy.data.scenes.active
new_fbxpath = fbxpath # own dir option modifies, we need to keep an original
for data in data_seq: # scene or group
newname = BATCH_FILE_PREFIX + cleanName(data.name)
# newname = BATCH_FILE_PREFIX + BPySys.cleanName(data.name)
if BATCH_OWN_DIR:
new_fbxpath = fbxpath + newname + os.sep
# path may already exist
# TODO - might exist but be a file. unlikely but should probably account for it.
if bpy.sys.exists(new_fbxpath) == 0:
# if Blender.sys.exists(new_fbxpath) == 0:
os.mkdir(new_fbxpath)
filename = new_fbxpath + newname + '.fbx'
print('\nBatch exporting %s as...\n\t"%s"' % (data, filename))
# XXX don't know what to do with this, probably do the same? (Arystan)
if BATCH_GROUP: #group
# group, so objects update properly, add a dummy scene.
sce = bpy.data.scenes.new()
sce.Layers = (1<<20) -1
bpy.data.scenes.active = sce
for ob_base in data.objects:
sce.objects.link(ob_base)
sce.update(1)
# TODO - BUMMER! Armatures not in the group wont animate the mesh
else:# scene
data_seq.active = data
# Call self with modified args
# Don't pass batch options since we already used them
write(filename, data.objects,
context,
False,
EXP_MESH,
EXP_MESH_APPLY_MOD,
# EXP_MESH_HQ_NORMALS,
EXP_ARMATURE,
EXP_LAMP,
EXP_CAMERA,
EXP_EMPTY,
EXP_IMAGE_COPY,
GLOBAL_MATRIX,
ANIM_ENABLE,
ANIM_OPTIMIZE,
ANIM_OPTIMIZE_PRECISSION,
ANIM_ACTION_ALL
)
if BATCH_GROUP:
# remove temp group scene
bpy.data.remove_scene(sce)
# bpy.data.scenes.unlink(sce)
bpy.data.scenes.active = orig_sce
return # so the script wont run after we have batch exported.
# end batch support
# Use this for working out paths relative to the export location
basepath = os.path.dirname(filename) or '.'
basepath += os.sep
# basepath = Blender.sys.dirname(filename)
# ----------------------------------------------
# storage classes
class my_bone_class:
__slots__ =(\
'blenName',\
'blenBone',\
'blenMeshes',\
'restMatrix',\
'parent',\
'fbxName',\
'fbxArm',\
'__pose_bone',\
'__anim_poselist')
def __init__(self, blenBone, fbxArm):
# This is so 2 armatures don't have naming conflicts since FBX bones use object namespace
self.fbxName = sane_obname(blenBone)
self.blenName = blenBone.name
self.blenBone = blenBone
self.blenMeshes = {} # fbxMeshObName : mesh
self.fbxArm = fbxArm
self.restMatrix = blenBone.armature_matrix
# self.restMatrix = blenBone.matrix['ARMATURESPACE']
# not used yet
# self.restMatrixInv = self.restMatrix.copy().invert()
# self.restMatrixLocal = None # set later, need parent matrix
self.parent = None
# not public
pose = fbxArm.blenObject.pose
# pose = fbxArm.blenObject.getPose()
self.__pose_bone = pose.pose_channels[self.blenName]
# self.__pose_bone = pose.bones[self.blenName]
# store a list of matrices here, (poseMatrix, head, tail)
# {frame:posematrix, frame:posematrix, ...}
self.__anim_poselist = {}
'''
def calcRestMatrixLocal(self):
if self.parent:
self.restMatrixLocal = self.restMatrix * self.parent.restMatrix.copy().invert()
else:
self.restMatrixLocal = self.restMatrix.copy()
'''
def setPoseFrame(self, f):
# cache pose info here, frame must be set beforehand
# Didn't end up needing head or tail; if we do, here it is.
'''
self.__anim_poselist[f] = (\
self.__pose_bone.poseMatrix.copy(),\
self.__pose_bone.head.copy(),\
self.__pose_bone.tail.copy() )
'''
self.__anim_poselist[f] = self.__pose_bone.pose_matrix.copy()
# self.__anim_poselist[f] = self.__pose_bone.poseMatrix.copy()
# get pose from frame.
def getPoseMatrix(self, f):
return self.__anim_poselist[f]
'''
def getPoseHead(self, f):
#return self.__pose_bone.head.copy()
return self.__anim_poselist[f][1].copy()
def getPoseTail(self, f):
#return self.__pose_bone.tail.copy()
return self.__anim_poselist[f][2].copy()
'''
# end
def getAnimParRelMatrix(self, frame):
#arm_mat = self.fbxArm.matrixWorld
#arm_mat = self.fbxArm.parRelMatrix()
if not self.parent:
#return mtx4_z90 * (self.getPoseMatrix(frame) * arm_mat) # dont apply arm matrix anymore
return mtx4_z90 * self.getPoseMatrix(frame)
else:
#return (mtx4_z90 * ((self.getPoseMatrix(frame) * arm_mat))) * (mtx4_z90 * (self.parent.getPoseMatrix(frame) * arm_mat)).invert()
return (mtx4_z90 * (self.getPoseMatrix(frame))) * (mtx4_z90 * self.parent.getPoseMatrix(frame)).invert()
# we need these because cameras and lights have modified rotations
def getAnimParRelMatrixRot(self, frame):
return self.getAnimParRelMatrix(frame)
def flushAnimData(self):
self.__anim_poselist.clear()
class my_object_generic:
# Other settings can be applied for each type - mesh, armature etc.
def __init__(self, ob, matrixWorld = None):
self.fbxName = sane_obname(ob)
self.blenObject = ob
self.fbxGroupNames = []
self.fbxParent = None # set later on IF the parent is in the selection.
if matrixWorld: self.matrixWorld = matrixWorld * GLOBAL_MATRIX
else: self.matrixWorld = ob.matrix * GLOBAL_MATRIX
# else: self.matrixWorld = ob.matrixWorld * GLOBAL_MATRIX
self.__anim_poselist = {} # we should only access this
def parRelMatrix(self):
if self.fbxParent:
return self.matrixWorld * self.fbxParent.matrixWorld.copy().invert()
else:
return self.matrixWorld
def setPoseFrame(self, f):
self.__anim_poselist[f] = self.blenObject.matrix.copy()
# self.__anim_poselist[f] = self.blenObject.matrixWorld.copy()
def getAnimParRelMatrix(self, frame):
if self.fbxParent:
#return (self.__anim_poselist[frame] * self.fbxParent.__anim_poselist[frame].copy().invert() ) * GLOBAL_MATRIX
return (self.__anim_poselist[frame] * GLOBAL_MATRIX) * (self.fbxParent.__anim_poselist[frame] * GLOBAL_MATRIX).invert()
else:
return self.__anim_poselist[frame] * GLOBAL_MATRIX
def getAnimParRelMatrixRot(self, frame):
type = self.blenObject.type
if self.fbxParent:
matrix_rot = (((self.__anim_poselist[frame] * GLOBAL_MATRIX) * (self.fbxParent.__anim_poselist[frame] * GLOBAL_MATRIX).invert())).rotationPart()
else:
matrix_rot = (self.__anim_poselist[frame] * GLOBAL_MATRIX).rotationPart()
# Lamps need to be rotated
if type =='LAMP':
matrix_rot = mtx_x90 * matrix_rot
elif type =='CAMERA':
# elif ob and type =='Camera':
y = Mathutils.Vector(0,1,0) * matrix_rot
matrix_rot = matrix_rot * Mathutils.RotationMatrix(math.pi/2, 3, 'r', y)
return matrix_rot
# ----------------------------------------------
print('\nFBX export starting...', filename)
start_time = time.clock()
# start_time = Blender.sys.time()
try:
file = open(filename, 'w')
except:
return False
sce = context.scene
# sce = bpy.data.scenes.active
world = sce.world
# ---------------------------- Write the header first
file.write(header_comment)
if time:
curtime = time.localtime()[0:6]
else:
curtime = (0,0,0,0,0,0)
#
file.write(\
'''FBXHeaderExtension: {
FBXHeaderVersion: 1003
FBXVersion: 6100
CreationTimeStamp: {
Version: 1000
Year: %.4i
Month: %.2i
Day: %.2i
Hour: %.2i
Minute: %.2i
Second: %.2i
Millisecond: 0
}
Creator: "FBX SDK/FBX Plugins build 20070228"
OtherFlags: {
FlagPLE: 0
}
}''' % (curtime))
file.write('\nCreationTime: "%.4i-%.2i-%.2i %.2i:%.2i:%.2i:000"' % curtime)
file.write('\nCreator: "Blender3D version 2.5"')
# file.write('\nCreator: "Blender3D version %.2f"' % Blender.Get('version'))
pose_items = [] # list of (fbxName, matrix) to write pose data for, easier to collect along the way
# --------------- funcs for exporting
def object_tx(ob, loc, matrix, matrix_mod = None):
'''
Matrix mod is so armature objects can modify their bone matrices
'''
if isinstance(ob, bpy.types.Bone):
# if isinstance(ob, Blender.Types.BoneType):
# we know we have a matrix
# matrix = mtx4_z90 * (ob.matrix['ARMATURESPACE'] * matrix_mod)
matrix = mtx4_z90 * ob.armature_matrix # dont apply armature matrix anymore
# matrix = mtx4_z90 * ob.matrix['ARMATURESPACE'] # dont apply armature matrix anymore
parent = ob.parent
if parent:
#par_matrix = mtx4_z90 * (parent.matrix['ARMATURESPACE'] * matrix_mod)
par_matrix = mtx4_z90 * parent.armature_matrix # dont apply armature matrix anymore
# par_matrix = mtx4_z90 * parent.matrix['ARMATURESPACE'] # dont apply armature matrix anymore
matrix = matrix * par_matrix.copy().invert()
matrix_rot = matrix.rotationPart()
loc = tuple(matrix.translationPart())
scale = tuple(matrix.scalePart())
rot = tuple(matrix_rot.toEuler())
else:
# This is bad because we need the parent relative matrix from the fbx parent (if we have one), dont use anymore
#if ob and not matrix: matrix = ob.matrixWorld * GLOBAL_MATRIX
if ob and not matrix: raise Exception("error: this should never happen!")
matrix_rot = matrix
#if matrix:
# matrix = matrix_scale * matrix
if matrix:
loc = tuple(matrix.translationPart())
scale = tuple(matrix.scalePart())
matrix_rot = matrix.rotationPart()
# Lamps need to be rotated
if ob and ob.type =='Lamp':
matrix_rot = mtx_x90 * matrix_rot
rot = tuple(matrix_rot.toEuler())
elif ob and ob.type =='Camera':
y = Mathutils.Vector(0,1,0) * matrix_rot
matrix_rot = matrix_rot * Mathutils.RotationMatrix(math.pi/2, 3, 'r', y)
rot = tuple(matrix_rot.toEuler())
else:
rot = tuple(matrix_rot.toEuler())
else:
if not loc:
loc = 0,0,0
scale = 1,1,1
rot = 0,0,0
return loc, rot, scale, matrix, matrix_rot
def write_object_tx(ob, loc, matrix, matrix_mod= None):
'''
We have loc to set the location of non-Blender objects that have a location
matrix_mod is only used for bones at the moment
'''
loc, rot, scale, matrix, matrix_rot = object_tx(ob, loc, matrix, matrix_mod)
file.write('\n\t\t\tProperty: "Lcl Translation", "Lcl Translation", "A+",%.15f,%.15f,%.15f' % loc)
file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % tuple(eulerRadToDeg(rot)))
# file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % rot)
file.write('\n\t\t\tProperty: "Lcl Scaling", "Lcl Scaling", "A+",%.15f,%.15f,%.15f' % scale)
return loc, rot, scale, matrix, matrix_rot
def write_object_props(ob=None, loc=None, matrix=None, matrix_mod=None):
# if the type is 0 its an empty otherwise its a mesh
# only difference at the moment is one has a color
file.write('''
Properties60: {
Property: "QuaternionInterpolate", "bool", "",0
Property: "Visibility", "Visibility", "A+",1''')
loc, rot, scale, matrix, matrix_rot = write_object_tx(ob, loc, matrix, matrix_mod)
# Rotation order: in the FBX files I've loaded the normal order is 1,
# but it is set to zero here.
# eEULER_XYZ = 0
# eEULER_XZY
# eEULER_YZX
# eEULER_YXZ
# eEULER_ZXY
# eEULER_ZYX
file.write('''
Property: "RotationOffset", "Vector3D", "",0,0,0
Property: "RotationPivot", "Vector3D", "",0,0,0
Property: "ScalingOffset", "Vector3D", "",0,0,0
Property: "ScalingPivot", "Vector3D", "",0,0,0
Property: "TranslationActive", "bool", "",0
Property: "TranslationMin", "Vector3D", "",0,0,0
Property: "TranslationMax", "Vector3D", "",0,0,0
Property: "TranslationMinX", "bool", "",0
Property: "TranslationMinY", "bool", "",0
Property: "TranslationMinZ", "bool", "",0
Property: "TranslationMaxX", "bool", "",0
Property: "TranslationMaxY", "bool", "",0
Property: "TranslationMaxZ", "bool", "",0
Property: "RotationOrder", "enum", "",0
Property: "RotationSpaceForLimitOnly", "bool", "",0
Property: "AxisLen", "double", "",10
Property: "PreRotation", "Vector3D", "",0,0,0
Property: "PostRotation", "Vector3D", "",0,0,0
Property: "RotationActive", "bool", "",0
Property: "RotationMin", "Vector3D", "",0,0,0
Property: "RotationMax", "Vector3D", "",0,0,0
Property: "RotationMinX", "bool", "",0
Property: "RotationMinY", "bool", "",0
Property: "RotationMinZ", "bool", "",0
Property: "RotationMaxX", "bool", "",0
Property: "RotationMaxY", "bool", "",0
Property: "RotationMaxZ", "bool", "",0
Property: "RotationStiffnessX", "double", "",0
Property: "RotationStiffnessY", "double", "",0
Property: "RotationStiffnessZ", "double", "",0
Property: "MinDampRangeX", "double", "",0
Property: "MinDampRangeY", "double", "",0
Property: "MinDampRangeZ", "double", "",0
Property: "MaxDampRangeX", "double", "",0
Property: "MaxDampRangeY", "double", "",0
Property: "MaxDampRangeZ", "double", "",0
Property: "MinDampStrengthX", "double", "",0
Property: "MinDampStrengthY", "double", "",0
Property: "MinDampStrengthZ", "double", "",0
Property: "MaxDampStrengthX", "double", "",0
Property: "MaxDampStrengthY", "double", "",0
Property: "MaxDampStrengthZ", "double", "",0
Property: "PreferedAngleX", "double", "",0
Property: "PreferedAngleY", "double", "",0
Property: "PreferedAngleZ", "double", "",0
Property: "InheritType", "enum", "",0
Property: "ScalingActive", "bool", "",0
Property: "ScalingMin", "Vector3D", "",1,1,1
Property: "ScalingMax", "Vector3D", "",1,1,1
Property: "ScalingMinX", "bool", "",0
Property: "ScalingMinY", "bool", "",0
Property: "ScalingMinZ", "bool", "",0
Property: "ScalingMaxX", "bool", "",0
Property: "ScalingMaxY", "bool", "",0
Property: "ScalingMaxZ", "bool", "",0
Property: "GeometricTranslation", "Vector3D", "",0,0,0
Property: "GeometricRotation", "Vector3D", "",0,0,0
Property: "GeometricScaling", "Vector3D", "",1,1,1
Property: "LookAtProperty", "object", ""
Property: "UpVectorProperty", "object", ""
Property: "Show", "bool", "",1
Property: "NegativePercentShapeSupport", "bool", "",1
Property: "DefaultAttributeIndex", "int", "",0''')
if ob and not isinstance(ob, bpy.types.Bone):
# if ob and type(ob) != Blender.Types.BoneType:
# Only mesh objects have color
file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
file.write('\n\t\t\tProperty: "Size", "double", "",100')
file.write('\n\t\t\tProperty: "Look", "enum", "",1')
return loc, rot, scale, matrix, matrix_rot
# -------------------------------------------- Armatures
#def write_bone(bone, name, matrix_mod):
def write_bone(my_bone):
file.write('\n\tModel: "Model::%s", "Limb" {' % my_bone.fbxName)
file.write('\n\t\tVersion: 232')
#poseMatrix = write_object_props(my_bone.blenBone, None, None, my_bone.fbxArm.parRelMatrix())[3]
poseMatrix = write_object_props(my_bone.blenBone)[3] # don't apply bone matrices anymore
pose_items.append( (my_bone.fbxName, poseMatrix) )
# file.write('\n\t\t\tProperty: "Size", "double", "",%.6f' % ((my_bone.blenData.head['ARMATURESPACE'] - my_bone.blenData.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length)
file.write('\n\t\t\tProperty: "Size", "double", "",1')
#((my_bone.blenData.head['ARMATURESPACE'] * my_bone.fbxArm.matrixWorld) - (my_bone.blenData.tail['ARMATURESPACE'] * my_bone.fbxArm.parRelMatrix())).length)
"""
file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %\
((my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length)
"""
file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %
(my_bone.blenBone.armature_head - my_bone.blenBone.armature_tail).length)
# (my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']).length)
#file.write('\n\t\t\tProperty: "LimbLength", "double", "",1')
file.write('\n\t\t\tProperty: "Color", "ColorRGB", "",0.8,0.8,0.8')
file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
file.write('\n\t\t}')
file.write('\n\t\tMultiLayer: 0')
file.write('\n\t\tMultiTake: 1')
file.write('\n\t\tShading: Y')
file.write('\n\t\tCulling: "CullingOff"')
file.write('\n\t\tTypeFlags: "Skeleton"')
file.write('\n\t}')
def write_camera_switch():
file.write('''
Model: "Model::Camera Switcher", "CameraSwitcher" {
Version: 232''')
write_object_props()
file.write('''
Property: "Color", "Color", "A",0.8,0.8,0.8
Property: "Camera Index", "Integer", "A+",100
}
MultiLayer: 0
MultiTake: 1
Hidden: "True"
Shading: W
Culling: "CullingOff"
Version: 101
Name: "Model::Camera Switcher"
CameraId: 0
CameraName: 100
CameraIndexName:
}''')
def write_camera_dummy(name, loc, near, far, proj_type, up):
file.write('\n\tModel: "Model::%s", "Camera" {' % name )
file.write('\n\t\tVersion: 232')
write_object_props(None, loc)
file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0')
file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",40')
file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1')
file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1')
file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",0')
file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",0')
file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0.63,0.63,0.63')
file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0')
file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1')
file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1')
file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0')
file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1')
file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0')
file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2')
file.write('\n\t\t\tProperty: "GateFit", "enum", "",0')
file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",21.3544940948486')
file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0')
file.write('\n\t\t\tProperty: "AspectW", "double", "",320')
file.write('\n\t\t\tProperty: "AspectH", "double", "",200')
file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",1')
file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0')
file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3')
file.write('\n\t\t\tProperty: "ShowName", "bool", "",1')
file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1')
file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0')
file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1')
file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0')
file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % near)
file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % far)
file.write('\n\t\t\tProperty: "FilmWidth", "double", "",0.816')
file.write('\n\t\t\tProperty: "FilmHeight", "double", "",0.612')
file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",1.33333333333333')
file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1')
file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",4')
file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1')
file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0')
file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2')
file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100')
file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0')
file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1')
file.write('\n\t\t\tProperty: "LockMode", "bool", "",0')
file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0')
file.write('\n\t\t\tProperty: "FitImage", "bool", "",0')
file.write('\n\t\t\tProperty: "Crop", "bool", "",0')
file.write('\n\t\t\tProperty: "Center", "bool", "",1')
file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1')
file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0')
file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5')
file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1')
file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0')
file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1')
file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",1.33333333333333')
file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0')
file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100')
file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50')
file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50')
file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",%i' % proj_type)
file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0')
file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0')
file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0')
file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5')
file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200')
file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0')
file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777')
file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0')
file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7')
file.write('\n\t\t}')
file.write('\n\t\tMultiLayer: 0')
file.write('\n\t\tMultiTake: 0')
file.write('\n\t\tHidden: "True"')
file.write('\n\t\tShading: Y')
file.write('\n\t\tCulling: "CullingOff"')
file.write('\n\t\tTypeFlags: "Camera"')
file.write('\n\t\tGeometryVersion: 124')
file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc)
file.write('\n\t\tUp: %i,%i,%i' % up)
file.write('\n\t\tLookAt: 0,0,0')
file.write('\n\t\tShowInfoOnMoving: 1')
file.write('\n\t\tShowAudio: 0')
file.write('\n\t\tAudioColor: 0,1,0')
file.write('\n\t\tCameraOrthoZoom: 1')
file.write('\n\t}')
def write_camera_default():
# This is ugly, but to match the FBX converter's output it's easier to
# write these cameras even though they are not needed.
write_camera_dummy('Producer Perspective', (0,71.3,287.5), 10, 4000, 0, (0,1,0))
write_camera_dummy('Producer Top', (0,4000,0), 1, 30000, 1, (0,0,-1))
write_camera_dummy('Producer Bottom', (0,-4000,0), 1, 30000, 1, (0,0,-1))
write_camera_dummy('Producer Front', (0,0,4000), 1, 30000, 1, (0,1,0))
write_camera_dummy('Producer Back', (0,0,-4000), 1, 30000, 1, (0,1,0))
write_camera_dummy('Producer Right', (4000,0,0), 1, 30000, 1, (0,1,0))
write_camera_dummy('Producer Left', (-4000,0,0), 1, 30000, 1, (0,1,0))
def write_camera(my_cam):
'''
Write a blender camera
'''
render = sce.render_data
width = render.resolution_x
height = render.resolution_y
# render = sce.render
# width = render.sizeX
# height = render.sizeY
aspect = float(width)/height
data = my_cam.blenObject.data
file.write('\n\tModel: "Model::%s", "Camera" {' % my_cam.fbxName )
file.write('\n\t\tVersion: 232')
loc, rot, scale, matrix, matrix_rot = write_object_props(my_cam.blenObject, None, my_cam.parRelMatrix())
file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0')
file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",%.6f' % data.angle)
file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1')
file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1')
file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",14.0323972702026')
file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",%.6f' % data.shift_x) # not sure if this is in the correct units?
# file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",%.6f' % data.shiftX) # not sure if this is in the correct units?
file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",%.6f' % data.shift_y) # ditto
# file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",%.6f' % data.shiftY) # ditto
file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0,0,0')
file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0')
file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1')
file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1')
file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0')
file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1')
file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0')
file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2')
file.write('\n\t\t\tProperty: "GateFit", "enum", "",0')
file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0')
file.write('\n\t\t\tProperty: "AspectW", "double", "",%i' % width)
file.write('\n\t\t\tProperty: "AspectH", "double", "",%i' % height)
'''Camera aspect ratio modes.
0 If the ratio mode is eWINDOW_SIZE, both width and height values aren't relevant.
1 If the ratio mode is eFIXED_RATIO, the height value is set to 1.0 and the width value is relative to the height value.
2 If the ratio mode is eFIXED_RESOLUTION, both width and height values are in pixels.
3 If the ratio mode is eFIXED_WIDTH, the width value is in pixels and the height value is relative to the width value.
4 If the ratio mode is eFIXED_HEIGHT, the height value is in pixels and the width value is relative to the height value.
Definition at line 234 of file kfbxcamera.h. '''
file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",2')
file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0')
file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3')
file.write('\n\t\t\tProperty: "ShowName", "bool", "",1')
file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1')
file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0')
file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1')
file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0')
file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % data.clip_start)
# file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % data.clipStart)
file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % data.clip_end)
# file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % data.clipStart)
file.write('\n\t\t\tProperty: "FilmWidth", "double", "",1.0')
file.write('\n\t\t\tProperty: "FilmHeight", "double", "",1.0')
file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",%.6f' % aspect)
file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1')
file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",0')
file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1')
file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0')
file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2')
file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100')
file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0')
file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1')
file.write('\n\t\t\tProperty: "LockMode", "bool", "",0')
file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0')
file.write('\n\t\t\tProperty: "FitImage", "bool", "",0')
file.write('\n\t\t\tProperty: "Crop", "bool", "",0')
file.write('\n\t\t\tProperty: "Center", "bool", "",1')
file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1')
file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0')
file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5')
file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1')
file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0')
file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1')
file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",%.6f' % aspect)
file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0')
file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100')
file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50')
file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50')
file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",0')
file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0')
file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0')
file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0')
file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5')
file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200')
file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0')
file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777')
file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0')
file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7')
file.write('\n\t\t}')
file.write('\n\t\tMultiLayer: 0')
file.write('\n\t\tMultiTake: 0')
file.write('\n\t\tShading: Y')
file.write('\n\t\tCulling: "CullingOff"')
file.write('\n\t\tTypeFlags: "Camera"')
file.write('\n\t\tGeometryVersion: 124')
file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc)
file.write('\n\t\tUp: %.6f,%.6f,%.6f' % tuple(Mathutils.Vector(0,1,0) * matrix_rot) )
file.write('\n\t\tLookAt: %.6f,%.6f,%.6f' % tuple(Mathutils.Vector(0,0,-1)*matrix_rot) )
#file.write('\n\t\tUp: 0,0,0' )
#file.write('\n\t\tLookAt: 0,0,0' )
file.write('\n\t\tShowInfoOnMoving: 1')
file.write('\n\t\tShowAudio: 0')
file.write('\n\t\tAudioColor: 0,1,0')
file.write('\n\t\tCameraOrthoZoom: 1')
file.write('\n\t}')
def write_light(my_light):
light = my_light.blenObject.data
file.write('\n\tModel: "Model::%s", "Light" {' % my_light.fbxName)
file.write('\n\t\tVersion: 232')
write_object_props(my_light.blenObject, None, my_light.parRelMatrix())
# Why are these values here twice? No idea - just follow the holy SDK's output.
# Blender light types map onto FBX's quite well; we just need to make
# sure that all unsupported types fall back to a supported one (see below)
#ePOINT,
#eDIRECTIONAL
#eSPOT
light_type_items = {'POINT': 0, 'SUN': 1, 'SPOT': 2, 'HEMI': 3, 'AREA': 4}
light_type = light_type_items[light.type]
# light_type = light.type
if light_type > 2: light_type = 1 # hemi and area lights become directional
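# e.g. a 'HEMI' light (3) or 'AREA' light (4) is written as 1 (directional),
# since FBX only has point (0), directional (1) and spot (2) here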
# mode = light.mode
if light.shadow_method == 'RAY_SHADOW' or light.shadow_method == 'BUFFER_SHADOW':
# if mode & Blender.Lamp.Modes.RayShadow or mode & Blender.Lamp.Modes.Shadows:
do_shadow = 1
else:
do_shadow = 0
if light.only_shadow or (not light.diffuse and not light.specular):
# if mode & Blender.Lamp.Modes.OnlyShadow or (mode & Blender.Lamp.Modes.NoDiffuse and mode & Blender.Lamp.Modes.NoSpecular):
do_light = 0
else:
do_light = 1
scale = abs(GLOBAL_MATRIX.scalePart()[0]) # scale is always uniform in this case
file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type)
file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",1')
file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1')
file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1')
file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0')
file.write('\n\t\t\tProperty: "GoboProperty", "object", ""')
file.write('\n\t\t\tProperty: "Color", "Color", "A+",1,1,1')
file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy*100, 200))) # clamp below 200
if light.type == 'SPOT':
file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spot_size * scale))
# file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spotSize * scale))
file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50')
file.write('\n\t\t\tProperty: "Color", "Color", "A",%.2f,%.2f,%.2f' % tuple(light.color))
# file.write('\n\t\t\tProperty: "Color", "Color", "A",%.2f,%.2f,%.2f' % tuple(light.col))
file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy*100, 200))) # clamp below 200
#
# duplication? see ^ (Arystan)
# file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spotSize * scale))
file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50')
file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type)
file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",%i' % do_light)
file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1')
file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0')
file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1')
file.write('\n\t\t\tProperty: "GoboProperty", "object", ""')
file.write('\n\t\t\tProperty: "DecayType", "enum", "",0')
file.write('\n\t\t\tProperty: "DecayStart", "double", "",%.2f' % light.distance)
# file.write('\n\t\t\tProperty: "DecayStart", "double", "",%.2f' % light.dist)
file.write('\n\t\t\tProperty: "EnableNearAttenuation", "bool", "",0')
file.write('\n\t\t\tProperty: "NearAttenuationStart", "double", "",0')
file.write('\n\t\t\tProperty: "NearAttenuationEnd", "double", "",0')
file.write('\n\t\t\tProperty: "EnableFarAttenuation", "bool", "",0')
file.write('\n\t\t\tProperty: "FarAttenuationStart", "double", "",0')
file.write('\n\t\t\tProperty: "FarAttenuationEnd", "double", "",0')
file.write('\n\t\t\tProperty: "CastShadows", "bool", "",%i' % do_shadow)
file.write('\n\t\t\tProperty: "ShadowColor", "ColorRGBA", "",0,0,0,1')
file.write('\n\t\t}')
file.write('\n\t\tMultiLayer: 0')
file.write('\n\t\tMultiTake: 0')
file.write('\n\t\tShading: Y')
file.write('\n\t\tCulling: "CullingOff"')
file.write('\n\t\tTypeFlags: "Light"')
file.write('\n\t\tGeometryVersion: 124')
file.write('\n\t}')
# matrixOnly is not used at the moment
def write_null(my_null = None, fbxName = None, matrixOnly = None):
# ob can be null
if not fbxName: fbxName = my_null.fbxName
file.write('\n\tModel: "Model::%s", "Null" {' % fbxName)
file.write('\n\t\tVersion: 232')
# only use this for the root matrix at the moment
if matrixOnly:
poseMatrix = write_object_props(None, None, matrixOnly)[3]
else: # all other Null's
if my_null: poseMatrix = write_object_props(my_null.blenObject, None, my_null.parRelMatrix())[3]
else: poseMatrix = write_object_props()[3]
pose_items.append((fbxName, poseMatrix))
file.write('''
}
MultiLayer: 0
MultiTake: 1
Shading: Y
Culling: "CullingOff"
TypeFlags: "Null"
}''')
# Material Settings
if world: world_amb = tuple(world.ambient_color)
# if world: world_amb = world.getAmb()
else: world_amb = (0,0,0) # Default value
def write_material(matname, mat):
file.write('\n\tMaterial: "Material::%s", "" {' % matname)
# Todo, add more material Properties.
if mat:
mat_cold = tuple(mat.diffuse_color)
# mat_cold = tuple(mat.rgbCol)
mat_cols = tuple(mat.specular_color)
# mat_cols = tuple(mat.specCol)
#mat_colm = tuple(mat.mirCol) # we wont use the mirror color
mat_colamb = world_amb
# mat_colamb = tuple([c for c in world_amb])
mat_dif = mat.diffuse_reflection
# mat_dif = mat.ref
mat_amb = mat.ambient
# mat_amb = mat.amb
mat_hard = (float(mat.specular_hardness)-1)/5.10
# mat_hard = (float(mat.hard)-1)/5.10
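# (specular_hardness ranges 1..511 in Blender, so this maps it roughly onto 0..100 for FBX's Shininess)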
mat_spec = mat.specular_reflection/2.0
# mat_spec = mat.spec/2.0
mat_alpha = mat.alpha
mat_emit = mat.emit
mat_shadeless = mat.shadeless
# mat_shadeless = mat.mode & Blender.Material.Modes.SHADELESS
if mat_shadeless:
mat_shader = 'Lambert'
else:
if mat.diffuse_shader == 'LAMBERT':
# if mat.diffuseShader == Blender.Material.Shaders.DIFFUSE_LAMBERT:
mat_shader = 'Lambert'
else:
mat_shader = 'Phong'
else:
mat_cols = mat_cold = 0.8, 0.8, 0.8
mat_colamb = 0.0,0.0,0.0
# mat_colm
mat_dif = 1.0
mat_amb = 0.5
mat_hard = 20.0
mat_spec = 0.2
mat_alpha = 1.0
mat_emit = 0.0
mat_shadeless = False
mat_shader = 'Phong'
file.write('\n\t\tVersion: 102')
file.write('\n\t\tShadingModel: "%s"' % mat_shader.lower())
file.write('\n\t\tMultiLayer: 0')
file.write('\n\t\tProperties60: {')
file.write('\n\t\t\tProperty: "ShadingModel", "KString", "", "%s"' % mat_shader)
file.write('\n\t\t\tProperty: "MultiLayer", "bool", "",0')
file.write('\n\t\t\tProperty: "EmissiveColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold) # emit and diffuse color are he same in blender
file.write('\n\t\t\tProperty: "EmissiveFactor", "double", "",%.4f' % mat_emit)
file.write('\n\t\t\tProperty: "AmbientColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_colamb)
file.write('\n\t\t\tProperty: "AmbientFactor", "double", "",%.4f' % mat_amb)
file.write('\n\t\t\tProperty: "DiffuseColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold)
file.write('\n\t\t\tProperty: "DiffuseFactor", "double", "",%.4f' % mat_dif)
file.write('\n\t\t\tProperty: "Bump", "Vector3D", "",0,0,0')
file.write('\n\t\t\tProperty: "TransparentColor", "ColorRGB", "",1,1,1')
file.write('\n\t\t\tProperty: "TransparencyFactor", "double", "",%.4f' % (1.0 - mat_alpha))
if not mat_shadeless:
file.write('\n\t\t\tProperty: "SpecularColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cols)
file.write('\n\t\t\tProperty: "SpecularFactor", "double", "",%.4f' % mat_spec)
file.write('\n\t\t\tProperty: "ShininessExponent", "double", "",80.0')
file.write('\n\t\t\tProperty: "ReflectionColor", "ColorRGB", "",0,0,0')
file.write('\n\t\t\tProperty: "ReflectionFactor", "double", "",1')
file.write('\n\t\t\tProperty: "Emissive", "ColorRGB", "",0,0,0')
file.write('\n\t\t\tProperty: "Ambient", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_colamb)
file.write('\n\t\t\tProperty: "Diffuse", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cold)
if not mat_shadeless:
file.write('\n\t\t\tProperty: "Specular", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cols)
file.write('\n\t\t\tProperty: "Shininess", "double", "",%.1f' % mat_hard)
file.write('\n\t\t\tProperty: "Opacity", "double", "",%.1f' % mat_alpha)
if not mat_shadeless:
file.write('\n\t\t\tProperty: "Reflectivity", "double", "",0')
file.write('\n\t\t}')
file.write('\n\t}')
def copy_image(image):
rel = image.get_export_path(basepath, True)
base = os.path.basename(rel)
if EXP_IMAGE_COPY:
src = bpy.sys.expandpath(image.filename)
absp = image.get_export_path(basepath, False)
if not os.path.exists(absp):
shutil.copy(src, absp)
return (rel, base)
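# e.g. (hypothetical paths) an image stored as //textures/wood.png exported with a
# basepath of '/tmp/export' might come back as rel 'textures/wood.png', base 'wood.png';
# the file itself is only copied when EXP_IMAGE_COPY is set and the target does not exist yet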
# tex is an Image (Arystan)
def write_video(texname, tex):
# Same as texture really!
file.write('\n\tVideo: "Video::%s", "Clip" {' % texname)
file.write('''
Type: "Clip"
Properties60: {
Property: "FrameRate", "double", "",0
Property: "LastFrame", "int", "",0
Property: "Width", "int", "",0
Property: "Height", "int", "",0''')
if tex:
fname_rel, fname_strip = copy_image(tex)
# fname, fname_strip, fname_rel = derived_paths(tex.filename, basepath, EXP_IMAGE_COPY)
else:
fname = fname_strip = fname_rel = ''
file.write('\n\t\t\tProperty: "Path", "charptr", "", "%s"' % fname_strip)
file.write('''
Property: "StartFrame", "int", "",0
Property: "StopFrame", "int", "",0
Property: "PlaySpeed", "double", "",1
Property: "Offset", "KTime", "",0
Property: "InterlaceMode", "enum", "",0
Property: "FreeRunning", "bool", "",0
Property: "Loop", "bool", "",0
Property: "AccessMode", "enum", "",0
}
UseMipMap: 0''')
file.write('\n\t\tFilename: "%s"' % fname_strip)
if fname_strip: fname_strip = '/' + fname_strip
file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # make relative
file.write('\n\t}')
def write_texture(texname, tex, num):
# if tex == None then this is a dummy tex
file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {' % texname)
file.write('\n\t\tType: "TextureVideoClip"')
file.write('\n\t\tVersion: 202')
# TODO, rare case _empty_ exists as a name.
file.write('\n\t\tTextureName: "Texture::%s"' % texname)
file.write('''
Properties60: {
Property: "Translation", "Vector", "A+",0,0,0
Property: "Rotation", "Vector", "A+",0,0,0
Property: "Scaling", "Vector", "A+",1,1,1''')
file.write('\n\t\t\tProperty: "Texture alpha", "Number", "A+",%i' % num)
# WrapModeU/V 0==rep, 1==clamp, TODO add support
file.write('''
Property: "TextureTypeUse", "enum", "",0
Property: "CurrentTextureBlendMode", "enum", "",1
Property: "UseMaterial", "bool", "",0
Property: "UseMipMap", "bool", "",0
Property: "CurrentMappingType", "enum", "",0
Property: "UVSwap", "bool", "",0''')
file.write('\n\t\t\tProperty: "WrapModeU", "enum", "",%i' % tex.clamp_x)
# file.write('\n\t\t\tProperty: "WrapModeU", "enum", "",%i' % tex.clampX)
file.write('\n\t\t\tProperty: "WrapModeV", "enum", "",%i' % tex.clamp_y)
# file.write('\n\t\t\tProperty: "WrapModeV", "enum", "",%i' % tex.clampY)
file.write('''
Property: "TextureRotationPivot", "Vector3D", "",0,0,0
Property: "TextureScalingPivot", "Vector3D", "",0,0,0
Property: "VideoProperty", "object", ""
}''')
file.write('\n\t\tMedia: "Video::%s"' % texname)
if tex:
fname_rel, fname_strip = copy_image(tex)
# fname, fname_strip, fname_rel = derived_paths(tex.filename, basepath, EXP_IMAGE_COPY)
else:
fname = fname_strip = fname_rel = ''
file.write('\n\t\tFileName: "%s"' % fname_strip)
file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # need some make relative command
file.write('''
ModelUVTranslation: 0,0
ModelUVScaling: 1,1
Texture_Alpha_Source: "None"
Cropping: 0,0,0,0
}''')
def write_deformer_skin(obname):
'''
Each mesh has its own deformer
'''
file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {' % obname)
file.write('''
Version: 100
MultiLayer: 0
Type: "Skin"
Properties60: {
}
Link_DeformAcuracy: 50
}''')
# in the example was 'Bip01 L Thigh_2'
def write_sub_deformer_skin(my_mesh, my_bone, weights):
'''
Each subdeformer is specific to a mesh, but the bone it links to can be used by many sub-deformers,
so the SubDeformer needs the mesh-object name as a prefix to make it unique.
It's possible that there is no matching vgroup in this mesh; in that case no verts are in the subdeformer,
which is a bit silly but doesn't really matter.
'''
file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {' % (my_mesh.fbxName, my_bone.fbxName))
file.write('''
Version: 100
MultiLayer: 0
Type: "Cluster"
Properties60: {
Property: "SrcModel", "object", ""
Property: "SrcModelReference", "object", ""
}
UserData: "", ""''')
# Support for bone parents
if my_mesh.fbxBoneParent:
if my_mesh.fbxBoneParent == my_bone:
# TODO - this is a bit lazy, we could have a simple write loop
# for this case because all weights are 1.0 but for now this is ok
# Parent bones aren't used all that much anyway.
vgroup_data = [(j, 1.0) for j in range(len(my_mesh.blenData.verts))]
else:
# This bone is not a parent of this mesh object, no weights
vgroup_data = []
else:
# Normal weight painted mesh
if my_bone.blenName in weights[0]:
# Before, we used a normalized weight list
#vgroup_data = me.getVertsFromGroup(bone.name, 1)
group_index = weights[0].index(my_bone.blenName)
vgroup_data = [(j, weight[group_index]) for j, weight in enumerate(weights[1]) if weight[group_index]]
else:
vgroup_data = []
file.write('\n\t\tIndexes: ')
i = -1
for vg in vgroup_data:
if i == -1:
file.write('%i' % vg[0])
i=0
else:
if i==23:
file.write('\n\t\t')
i=0
file.write(',%i' % vg[0])
i+=1
file.write('\n\t\tWeights: ')
i = -1
for vg in vgroup_data:
if i == -1:
file.write('%.8f' % vg[1])
i=0
else:
if i==38:
file.write('\n\t\t')
i=0
file.write(',%.8f' % vg[1])
i+=1
if my_mesh.fbxParent:
# TODO FIXME, this case is broken in some cases. Skinned meshes just shouldn't have parents where possible!
m = mtx4_z90 * (my_bone.restMatrix * my_bone.fbxArm.matrixWorld.copy() * my_mesh.matrixWorld.copy().invert() )
else:
# Yes! this is it... - but doesn't work when the mesh is a.
m = mtx4_z90 * (my_bone.restMatrix * my_bone.fbxArm.matrixWorld.copy() * my_mesh.matrixWorld.copy().invert() )
#m = mtx4_z90 * my_bone.restMatrix
matstr = mat4x4str(m)
matstr_i = mat4x4str(m.invert())
file.write('\n\t\tTransform: %s' % matstr_i) # THIS IS __NOT__ THE GLOBAL MATRIX AS DOCUMENTED :/
file.write('\n\t\tTransformLink: %s' % matstr)
file.write('\n\t}')
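# Illustrative sketch only (not called anywhere): the Indexes/Weights loops above,
# and several similar loops below, all follow the same "comma separated values,
# wrapped every N entries" pattern; it could be factored out roughly like this:
def write_wrapped_values(values, fmt, per_line, indent='\n\t\t'):
    # values: any iterable, fmt: printf-style format for one entry,
    # per_line: how many entries to write before breaking the line
    for i, v in enumerate(values):
        if i == 0:
            file.write(fmt % v)
        else:
            if i % per_line == 0:
                file.write(indent)
            file.write(',' + (fmt % v))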
def write_mesh(my_mesh):
me = my_mesh.blenData
# if there are non NULL materials on this mesh
if my_mesh.blenMaterials: do_materials = True
else: do_materials = False
if my_mesh.blenTextures: do_textures = True
else: do_textures = False
do_uvs = len(me.uv_textures) > 0
# do_uvs = me.faceUV
file.write('\n\tModel: "Model::%s", "Mesh" {' % my_mesh.fbxName)
file.write('\n\t\tVersion: 232') # newline is added in write_object_props
poseMatrix = write_object_props(my_mesh.blenObject, None, my_mesh.parRelMatrix())[3]
pose_items.append((my_mesh.fbxName, poseMatrix))
file.write('\n\t\t}')
file.write('\n\t\tMultiLayer: 0')
file.write('\n\t\tMultiTake: 1')
file.write('\n\t\tShading: Y')
file.write('\n\t\tCulling: "CullingOff"')
# Write the Real Mesh data here
file.write('\n\t\tVertices: ')
i=-1
for v in me.verts:
if i==-1:
file.write('%.6f,%.6f,%.6f' % tuple(v.co)); i=0
else:
if i==7:
file.write('\n\t\t'); i=0
file.write(',%.6f,%.6f,%.6f'% tuple(v.co))
i+=1
file.write('\n\t\tPolygonVertexIndex: ')
i=-1
for f in me.faces:
fi = [v_index for j, v_index in enumerate(f.verts) if v_index != 0 or j != 3]
# fi = [v.index for v in f]
# flip the last index, odd but it looks like
# this is how fbx tells one face from another
fi[-1] = -(fi[-1]+1)
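# e.g. a quad (0, 1, 2, 3) is written as "0,1,2,-4" and a triangle (4, 5, 6) as "4,5,-7";
# a reader recovers the real last index as -(value + 1)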
fi = tuple(fi)
if i==-1:
if len(fi) == 3: file.write('%i,%i,%i' % fi )
# if len(f) == 3: file.write('%i,%i,%i' % fi )
else: file.write('%i,%i,%i,%i' % fi )
i=0
else:
if i==13:
file.write('\n\t\t')
i=0
if len(fi) == 3: file.write(',%i,%i,%i' % fi )
# if len(f) == 3: file.write(',%i,%i,%i' % fi )
else: file.write(',%i,%i,%i,%i' % fi )
i+=1
file.write('\n\t\tEdges: ')
i=-1
for ed in me.edges:
if i==-1:
file.write('%i,%i' % (ed.verts[0], ed.verts[1]))
# file.write('%i,%i' % (ed.v1.index, ed.v2.index))
i=0
else:
if i==13:
file.write('\n\t\t')
i=0
file.write(',%i,%i' % (ed.verts[0], ed.verts[1]))
# file.write(',%i,%i' % (ed.v1.index, ed.v2.index))
i+=1
file.write('\n\t\tGeometryVersion: 124')
file.write('''
LayerElementNormal: 0 {
Version: 101
Name: ""
MappingInformationType: "ByVertice"
ReferenceInformationType: "Direct"
Normals: ''')
i=-1
for v in me.verts:
if i==-1:
file.write('%.15f,%.15f,%.15f' % tuple(v.normal)); i=0
# file.write('%.15f,%.15f,%.15f' % tuple(v.no)); i=0
else:
if i==2:
file.write('\n '); i=0
file.write(',%.15f,%.15f,%.15f' % tuple(v.normal))
# file.write(',%.15f,%.15f,%.15f' % tuple(v.no))
i+=1
file.write('\n\t\t}')
# Write Face Smoothing
file.write('''
LayerElementSmoothing: 0 {
Version: 102
Name: ""
MappingInformationType: "ByPolygon"
ReferenceInformationType: "Direct"
Smoothing: ''')
i=-1
for f in me.faces:
if i==-1:
file.write('%i' % f.smooth); i=0
else:
if i==54:
file.write('\n '); i=0
file.write(',%i' % f.smooth)
i+=1
file.write('\n\t\t}')
# Write Edge Smoothing
file.write('''
LayerElementSmoothing: 0 {
Version: 101
Name: ""
MappingInformationType: "ByEdge"
ReferenceInformationType: "Direct"
Smoothing: ''')
# SHARP = Blender.Mesh.EdgeFlags.SHARP
i=-1
for ed in me.edges:
if i==-1:
file.write('%i' % (ed.sharp)); i=0
# file.write('%i' % ((ed.flag&SHARP)!=0)); i=0
else:
if i==54:
file.write('\n '); i=0
file.write(',%i' % (ed.sharp))
# file.write(',%i' % ((ed.flag&SHARP)!=0))
i+=1
file.write('\n\t\t}')
# del SHARP
# small utility function
# returns a slice of data depending on number of face verts
# data is either a MeshTextureFace or MeshColor
def face_data(data, face):
if face.verts[3] == 0:
totvert = 3
else:
totvert = 4
return data[:totvert]
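# e.g. for a triangle (4th vert index is 0) only the first 3 entries of 'data' are
# returned, for a quad all 4 are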
# Write VertexColor Layers
# note, no programs seem to use this info :/
collayers = []
if len(me.vertex_colors):
# if me.vertexColors:
collayers = me.vertex_colors
# collayers = me.getColorLayerNames()
collayer_orig = me.active_vertex_color
# collayer_orig = me.activeColorLayer
for colindex, collayer in enumerate(collayers):
# me.activeColorLayer = collayer
file.write('\n\t\tLayerElementColor: %i {' % colindex)
file.write('\n\t\t\tVersion: 101')
file.write('\n\t\t\tName: "%s"' % collayer.name)
# file.write('\n\t\t\tName: "%s"' % collayer)
file.write('''
MappingInformationType: "ByPolygonVertex"
ReferenceInformationType: "IndexToDirect"
Colors: ''')
i = -1
ii = 0 # Count how many Colors we write
for f, cf in zip(me.faces, collayer.data):
colors = [cf.color1, cf.color2, cf.color3, cf.color4]
# determine number of verts
colors = face_data(colors, f)
for col in colors:
if i==-1:
file.write('%.4f,%.4f,%.4f,1' % tuple(col))
i=0
else:
if i==7:
file.write('\n\t\t\t\t')
i=0
file.write(',%.4f,%.4f,%.4f,1' % tuple(col))
i+=1
ii+=1 # One more Color
# for f in me.faces:
# for col in f.col:
# if i==-1:
# file.write('%.4f,%.4f,%.4f,1' % (col[0]/255.0, col[1]/255.0, col[2]/255.0))
# i=0
# else:
# if i==7:
# file.write('\n\t\t\t\t')
# i=0
# file.write(',%.4f,%.4f,%.4f,1' % (col[0]/255.0, col[1]/255.0, col[2]/255.0))
# i+=1
# ii+=1 # One more Color
file.write('\n\t\t\tColorIndex: ')
i = -1
for j in range(ii):
if i == -1:
file.write('%i' % j)
i=0
else:
if i==55:
file.write('\n\t\t\t\t')
i=0
file.write(',%i' % j)
i+=1
file.write('\n\t\t}')
# Write UV and texture layers.
uvlayers = []
if do_uvs:
uvlayers = me.uv_textures
# uvlayers = me.getUVLayerNames()
uvlayer_orig = me.active_uv_texture
# uvlayer_orig = me.activeUVLayer
for uvindex, uvlayer in enumerate(me.uv_textures):
# for uvindex, uvlayer in enumerate(uvlayers):
# me.activeUVLayer = uvlayer
file.write('\n\t\tLayerElementUV: %i {' % uvindex)
file.write('\n\t\t\tVersion: 101')
file.write('\n\t\t\tName: "%s"' % uvlayer.name)
# file.write('\n\t\t\tName: "%s"' % uvlayer)
file.write('''
MappingInformationType: "ByPolygonVertex"
ReferenceInformationType: "IndexToDirect"
UV: ''')
i = -1
ii = 0 # Count how many UVs we write
for f, uf in zip(me.faces, uvlayer.data):
# for f in me.faces:
uvs = [uf.uv1, uf.uv2, uf.uv3, uf.uv4]
uvs = face_data(uvs, f)
for uv in uvs:
# for uv in f.uv:
if i==-1:
file.write('%.6f,%.6f' % tuple(uv))
i=0
else:
if i==7:
file.write('\n ')
i=0
file.write(',%.6f,%.6f' % tuple(uv))
i+=1
ii+=1 # One more UV
file.write('\n\t\t\tUVIndex: ')
i = -1
for j in range(ii):
if i == -1:
file.write('%i' % j)
i=0
else:
if i==55:
file.write('\n\t\t\t\t')
i=0
file.write(',%i' % j)
i+=1
file.write('\n\t\t}')
if do_textures:
file.write('\n\t\tLayerElementTexture: %i {' % uvindex)
file.write('\n\t\t\tVersion: 101')
file.write('\n\t\t\tName: "%s"' % uvlayer.name)
# file.write('\n\t\t\tName: "%s"' % uvlayer)
if len(my_mesh.blenTextures) == 1:
file.write('\n\t\t\tMappingInformationType: "AllSame"')
else:
file.write('\n\t\t\tMappingInformationType: "ByPolygon"')
file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"')
file.write('\n\t\t\tBlendMode: "Translucent"')
file.write('\n\t\t\tTextureAlpha: 1')
file.write('\n\t\t\tTextureId: ')
if len(my_mesh.blenTextures) == 1:
file.write('0')
else:
texture_mapping_local = {None:-1}
i = 0 # 1 for dummy
for tex in my_mesh.blenTextures:
if tex: # None is set above
texture_mapping_local[tex] = i
i+=1
i=-1
for f in uvlayer.data:
# for f in me.faces:
img_key = f.image
if i==-1:
i=0
file.write( '%s' % texture_mapping_local[img_key])
else:
if i==55:
file.write('\n ')
i=0
file.write(',%s' % texture_mapping_local[img_key])
i+=1
else:
file.write('''
LayerElementTexture: 0 {
Version: 101
Name: ""
MappingInformationType: "NoMappingInformation"
ReferenceInformationType: "IndexToDirect"
BlendMode: "Translucent"
TextureAlpha: 1
TextureId: ''')
file.write('\n\t\t}')
# me.activeUVLayer = uvlayer_orig
# Done with UV/textures.
if do_materials:
file.write('\n\t\tLayerElementMaterial: 0 {')
file.write('\n\t\t\tVersion: 101')
file.write('\n\t\t\tName: ""')
if len(my_mesh.blenMaterials) == 1:
file.write('\n\t\t\tMappingInformationType: "AllSame"')
else:
file.write('\n\t\t\tMappingInformationType: "ByPolygon"')
file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"')
file.write('\n\t\t\tMaterials: ')
if len(my_mesh.blenMaterials) == 1:
file.write('0')
else:
# Build a material mapping for this
material_mapping_local = {} # local-mat & tex : global index.
for j, mat_tex_pair in enumerate(my_mesh.blenMaterials):
material_mapping_local[mat_tex_pair] = j
len_material_mapping_local = len(material_mapping_local)
mats = my_mesh.blenMaterialList
if me.active_uv_texture:
uv_faces = me.active_uv_texture.data
else:
uv_faces = [None] * len(me.faces)
i=-1
for f, uf in zip(me.faces, uv_faces):
# for f in me.faces:
try: mat = mats[f.material_index]
# try: mat = mats[f.mat]
except:mat = None
if do_uvs: tex = uf.image # WARNING - MULTI UV LAYER IMAGES NOT SUPPORTED :/
# if do_uvs: tex = f.image # WARNING - MULTI UV LAYER IMAGES NOT SUPPORTED :/
else: tex = None
if i==-1:
i=0
file.write( '%s' % (material_mapping_local[mat, tex])) # None for mat or tex is ok
else:
if i==55:
file.write('\n\t\t\t\t')
i=0
file.write(',%s' % (material_mapping_local[mat, tex]))
i+=1
file.write('\n\t\t}')
file.write('''
Layer: 0 {
Version: 100
LayerElement: {
Type: "LayerElementNormal"
TypedIndex: 0
}''')
if do_materials:
file.write('''
LayerElement: {
Type: "LayerElementMaterial"
TypedIndex: 0
}''')
# Always write this
if do_textures:
file.write('''
LayerElement: {
Type: "LayerElementTexture"
TypedIndex: 0
}''')
if me.vertex_colors:
# if me.vertexColors:
file.write('''
LayerElement: {
Type: "LayerElementColor"
TypedIndex: 0
}''')
if do_uvs: # same as me.faceUV
file.write('''
LayerElement: {
Type: "LayerElementUV"
TypedIndex: 0
}''')
file.write('\n\t\t}')
if len(uvlayers) > 1:
for i in range(1, len(uvlayers)):
file.write('\n\t\tLayer: %i {' % i)
file.write('\n\t\t\tVersion: 100')
file.write('''
LayerElement: {
Type: "LayerElementUV"''')
file.write('\n\t\t\t\tTypedIndex: %i' % i)
file.write('\n\t\t\t}')
if do_textures:
file.write('''
LayerElement: {
Type: "LayerElementTexture"''')
file.write('\n\t\t\t\tTypedIndex: %i' % i)
file.write('\n\t\t\t}')
file.write('\n\t\t}')
if len(collayers) > 1:
# Take into account any UV layers
layer_offset = 0
if uvlayers: layer_offset = len(uvlayers)-1
for i in range(layer_offset, len(collayers)+layer_offset):
file.write('\n\t\tLayer: %i {' % i)
file.write('\n\t\t\tVersion: 100')
file.write('''
LayerElement: {
Type: "LayerElementColor"''')
file.write('\n\t\t\t\tTypedIndex: %i' % i)
file.write('\n\t\t\t}')
file.write('\n\t\t}')
file.write('\n\t}')
def write_group(name):
file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {' % name)
file.write('''
Properties60: {
Property: "MultiLayer", "bool", "",0
Property: "Pickable", "bool", "",1
Property: "Transformable", "bool", "",1
Property: "Show", "bool", "",1
}
MultiLayer: 0
}''')
# temporary meshes are added here so they can be freed later; they are not used anywhere else
meshes_to_clear = []
ob_meshes = []
ob_lights = []
ob_cameras = []
# in fbx we export bones as children of the mesh
# armatures not a part of a mesh, will be added to ob_arms
ob_bones = []
ob_arms = []
ob_null = [] # empties
# List of types that have blender objects (not bones)
ob_all_typegroups = [ob_meshes, ob_lights, ob_cameras, ob_arms, ob_null]
groups = [] # blender groups, only add ones that have objects in the selections
materials = {} # (mat, image) keys, should be a set()
textures = {} # should be a set()
tmp_ob_type = ob_type = None # in case no objects are exported, so as not to raise an error
# if EXP_OBS_SELECTED is false, use the scene's objects
if not batch_objects:
if EXP_OBS_SELECTED: tmp_objects = context.selected_objects
# if EXP_OBS_SELECTED: tmp_objects = sce.objects.context
else: tmp_objects = sce.objects
else:
tmp_objects = batch_objects
if EXP_ARMATURE:
# This is needed so applying modifiers doesn't apply the armature deformation; it's also needed
# ...so mesh objects return their rest worldspace matrix when bone-parents are exported as weighted meshes.
# set every armature to its rest position, backing up the original values so we don't mess up the scene
ob_arms_orig_rest = [arm.rest_position for arm in bpy.data.armatures]
# ob_arms_orig_rest = [arm.restPosition for arm in bpy.data.armatures]
for arm in bpy.data.armatures:
arm.rest_position = True
# arm.restPosition = True
if ob_arms_orig_rest:
for ob_base in bpy.data.objects:
#if ob_base.type == 'Armature':
ob_base.make_display_list()
# ob_base.makeDisplayList()
# This causes the makeDisplayList command to affect the mesh
sce.set_frame(sce.current_frame)
# Blender.Set('curframe', Blender.Get('curframe'))
for ob_base in tmp_objects:
# ignore dupli children
if ob_base.parent and ob_base.parent.dupli_type != 'NONE':
continue
obs = [(ob_base, ob_base.matrix)]
if ob_base.dupli_type != 'NONE':
ob_base.create_dupli_list()
obs = [(dob.object, dob.matrix) for dob in ob_base.dupli_list]
for ob, mtx in obs:
# for ob, mtx in BPyObject.getDerivedObjects(ob_base):
tmp_ob_type = ob.type
if tmp_ob_type == 'CAMERA':
# if tmp_ob_type == 'Camera':
if EXP_CAMERA:
ob_cameras.append(my_object_generic(ob, mtx))
elif tmp_ob_type == 'LAMP':
# elif tmp_ob_type == 'Lamp':
if EXP_LAMP:
ob_lights.append(my_object_generic(ob, mtx))
elif tmp_ob_type == 'ARMATURE':
# elif tmp_ob_type == 'Armature':
if EXP_ARMATURE:
# TODO - armatures don't work in dupligroups!
if ob not in ob_arms: ob_arms.append(ob)
# ob_arms.append(ob) # replace later. was "ob_arms.append(sane_obname(ob), ob)"
elif tmp_ob_type == 'EMPTY':
# elif tmp_ob_type == 'Empty':
if EXP_EMPTY:
ob_null.append(my_object_generic(ob, mtx))
elif EXP_MESH:
origData = True
if tmp_ob_type != 'MESH':
# if tmp_ob_type != 'Mesh':
# me = bpy.data.meshes.new()
try: me = ob.create_mesh(True, 'PREVIEW')
# try: me.getFromObject(ob)
except: me = None
if me:
meshes_to_clear.append( me )
mats = me.materials
origData = False
else:
# Mesh Type!
if EXP_MESH_APPLY_MOD:
# me = bpy.data.meshes.new()
me = ob.create_mesh(True, 'PREVIEW')
# me.getFromObject(ob)
# so we keep the vert groups
# if EXP_ARMATURE:
# orig_mesh = ob.getData(mesh=1)
# if orig_mesh.getVertGroupNames():
# ob.copy().link(me)
# # If the new mesh has no vgroups we can try to add them if the verts are the same
# if not me.getVertGroupNames(): # vgroups were not kept by the modifier
# if len(me.verts) == len(orig_mesh.verts):
# groupNames, vWeightDict = BPyMesh.meshWeight2Dict(orig_mesh)
# BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
# print ob, me, me.getVertGroupNames()
meshes_to_clear.append( me )
origData = False
mats = me.materials
else:
me = ob.data
# me = ob.getData(mesh=1)
mats = me.materials
# # Support object colors
# tmp_colbits = ob.colbits
# if tmp_colbits:
# tmp_ob_mats = ob.getMaterials(1) # 1 so we get None's too.
# for i in xrange(16):
# if tmp_colbits & (1<<i):
# mats[i] = tmp_ob_mats[i]
# del tmp_ob_mats
# del tmp_colbits
if me:
# # This WILL modify meshes in blender if EXP_MESH_APPLY_MOD is disabled.
# # so strictly this is bad. but only in rare cases would it have negative results
# # say with dupliverts the objects would rotate a bit differently
# if EXP_MESH_HQ_NORMALS:
# BPyMesh.meshCalcNormals(me) # high quality normals nice for realtime engines.
texture_mapping_local = {}
material_mapping_local = {}
if len(me.uv_textures) > 0:
# if me.faceUV:
uvlayer_orig = me.active_uv_texture
# uvlayer_orig = me.activeUVLayer
for uvlayer in me.uv_textures:
# for uvlayer in me.getUVLayerNames():
# me.activeUVLayer = uvlayer
for f, uf in zip(me.faces, uvlayer.data):
# for f in me.faces:
tex = uf.image
# tex = f.image
textures[tex] = texture_mapping_local[tex] = None
try: mat = mats[f.material_index]
# try: mat = mats[f.mat]
except: mat = None
materials[mat, tex] = material_mapping_local[mat, tex] = None # should use sets, wait for blender 2.5
# me.activeUVLayer = uvlayer_orig
else:
for mat in mats:
# 2.44 use mat.lib too for uniqueness
materials[mat, None] = material_mapping_local[mat, None] = None
else:
materials[None, None] = None
if EXP_ARMATURE:
armob = ob.find_armature()
blenParentBoneName = None
# parent bone - special case
if (not armob) and ob.parent and ob.parent.type == 'ARMATURE' and \
ob.parent_type == 'BONE':
# if (not armob) and ob.parent and ob.parent.type == 'Armature' and ob.parentType == Blender.Object.ParentTypes.BONE:
armob = ob.parent
blenParentBoneName = ob.parent_bone
# blenParentBoneName = ob.parentbonename
if armob and armob not in ob_arms:
ob_arms.append(armob)
else:
blenParentBoneName = armob = None
my_mesh = my_object_generic(ob, mtx)
my_mesh.blenData = me
my_mesh.origData = origData
my_mesh.blenMaterials = list(material_mapping_local.keys())
my_mesh.blenMaterialList = mats
my_mesh.blenTextures = list(texture_mapping_local.keys())
# if only 1 null texture then empty the list
if len(my_mesh.blenTextures) == 1 and my_mesh.blenTextures[0] == None:
my_mesh.blenTextures = []
my_mesh.fbxArm = armob # replace with my_object_generic armature instance later
my_mesh.fbxBoneParent = blenParentBoneName # replace with my_bone instance later
ob_meshes.append( my_mesh )
# not forgetting to free dupli_list
if ob_base.dupli_list: ob_base.free_dupli_list()
if EXP_ARMATURE:
# now we have the meshes, restore the rest arm position
for i, arm in enumerate(bpy.data.armatures):
arm.rest_position = ob_arms_orig_rest[i]
# arm.restPosition = ob_arms_orig_rest[i]
if ob_arms_orig_rest:
for ob_base in bpy.data.objects:
if ob_base.type == 'ARMATURE':
# if ob_base.type == 'Armature':
ob_base.make_display_list()
# ob_base.makeDisplayList()
# This causes the makeDisplayList command to affect the mesh
sce.set_frame(sce.current_frame)
# Blender.Set('curframe', Blender.Get('curframe'))
del tmp_ob_type, tmp_objects
# now we have collected all armatures, add bones
for i, ob in enumerate(ob_arms):
ob_arms[i] = my_arm = my_object_generic(ob)
my_arm.fbxBones = []
my_arm.blenData = ob.data
if ob.animation_data:
my_arm.blenAction = ob.animation_data.action
else:
my_arm.blenAction = None
# my_arm.blenAction = ob.action
my_arm.blenActionList = []
# fbxName, blenderObject, my_bones, blenderActions
#ob_arms[i] = fbxArmObName, ob, arm_my_bones, (ob.action, [])
for bone in my_arm.blenData.bones:
# for bone in my_arm.blenData.bones.values():
my_bone = my_bone_class(bone, my_arm)
my_arm.fbxBones.append( my_bone )
ob_bones.append( my_bone )
# add the meshes to the bones and replace the meshes armature with own armature class
#for obname, ob, mtx, me, mats, arm, armname in ob_meshes:
for my_mesh in ob_meshes:
# Replace
# ...this could be sped up with a dictionary mapping but it's unlikely
# to ever be a bottleneck - (would need 100+ meshes using armatures)
if my_mesh.fbxArm:
for my_arm in ob_arms:
if my_arm.blenObject == my_mesh.fbxArm:
my_mesh.fbxArm = my_arm
break
for my_bone in ob_bones:
# The mesh uses this bones armature!
if my_bone.fbxArm == my_mesh.fbxArm:
my_bone.blenMeshes[my_mesh.fbxName] = my_mesh.blenData
# parent bone: replace bone names with our class instances
# my_mesh.fbxBoneParent is None or a blender bone name initially, replaced if the names match.
if my_mesh.fbxBoneParent == my_bone.blenName:
my_mesh.fbxBoneParent = my_bone
bone_deformer_count = 0 # count how many bones deform a mesh
my_bone_blenParent = None
for my_bone in ob_bones:
my_bone_blenParent = my_bone.blenBone.parent
if my_bone_blenParent:
for my_bone_parent in ob_bones:
# Note 2.45rc2 you can compare bones normally
if my_bone_blenParent.name == my_bone_parent.blenName and my_bone.fbxArm == my_bone_parent.fbxArm:
my_bone.parent = my_bone_parent
break
# Not used at the moment
# my_bone.calcRestMatrixLocal()
bone_deformer_count += len(my_bone.blenMeshes)
del my_bone_blenParent
# Build blenObject -> fbxObject mapping
# this is needed for groups as well as fbxParenting
# for ob in bpy.data.objects: ob.tag = False
# bpy.data.objects.tag = False
# using a list of object names for tagging (Arystan)
tagged_objects = []
tmp_obmapping = {}
for ob_generic in ob_all_typegroups:
for ob_base in ob_generic:
tagged_objects.append(ob_base.blenObject.name)
# ob_base.blenObject.tag = True
tmp_obmapping[ob_base.blenObject] = ob_base
# Build Groups from objects we export
for blenGroup in bpy.data.groups:
fbxGroupName = None
for ob in blenGroup.objects:
if ob.name in tagged_objects:
# if ob.tag:
if fbxGroupName == None:
fbxGroupName = sane_groupname(blenGroup)
groups.append((fbxGroupName, blenGroup))
tmp_obmapping[ob].fbxGroupNames.append(fbxGroupName) # also adds to the objects fbxGroupNames
groups.sort() # not really needed
# Assign parents using this mapping
for ob_generic in ob_all_typegroups:
for my_ob in ob_generic:
parent = my_ob.blenObject.parent
if parent and parent.name in tagged_objects: # does it exist and is it in the mapping
# if parent and parent.tag: # does it exist and is it in the mapping
my_ob.fbxParent = tmp_obmapping[parent]
del tmp_obmapping
# Finished finding groups we use
materials = [(sane_matname(mat_tex_pair), mat_tex_pair) for mat_tex_pair in materials.keys()]
textures = [(sane_texname(tex), tex) for tex in textures.keys() if tex]
materials.sort() # sort by name
textures.sort()
camera_count = 8
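# presumably the 7 'Producer *' dummy cameras plus the camera switcher written above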
file.write('''
; Object definitions
;------------------------------------------------------------------
Definitions: {
Version: 100
Count: %i''' % (\
1+1+camera_count+\
len(ob_meshes)+\
len(ob_lights)+\
len(ob_cameras)+\
len(ob_arms)+\
len(ob_null)+\
len(ob_bones)+\
bone_deformer_count+\
len(materials)+\
(len(textures)*2))) # add 1 for the root model and 1 for global settings
del bone_deformer_count
file.write('''
ObjectType: "Model" {
Count: %i
}''' % (\
1+camera_count+\
len(ob_meshes)+\
len(ob_lights)+\
len(ob_cameras)+\
len(ob_arms)+\
len(ob_null)+\
len(ob_bones))) # add 1 for the root model
file.write('''
ObjectType: "Geometry" {
Count: %i
}''' % len(ob_meshes))
if materials:
file.write('''
ObjectType: "Material" {
Count: %i
}''' % len(materials))
if textures:
file.write('''
ObjectType: "Texture" {
Count: %i
}''' % len(textures)) # add 1 for an empty tex
file.write('''
ObjectType: "Video" {
Count: %i
}''' % len(textures)) # add 1 for an empty tex
tmp = 0
# Add deformer nodes
for my_mesh in ob_meshes:
if my_mesh.fbxArm:
tmp+=1
# Add subdeformers
for my_bone in ob_bones:
tmp += len(my_bone.blenMeshes)
if tmp:
file.write('''
ObjectType: "Deformer" {
Count: %i
}''' % tmp)
del tmp
# we could avoid writing this possibly but for now just write it
file.write('''
ObjectType: "Pose" {
Count: 1
}''')
if groups:
file.write('''
ObjectType: "GroupSelection" {
Count: %i
}''' % len(groups))
file.write('''
ObjectType: "GlobalSettings" {
Count: 1
}
}''')
file.write('''
; Object properties
;------------------------------------------------------------------
Objects: {''')
# To comply with other FBX FILES
write_camera_switch()
# Write the null object
write_null(None, 'blend_root')# , GLOBAL_MATRIX)
for my_null in ob_null:
write_null(my_null)
for my_arm in ob_arms:
write_null(my_arm)
for my_cam in ob_cameras:
write_camera(my_cam)
for my_light in ob_lights:
write_light(my_light)
for my_mesh in ob_meshes:
write_mesh(my_mesh)
#for bonename, bone, obname, me, armob in ob_bones:
for my_bone in ob_bones:
write_bone(my_bone)
write_camera_default()
for matname, (mat, tex) in materials:
write_material(matname, mat) # We only need to have a material per image pair, but no need to write any image info into the material (dumb fbx standard)
# each texture uses a video, odd
for texname, tex in textures:
write_video(texname, tex)
i = 0
for texname, tex in textures:
write_texture(texname, tex, i)
i+=1
for groupname, group in groups:
write_group(groupname)
# NOTE - c4d and motionbuilder don't need normalized weights, but deep-exploration 5 does (and possibly max does too).
# Write armature modifiers
# TODO - add another MODEL? - because of this skin definition.
for my_mesh in ob_meshes:
if my_mesh.fbxArm:
write_deformer_skin(my_mesh.fbxName)
# Get normalized weights for temporary use
if my_mesh.fbxBoneParent:
weights = None
else:
weights = meshNormalizedWeights(my_mesh.blenObject)
# weights = meshNormalizedWeights(my_mesh.blenData)
#for bonename, bone, obname, bone_mesh, armob in ob_bones:
for my_bone in ob_bones:
if my_mesh.blenData in my_bone.blenMeshes.values():
write_sub_deformer_skin(my_mesh, my_bone, weights)
# Writing poses is really weird; they are only needed when an armature and mesh are used together,
# each by itself doesn't need pose data. For now only pose meshes and bones
file.write('''
Pose: "Pose::BIND_POSES", "BindPose" {
Type: "BindPose"
Version: 100
Properties60: {
}
NbPoseNodes: ''')
file.write(str(len(pose_items)))
for fbxName, matrix in pose_items:
file.write('\n\t\tPoseNode: {')
file.write('\n\t\t\tNode: "Model::%s"' % fbxName )
if matrix: file.write('\n\t\t\tMatrix: %s' % mat4x4str(matrix))
else: file.write('\n\t\t\tMatrix: %s' % mat4x4str(mtx4_identity))
file.write('\n\t\t}')
file.write('\n\t}')
# Finish Writing Objects
# Write global settings
file.write('''
GlobalSettings: {
Version: 1000
Properties60: {
Property: "UpAxis", "int", "",1
Property: "UpAxisSign", "int", "",1
Property: "FrontAxis", "int", "",2
Property: "FrontAxisSign", "int", "",1
Property: "CoordAxis", "int", "",0
Property: "CoordAxisSign", "int", "",1
Property: "UnitScaleFactor", "double", "",100
}
}
''')
file.write('}')
file.write('''
; Object relations
;------------------------------------------------------------------
Relations: {''')
file.write('\n\tModel: "Model::blend_root", "Null" {\n\t}')
for my_null in ob_null:
file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_null.fbxName)
for my_arm in ob_arms:
file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_arm.fbxName)
for my_mesh in ob_meshes:
file.write('\n\tModel: "Model::%s", "Mesh" {\n\t}' % my_mesh.fbxName)
# TODO - limbs can have the same name for multiple armatures, should prefix.
#for bonename, bone, obname, me, armob in ob_bones:
for my_bone in ob_bones:
file.write('\n\tModel: "Model::%s", "Limb" {\n\t}' % my_bone.fbxName)
for my_cam in ob_cameras:
file.write('\n\tModel: "Model::%s", "Camera" {\n\t}' % my_cam.fbxName)
for my_light in ob_lights:
file.write('\n\tModel: "Model::%s", "Light" {\n\t}' % my_light.fbxName)
file.write('''
Model: "Model::Producer Perspective", "Camera" {
}
Model: "Model::Producer Top", "Camera" {
}
Model: "Model::Producer Bottom", "Camera" {
}
Model: "Model::Producer Front", "Camera" {
}
Model: "Model::Producer Back", "Camera" {
}
Model: "Model::Producer Right", "Camera" {
}
Model: "Model::Producer Left", "Camera" {
}
Model: "Model::Camera Switcher", "CameraSwitcher" {
}''')
for matname, (mat, tex) in materials:
file.write('\n\tMaterial: "Material::%s", "" {\n\t}' % matname)
if textures:
for texname, tex in textures:
file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {\n\t}' % texname)
for texname, tex in textures:
file.write('\n\tVideo: "Video::%s", "Clip" {\n\t}' % texname)
# deformers - modifiers
for my_mesh in ob_meshes:
if my_mesh.fbxArm:
file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {\n\t}' % my_mesh.fbxName)
#for bonename, bone, obname, me, armob in ob_bones:
for my_bone in ob_bones:
for fbxMeshObName in my_bone.blenMeshes: # .keys() - fbxMeshObName
# is this bone affecting a mesh?
file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {\n\t}' % (fbxMeshObName, my_bone.fbxName))
# This should be at the end
# file.write('\n\tPose: "Pose::BIND_POSES", "BindPose" {\n\t}')
for groupname, group in groups:
file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {\n\t}' % groupname)
file.write('\n}')
file.write('''
; Object connections
;------------------------------------------------------------------
Connections: {''')
# NOTE - The FBX SDK doesn't care about the order but some importers DO!
# for instance, defining the material->mesh connection
# before the mesh->blend_root crashes cinema4d
# write the fake root node
file.write('\n\tConnect: "OO", "Model::blend_root", "Model::Scene"')
for ob_generic in ob_all_typegroups: # all blender 'Object's we support
for my_ob in ob_generic:
if my_ob.fbxParent:
file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_ob.fbxName, my_ob.fbxParent.fbxName))
else:
file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_ob.fbxName)
if materials:
for my_mesh in ob_meshes:
# Connect all materials to all objects, not good form but ok for now.
for mat, tex in my_mesh.blenMaterials:
if mat: mat_name = mat.name
else: mat_name = None
if tex: tex_name = tex.name
else: tex_name = None
file.write('\n\tConnect: "OO", "Material::%s", "Model::%s"' % (sane_name_mapping_mat[mat_name, tex_name], my_mesh.fbxName))
if textures:
for my_mesh in ob_meshes:
if my_mesh.blenTextures:
# file.write('\n\tConnect: "OO", "Texture::_empty_", "Model::%s"' % my_mesh.fbxName)
for tex in my_mesh.blenTextures:
if tex:
file.write('\n\tConnect: "OO", "Texture::%s", "Model::%s"' % (sane_name_mapping_tex[tex.name], my_mesh.fbxName))
for texname, tex in textures:
file.write('\n\tConnect: "OO", "Video::%s", "Texture::%s"' % (texname, texname))
for my_mesh in ob_meshes:
if my_mesh.fbxArm:
file.write('\n\tConnect: "OO", "Deformer::Skin %s", "Model::%s"' % (my_mesh.fbxName, my_mesh.fbxName))
#for bonename, bone, obname, me, armob in ob_bones:
for my_bone in ob_bones:
for fbxMeshObName in my_bone.blenMeshes: # .keys()
file.write('\n\tConnect: "OO", "SubDeformer::Cluster %s %s", "Deformer::Skin %s"' % (fbxMeshObName, my_bone.fbxName, fbxMeshObName))
# limbs -> deformers
# for bonename, bone, obname, me, armob in ob_bones:
for my_bone in ob_bones:
for fbxMeshObName in my_bone.blenMeshes: # .keys()
file.write('\n\tConnect: "OO", "Model::%s", "SubDeformer::Cluster %s %s"' % (my_bone.fbxName, fbxMeshObName, my_bone.fbxName))
#for bonename, bone, obname, me, armob in ob_bones:
for my_bone in ob_bones:
# Always parent to armature now
if my_bone.parent:
file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.parent.fbxName) )
else:
# the armature object is written as an empty and all root level bones connect to it
file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.fbxArm.fbxName) )
# groups
if groups:
for ob_generic in ob_all_typegroups:
for ob_base in ob_generic:
for fbxGroupName in ob_base.fbxGroupNames:
file.write('\n\tConnect: "OO", "Model::%s", "GroupSelection::%s"' % (ob_base.fbxName, fbxGroupName))
for my_arm in ob_arms:
file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_arm.fbxName)
file.write('\n}')
# Needed for scene footer as well as animation
render = sce.render_data
# render = sce.render
# from the FBX sdk
#define KTIME_ONE_SECOND KTime (K_LONGLONG(46186158000))
def fbx_time(t):
# 0.5 + val is the same as rounding.
return int(0.5 + ((t/fps) * 46186158000))
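# Worked example (illustrative values): with fps == 25, fbx_time(25) == 46186158000,
# i.e. exactly one second in KTime units per the SDK define above, and
# fbx_time(1) == 1847446320. The 0.5 offset before int() rounds to the nearest unit.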
fps = float(render.fps)
start = sce.start_frame
# start = render.sFrame
end = sce.end_frame
# end = render.eFrame
if end < start: start, end = end, start
if start==end: ANIM_ENABLE = False
# animations for these object types
ob_anim_lists = ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms
if ANIM_ENABLE and [tmp for tmp in ob_anim_lists if tmp]:
frame_orig = sce.current_frame
# frame_orig = Blender.Get('curframe')
if ANIM_OPTIMIZE:
ANIM_OPTIMIZE_PRECISSION_FLOAT = 0.1 ** ANIM_OPTIMIZE_PRECISSION
# default action, when no actions are available
tmp_actions = [None] # None is the default action
blenActionDefault = None
action_lastcompat = None
# instead of tagging
tagged_actions = []
if ANIM_ACTION_ALL:
# bpy.data.actions.tag = False
tmp_actions = list(bpy.data.actions)
# find which actions are compatible with the armatures
# blenActions is not yet initialized so do it now.
tmp_act_count = 0
for my_arm in ob_arms:
# get the default name
if not blenActionDefault:
blenActionDefault = my_arm.blenAction
arm_bone_names = set([my_bone.blenName for my_bone in my_arm.fbxBones])
for action in tmp_actions:
action_chan_names = arm_bone_names.intersection( set([g.name for g in action.groups]) )
# action_chan_names = arm_bone_names.intersection( set(action.getChannelNames()) )
if action_chan_names: # at least one channel matches.
my_arm.blenActionList.append(action)
tagged_actions.append(action.name)
# action.tag = True
tmp_act_count += 1
# in case there are no actions applied to armatures
action_lastcompat = action
if tmp_act_count:
# unlikely to ever happen, but if no actions are applied to armatures, just use the last compatible action.
if not blenActionDefault:
blenActionDefault = action_lastcompat
del action_lastcompat
file.write('''
;Takes and animation section
;----------------------------------------------------
Takes: {''')
if blenActionDefault:
file.write('\n\tCurrent: "%s"' % sane_takename(blenActionDefault))
else:
file.write('\n\tCurrent: "Default Take"')
for blenAction in tmp_actions:
# we have tagged all actions that are used by selected armatures
if blenAction:
if blenAction.name in tagged_actions:
# if blenAction.tag:
print('\taction: "%s" exporting...' % blenAction.name)
else:
print('\taction: "%s" has no armature using it, skipping' % blenAction.name)
continue
if blenAction == None:
# Warning, this only accounts for tmp_actions being [None]
file.write('\n\tTake: "Default Take" {')
act_start = start
act_end = end
else:
# use existing name
if blenAction == blenActionDefault: # have we already got the name
file.write('\n\tTake: "%s" {' % sane_name_mapping_take[blenAction.name])
else:
file.write('\n\tTake: "%s" {' % sane_takename(blenAction))
act_start, act_end = blenAction.get_frame_range()
# tmp = blenAction.getFrameNumbers()
# if tmp:
# act_start = min(tmp)
# act_end = max(tmp)
# del tmp
# else:
# # Fallback on this, theres not much else we can do? :/
# # when an action has no length
# act_start = start
# act_end = end
# Set the action active
for my_bone in ob_arms:
if blenAction in my_bone.blenActionList:
my_bone.blenObject.action = blenAction
# print '\t\tSetting Action!', blenAction
# sce.update(1)
file.write('\n\t\tFileName: "Default_Take.tak"') # ??? - not sure why this is needed
file.write('\n\t\tLocalTime: %i,%i' % (fbx_time(act_start-1), fbx_time(act_end-1))) # ??? - not sure why this is needed
file.write('\n\t\tReferenceTime: %i,%i' % (fbx_time(act_start-1), fbx_time(act_end-1))) # ??? - not sure why this is needed
file.write('''
;Models animation
;----------------------------------------------------''')
# set pose data for all bones
# do this here in case the action changes
'''
for my_bone in ob_bones:
my_bone.flushAnimData()
'''
i = act_start
while i <= act_end:
sce.set_frame(i)
# Blender.Set('curframe', i)
for ob_generic in ob_anim_lists:
for my_ob in ob_generic:
#Blender.Window.RedrawAll()
if ob_generic == ob_meshes and my_ob.fbxArm:
# We can't animate armature meshes!
pass
else:
my_ob.setPoseFrame(i)
i+=1
#for bonename, bone, obname, me, armob in ob_bones:
for ob_generic in (ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms):
for my_ob in ob_generic:
if ob_generic == ob_meshes and my_ob.fbxArm:
# do nothing,
pass
else:
file.write('\n\t\tModel: "Model::%s" {' % my_ob.fbxName) # ??? - not sure why this is needed
file.write('\n\t\t\tVersion: 1.1')
file.write('\n\t\t\tChannel: "Transform" {')
context_bone_anim_mats = [ (my_ob.getAnimParRelMatrix(frame), my_ob.getAnimParRelMatrixRot(frame)) for frame in range(act_start, act_end+1) ]
# ----------------
# ----------------
for TX_LAYER, TX_CHAN in enumerate('TRS'): # transform, rotate, scale
if TX_CHAN=='T': context_bone_anim_vecs = [mtx[0].translationPart() for mtx in context_bone_anim_mats]
elif TX_CHAN=='S': context_bone_anim_vecs = [mtx[0].scalePart() for mtx in context_bone_anim_mats]
elif TX_CHAN=='R':
# Was....
# elif TX_CHAN=='R': context_bone_anim_vecs = [mtx[1].toEuler() for mtx in context_bone_anim_mats]
#
# ...but we need to use the previous euler for compatible conversion.
context_bone_anim_vecs = []
prev_eul = None
for mtx in context_bone_anim_mats:
if prev_eul: prev_eul = mtx[1].toEuler(prev_eul)
else: prev_eul = mtx[1].toEuler()
context_bone_anim_vecs.append(eulerRadToDeg(prev_eul))
# context_bone_anim_vecs.append(prev_eul)
file.write('\n\t\t\t\tChannel: "%s" {' % TX_CHAN) # T, R or S channel
for i in range(3):
# Loop on each axis of the bone
file.write('\n\t\t\t\t\tChannel: "%s" {'% ('XYZ'[i])) # X, Y or Z axis sub-channel
file.write('\n\t\t\t\t\t\tDefault: %.15f' % context_bone_anim_vecs[0][i] )
file.write('\n\t\t\t\t\t\tKeyVer: 4005')
if not ANIM_OPTIMIZE:
# Just write all frames; simple but inefficient
file.write('\n\t\t\t\t\t\tKeyCount: %i' % (1 + act_end - act_start))
file.write('\n\t\t\t\t\t\tKey: ')
frame = act_start
while frame <= act_end:
if frame!=act_start:
file.write(',')
# Curve types are 'C,n' for constant, 'L' for linear
# C,n is for bezier? - linear is best for now so we can do simple keyframe removal
file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame-1), context_bone_anim_vecs[frame-act_start][i] ))
frame+=1
else:
# remove unneeded keys; j indexes the key list (not the frame) and is re-checked as keys are removed.
context_bone_anim_keys = [ (vec[i], j) for j, vec in enumerate(context_bone_anim_vecs) ]
# last frame to first frame, missing 1 frame on either side.
# removing in a backwards loop is faster
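# Illustrative example (assumed values): keys (10.0, 10.0, 10.0) on consecutive frames are
# "co-linear horizontal", so the middle key is dropped; keys (0.0, 5.0, 10.0) lie on a straight
# line between their neighbours, so the middle key is also redundant because linear
# interpolation reproduces it within ANIM_OPTIMIZE_PRECISSION_FLOAT.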
#for j in xrange( (act_end-act_start)-1, 0, -1 ):
# j = (act_end-act_start)-1
j = len(context_bone_anim_keys)-2
while j > 0 and len(context_bone_anim_keys) > 2:
# print j, len(context_bone_anim_keys)
# Is this key the same as the ones next to it?
# co-linear horizontal...
if abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j-1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT and\
abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j+1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT:
del context_bone_anim_keys[j]
else:
frame_range = float(context_bone_anim_keys[j+1][1] - context_bone_anim_keys[j-1][1])
frame_range_fac1 = (context_bone_anim_keys[j+1][1] - context_bone_anim_keys[j][1]) / frame_range
frame_range_fac2 = 1.0 - frame_range_fac1
if abs(((context_bone_anim_keys[j-1][0]*frame_range_fac1 + context_bone_anim_keys[j+1][0]*frame_range_fac2)) - context_bone_anim_keys[j][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT:
del context_bone_anim_keys[j]
else:
j-=1
# keep the index below the list length
if j > len(context_bone_anim_keys)-2:
j = len(context_bone_anim_keys)-2
if len(context_bone_anim_keys) == 2 and context_bone_anim_keys[0][0] == context_bone_anim_keys[1][0]:
# This axis has no motion, it's okay to skip KeyCount and Keys in this case
pass
else:
# We only need to write these if there is at least one
file.write('\n\t\t\t\t\t\tKeyCount: %i' % len(context_bone_anim_keys))
file.write('\n\t\t\t\t\t\tKey: ')
for val, frame in context_bone_anim_keys:
if frame != context_bone_anim_keys[0][1]: # not the first
file.write(',')
# frame is already one less than Blender's frame
file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame), val ))
if i==0: file.write('\n\t\t\t\t\t\tColor: 1,0,0')
elif i==1: file.write('\n\t\t\t\t\t\tColor: 0,1,0')
elif i==2: file.write('\n\t\t\t\t\t\tColor: 0,0,1')
file.write('\n\t\t\t\t\t}')
file.write('\n\t\t\t\t\tLayerType: %i' % (TX_LAYER+1) )
file.write('\n\t\t\t\t}')
# ---------------
file.write('\n\t\t\t}')
file.write('\n\t\t}')
# end the take
file.write('\n\t}')
# end action loop. set original actions
# do this after every loop in case actions affect each other.
for my_bone in ob_arms:
my_bone.blenObject.action = my_bone.blenAction
file.write('\n}')
sce.set_frame(frame_orig)
# Blender.Set('curframe', frame_orig)
else:
# no animation
file.write('\n;Takes and animation section')
file.write('\n;----------------------------------------------------')
file.write('\n')
file.write('\nTakes: {')
file.write('\n\tCurrent: ""')
file.write('\n}')
# write meshes animation
#for obname, ob, mtx, me, mats, arm, armname in ob_meshes:
# Clear mesh data only when writing with modifiers applied
for me in meshes_to_clear:
bpy.data.remove_mesh(me)
# me.verts = None
# --------------------------- Footer
if world:
m = world.mist
has_mist = m.enabled
# has_mist = world.mode & 1
mist_intense = m.intensity
mist_start = m.start
mist_end = m.depth
mist_height = m.height
# mist_intense, mist_start, mist_end, mist_height = world.mist
world_hor = world.horizon_color
# world_hor = world.hor
else:
has_mist = mist_intense = mist_start = mist_end = mist_height = 0
world_hor = 0,0,0
file.write('\n;Version 5 settings')
file.write('\n;------------------------------------------------------------------')
file.write('\n')
file.write('\nVersion5: {')
file.write('\n\tAmbientRenderSettings: {')
file.write('\n\t\tVersion: 101')
file.write('\n\t\tAmbientLightColor: %.1f,%.1f,%.1f,0' % tuple(world_amb))
file.write('\n\t}')
file.write('\n\tFogOptions: {')
file.write('\n\t\tFlogEnable: %i' % has_mist)
file.write('\n\t\tFogMode: 0')
file.write('\n\t\tFogDensity: %.3f' % mist_intense)
file.write('\n\t\tFogStart: %.3f' % mist_start)
file.write('\n\t\tFogEnd: %.3f' % mist_end)
file.write('\n\t\tFogColor: %.1f,%.1f,%.1f,1' % tuple(world_hor))
file.write('\n\t}')
file.write('\n\tSettings: {')
file.write('\n\t\tFrameRate: "%i"' % int(fps))
file.write('\n\t\tTimeFormat: 1')
file.write('\n\t\tSnapOnFrames: 0')
file.write('\n\t\tReferenceTimeIndex: -1')
file.write('\n\t\tTimeLineStartTime: %i' % fbx_time(start-1))
file.write('\n\t\tTimeLineStopTime: %i' % fbx_time(end-1))
file.write('\n\t}')
file.write('\n\tRendererSetting: {')
file.write('\n\t\tDefaultCamera: "Producer Perspective"')
file.write('\n\t\tDefaultViewingMode: 0')
file.write('\n\t}')
file.write('\n}')
file.write('\n')
# In case somebody imports this, clean up by clearing global dicts
sane_name_mapping_ob.clear()
sane_name_mapping_mat.clear()
sane_name_mapping_tex.clear()
ob_arms[:] = []
ob_bones[:] = []
ob_cameras[:] = []
ob_lights[:] = []
ob_meshes[:] = []
ob_null[:] = []
# copy images if enabled
# if EXP_IMAGE_COPY:
# # copy_images( basepath, [ tex[1] for tex in textures if tex[1] != None ])
# bpy.util.copy_images( [ tex[1] for tex in textures if tex[1] != None ], basepath)
print('export finished in %.4f sec.' % (time.clock() - start_time))
# print 'export finished in %.4f sec.' % (Blender.sys.time() - start_time)
return True
# --------------------------------------------
# UI Function - not a part of the exporter.
# this is to separate the user interface from the rest of the exporter.
# from Blender import Draw, Window
EVENT_NONE = 0
EVENT_EXIT = 1
EVENT_REDRAW = 2
EVENT_FILESEL = 3
GLOBALS = {}
# export opts
def do_redraw(e,v): GLOBALS['EVENT'] = e
# toggle between these 2, only allow one on at a time
def do_obs_sel(e,v):
GLOBALS['EVENT'] = e
GLOBALS['EXP_OBS_SCENE'].val = 0
GLOBALS['EXP_OBS_SELECTED'].val = 1
def do_obs_sce(e,v):
GLOBALS['EVENT'] = e
GLOBALS['EXP_OBS_SCENE'].val = 1
GLOBALS['EXP_OBS_SELECTED'].val = 0
def do_batch_type_grp(e,v):
GLOBALS['EVENT'] = e
GLOBALS['BATCH_GROUP'].val = 1
GLOBALS['BATCH_SCENE'].val = 0
def do_batch_type_sce(e,v):
GLOBALS['EVENT'] = e
GLOBALS['BATCH_GROUP'].val = 0
GLOBALS['BATCH_SCENE'].val = 1
def do_anim_act_all(e,v):
GLOBALS['EVENT'] = e
GLOBALS['ANIM_ACTION_ALL'][0].val = 1
GLOBALS['ANIM_ACTION_ALL'][1].val = 0
def do_anim_act_cur(e,v):
if GLOBALS['BATCH_ENABLE'].val and GLOBALS['BATCH_GROUP'].val:
Draw.PupMenu('Warning%t|Cannot use this with the batch export group option')
else:
GLOBALS['EVENT'] = e
GLOBALS['ANIM_ACTION_ALL'][0].val = 0
GLOBALS['ANIM_ACTION_ALL'][1].val = 1
def fbx_ui_exit(e,v):
GLOBALS['EVENT'] = e
def do_help(e,v):
url = 'http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx'
print('Trying to open web browser with documentation at this address...')
print('\t' + url)
try:
import webbrowser
webbrowser.open(url)
except:
Blender.Draw.PupMenu("Error%t|Opening a webbrowser requires a full python installation")
print('...could not open a browser window.')
# run when export is pressed
#def fbx_ui_write(e,v):
def fbx_ui_write(filename, context):
# Don't allow overwriting files when saving normally
if not GLOBALS['BATCH_ENABLE'].val:
if not BPyMessages.Warning_SaveOver(filename):
return
GLOBALS['EVENT'] = EVENT_EXIT
# Keep the order the same as above for simplicity
# the [] is a dummy arg used for objects
Blender.Window.WaitCursor(1)
# Make the matrix
GLOBAL_MATRIX = mtx4_identity
GLOBAL_MATRIX[0][0] = GLOBAL_MATRIX[1][1] = GLOBAL_MATRIX[2][2] = GLOBALS['_SCALE'].val
if GLOBALS['_XROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_x90n
if GLOBALS['_YROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_y90n
if GLOBALS['_ZROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_z90n
ret = write(\
filename, None,\
context,
GLOBALS['EXP_OBS_SELECTED'].val,\
GLOBALS['EXP_MESH'].val,\
GLOBALS['EXP_MESH_APPLY_MOD'].val,\
GLOBALS['EXP_MESH_HQ_NORMALS'].val,\
GLOBALS['EXP_ARMATURE'].val,\
GLOBALS['EXP_LAMP'].val,\
GLOBALS['EXP_CAMERA'].val,\
GLOBALS['EXP_EMPTY'].val,\
GLOBALS['EXP_IMAGE_COPY'].val,\
GLOBAL_MATRIX,\
GLOBALS['ANIM_ENABLE'].val,\
GLOBALS['ANIM_OPTIMIZE'].val,\
GLOBALS['ANIM_OPTIMIZE_PRECISSION'].val,\
GLOBALS['ANIM_ACTION_ALL'][0].val,\
GLOBALS['BATCH_ENABLE'].val,\
GLOBALS['BATCH_GROUP'].val,\
GLOBALS['BATCH_SCENE'].val,\
GLOBALS['BATCH_FILE_PREFIX'].val,\
GLOBALS['BATCH_OWN_DIR'].val,\
)
Blender.Window.WaitCursor(0)
GLOBALS.clear()
if ret == False:
Draw.PupMenu('Error%t|Path cannot be written to!')
def fbx_ui():
# Only to center the UI
x,y = GLOBALS['MOUSE']
x-=180; y-=0 # offset... just to get it centered
Draw.Label('Export Objects...', x+20,y+165, 200, 20)
if not GLOBALS['BATCH_ENABLE'].val:
Draw.BeginAlign()
GLOBALS['EXP_OBS_SELECTED'] = Draw.Toggle('Selected Objects', EVENT_REDRAW, x+20, y+145, 160, 20, GLOBALS['EXP_OBS_SELECTED'].val, 'Export selected objects on visible layers', do_obs_sel)
GLOBALS['EXP_OBS_SCENE'] = Draw.Toggle('Scene Objects', EVENT_REDRAW, x+180, y+145, 160, 20, GLOBALS['EXP_OBS_SCENE'].val, 'Export all objects in this scene', do_obs_sce)
Draw.EndAlign()
Draw.BeginAlign()
GLOBALS['_SCALE'] = Draw.Number('Scale:', EVENT_NONE, x+20, y+120, 140, 20, GLOBALS['_SCALE'].val, 0.01, 1000.0, 'Scale all data (Note! some importers do not support scaled armatures)')
GLOBALS['_XROT90'] = Draw.Toggle('Rot X90', EVENT_NONE, x+160, y+120, 60, 20, GLOBALS['_XROT90'].val, 'Rotate all objects 90 degrees about the X axis')
GLOBALS['_YROT90'] = Draw.Toggle('Rot Y90', EVENT_NONE, x+220, y+120, 60, 20, GLOBALS['_YROT90'].val, 'Rotate all objects 90 degrees about the Y axis')
GLOBALS['_ZROT90'] = Draw.Toggle('Rot Z90', EVENT_NONE, x+280, y+120, 60, 20, GLOBALS['_ZROT90'].val, 'Rotate all objects 90 degrees about the Z axis')
Draw.EndAlign()
y -= 35
Draw.BeginAlign()
GLOBALS['EXP_EMPTY'] = Draw.Toggle('Empty', EVENT_NONE, x+20, y+120, 60, 20, GLOBALS['EXP_EMPTY'].val, 'Export empty objects')
GLOBALS['EXP_CAMERA'] = Draw.Toggle('Camera', EVENT_NONE, x+80, y+120, 60, 20, GLOBALS['EXP_CAMERA'].val, 'Export camera objects')
GLOBALS['EXP_LAMP'] = Draw.Toggle('Lamp', EVENT_NONE, x+140, y+120, 60, 20, GLOBALS['EXP_LAMP'].val, 'Export lamp objects')
GLOBALS['EXP_ARMATURE'] = Draw.Toggle('Armature', EVENT_NONE, x+200, y+120, 60, 20, GLOBALS['EXP_ARMATURE'].val, 'Export armature objects')
GLOBALS['EXP_MESH'] = Draw.Toggle('Mesh', EVENT_REDRAW, x+260, y+120, 80, 20, GLOBALS['EXP_MESH'].val, 'Export mesh objects', do_redraw) #, do_axis_z)
Draw.EndAlign()
if GLOBALS['EXP_MESH'].val:
# below the Mesh button
Draw.BeginAlign()
GLOBALS['EXP_MESH_APPLY_MOD'] = Draw.Toggle('Modifiers', EVENT_NONE, x+260, y+100, 80, 20, GLOBALS['EXP_MESH_APPLY_MOD'].val, 'Apply modifiers to mesh objects') #, do_axis_z)
GLOBALS['EXP_MESH_HQ_NORMALS'] = Draw.Toggle('HQ Normals', EVENT_NONE, x+260, y+80, 80, 20, GLOBALS['EXP_MESH_HQ_NORMALS'].val, 'Generate high quality normals') #, do_axis_z)
Draw.EndAlign()
GLOBALS['EXP_IMAGE_COPY'] = Draw.Toggle('Copy Image Files', EVENT_NONE, x+20, y+80, 160, 20, GLOBALS['EXP_IMAGE_COPY'].val, 'Copy image files to the destination path') #, do_axis_z)
Draw.Label('Export Armature Animation...', x+20,y+45, 300, 20)
GLOBALS['ANIM_ENABLE'] = Draw.Toggle('Enable Animation', EVENT_REDRAW, x+20, y+25, 160, 20, GLOBALS['ANIM_ENABLE'].val, 'Export keyframe animation', do_redraw)
if GLOBALS['ANIM_ENABLE'].val:
Draw.BeginAlign()
GLOBALS['ANIM_OPTIMIZE'] = Draw.Toggle('Optimize Keyframes', EVENT_REDRAW, x+20, y+0, 160, 20, GLOBALS['ANIM_OPTIMIZE'].val, 'Remove double keyframes', do_redraw)
if GLOBALS['ANIM_OPTIMIZE'].val:
GLOBALS['ANIM_OPTIMIZE_PRECISSION'] = Draw.Number('Precision: ', EVENT_NONE, x+180, y+0, 160, 20, GLOBALS['ANIM_OPTIMIZE_PRECISSION'].val, 1, 16, 'Tolerance for comparing double keyframes (higher for greater accuracy)')
Draw.EndAlign()
Draw.BeginAlign()
GLOBALS['ANIM_ACTION_ALL'][1] = Draw.Toggle('Current Action', EVENT_REDRAW, x+20, y-25, 160, 20, GLOBALS['ANIM_ACTION_ALL'][1].val, 'Use actions currently applied to the armatures (use scene start/end frame)', do_anim_act_cur)
GLOBALS['ANIM_ACTION_ALL'][0] = Draw.Toggle('All Actions', EVENT_REDRAW, x+180,y-25, 160, 20, GLOBALS['ANIM_ACTION_ALL'][0].val, 'Use all actions for armatures', do_anim_act_all)
Draw.EndAlign()
Draw.Label('Export Batch...', x+20,y-60, 300, 20)
GLOBALS['BATCH_ENABLE'] = Draw.Toggle('Enable Batch', EVENT_REDRAW, x+20, y-80, 160, 20, GLOBALS['BATCH_ENABLE'].val, 'Automate exporting multiple scenes or groups to files', do_redraw)
if GLOBALS['BATCH_ENABLE'].val:
Draw.BeginAlign()
GLOBALS['BATCH_GROUP'] = Draw.Toggle('Group > File', EVENT_REDRAW, x+20, y-105, 160, 20, GLOBALS['BATCH_GROUP'].val, 'Export each group as an FBX file', do_batch_type_grp)
GLOBALS['BATCH_SCENE'] = Draw.Toggle('Scene > File', EVENT_REDRAW, x+180, y-105, 160, 20, GLOBALS['BATCH_SCENE'].val, 'Export each scene as an FBX file', do_batch_type_sce)
# Own dir requires OS module
if os:
GLOBALS['BATCH_OWN_DIR'] = Draw.Toggle('Own Dir', EVENT_NONE, x+20, y-125, 80, 20, GLOBALS['BATCH_OWN_DIR'].val, 'Create a dir for each exported file')
GLOBALS['BATCH_FILE_PREFIX'] = Draw.String('Prefix: ', EVENT_NONE, x+100, y-125, 240, 20, GLOBALS['BATCH_FILE_PREFIX'].val, 64, 'Prefix each file with this name ')
else:
GLOBALS['BATCH_FILE_PREFIX'] = Draw.String('Prefix: ', EVENT_NONE, x+20, y-125, 320, 20, GLOBALS['BATCH_FILE_PREFIX'].val, 64, 'Prefix each file with this name ')
Draw.EndAlign()
#y+=80
'''
Draw.BeginAlign()
GLOBALS['FILENAME'] = Draw.String('path: ', EVENT_NONE, x+20, y-170, 300, 20, GLOBALS['FILENAME'].val, 64, 'Prefix each file with this name ')
Draw.PushButton('..', EVENT_FILESEL, x+320, y-170, 20, 20, 'Select the path', do_redraw)
'''
# Until batch is added
#
#Draw.BeginAlign()
Draw.PushButton('Online Help', EVENT_REDRAW, x+20, y-160, 100, 20, 'Open online help in a browser window', do_help)
Draw.PushButton('Cancel', EVENT_EXIT, x+130, y-160, 100, 20, 'Exit the exporter', fbx_ui_exit)
Draw.PushButton('Export', EVENT_FILESEL, x+240, y-160, 100, 20, 'Export the fbx file', do_redraw)
#Draw.PushButton('Export', EVENT_EXIT, x+180, y-160, 160, 20, 'Export the fbx file', fbx_ui_write)
#Draw.EndAlign()
# exit when mouse out of the view?
# GLOBALS['EVENT'] = EVENT_EXIT
#def write_ui(filename):
def write_ui():
# globals
GLOBALS['EVENT'] = EVENT_REDRAW
#GLOBALS['MOUSE'] = Window.GetMouseCoords()
GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()]
GLOBALS['FILENAME'] = ''
'''
# IF called from the fileselector
if filename == None:
GLOBALS['FILENAME'] = filename # Draw.Create(Blender.sys.makename(ext='.fbx'))
else:
GLOBALS['FILENAME'].val = filename
'''
GLOBALS['EXP_OBS_SELECTED'] = Draw.Create(1) # don't need 2 variables, but do this for clarity
GLOBALS['EXP_OBS_SCENE'] = Draw.Create(0)
GLOBALS['EXP_MESH'] = Draw.Create(1)
GLOBALS['EXP_MESH_APPLY_MOD'] = Draw.Create(1)
GLOBALS['EXP_MESH_HQ_NORMALS'] = Draw.Create(0)
GLOBALS['EXP_ARMATURE'] = Draw.Create(1)
GLOBALS['EXP_LAMP'] = Draw.Create(1)
GLOBALS['EXP_CAMERA'] = Draw.Create(1)
GLOBALS['EXP_EMPTY'] = Draw.Create(1)
GLOBALS['EXP_IMAGE_COPY'] = Draw.Create(0)
# animation opts
GLOBALS['ANIM_ENABLE'] = Draw.Create(1)
GLOBALS['ANIM_OPTIMIZE'] = Draw.Create(1)
GLOBALS['ANIM_OPTIMIZE_PRECISSION'] = Draw.Create(4) # decimal places
GLOBALS['ANIM_ACTION_ALL'] = [Draw.Create(0), Draw.Create(1)] # not just the current action
# batch export options
GLOBALS['BATCH_ENABLE'] = Draw.Create(0)
GLOBALS['BATCH_GROUP'] = Draw.Create(1) # can't have both of these enabled at once.
GLOBALS['BATCH_SCENE'] = Draw.Create(0) # see above
GLOBALS['BATCH_FILE_PREFIX'] = Draw.Create(Blender.sys.makename(ext='_').split('\\')[-1].split('/')[-1])
GLOBALS['BATCH_OWN_DIR'] = Draw.Create(0)
# done setting globals
# Used by the user interface
GLOBALS['_SCALE'] = Draw.Create(1.0)
GLOBALS['_XROT90'] = Draw.Create(True)
GLOBALS['_YROT90'] = Draw.Create(False)
GLOBALS['_ZROT90'] = Draw.Create(False)
# best not to move the cursor
# Window.SetMouseCoords(*[i/2 for i in Window.GetScreenSize()])
# hack so the toggle buttons redraw. this is not nice at all
while GLOBALS['EVENT'] != EVENT_EXIT:
if GLOBALS['BATCH_ENABLE'].val and GLOBALS['BATCH_GROUP'].val and GLOBALS['ANIM_ACTION_ALL'][1].val:
#Draw.PupMenu("Warning%t|Cant batch export groups with 'Current Action' ")
GLOBALS['ANIM_ACTION_ALL'][0].val = 1
GLOBALS['ANIM_ACTION_ALL'][1].val = 0
if GLOBALS['EVENT'] == EVENT_FILESEL:
if GLOBALS['BATCH_ENABLE'].val:
txt = 'Batch FBX Dir'
name = Blender.sys.expandpath('//')
else:
txt = 'Export FBX'
name = Blender.sys.makename(ext='.fbx')
Blender.Window.FileSelector(fbx_ui_write, txt, name)
#fbx_ui_write('/test.fbx')
break
Draw.UIBlock(fbx_ui, 0)
# GLOBALS.clear()
class EXPORT_OT_fbx(bpy.types.Operator):
'''
Operator documentation text, will be used for the operator tooltip and python docs.
'''
__idname__ = "export.fbx"
__label__ = "Export FBX"
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
__props__ = [
bpy.props.StringProperty(attr="filename", name="File Name", description="File name used for exporting the FBX file", maxlen= 1024, default=""),
bpy.props.BoolProperty(attr="EXP_OBS_SELECTED", name="Selected Objects", description="Export selected objects on visible layers", default=True),
# bpy.props.BoolProperty(attr="EXP_OBS_SCENE", name="Scene Objects", description="Export all objects in this scene", default=True),
bpy.props.FloatProperty(attr="_SCALE", name="Scale", description="Scale all data (Note! some importers don't support scaled armatures)", min=0.01, max=1000.0, soft_min=0.01, soft_max=1000.0, default=1.0),
bpy.props.BoolProperty(attr="_XROT90", name="Rot X90", description="Rotate all objects 90 degrees about the X axis", default=True),
bpy.props.BoolProperty(attr="_YROT90", name="Rot Y90", description="Rotate all objects 90 degrees about the Y axis", default=False),
bpy.props.BoolProperty(attr="_ZROT90", name="Rot Z90", description="Rotate all objects 90 degrees about the Z axis", default=False),
bpy.props.BoolProperty(attr="EXP_EMPTY", name="Empties", description="Export empty objects", default=True),
bpy.props.BoolProperty(attr="EXP_CAMERA", name="Cameras", description="Export camera objects", default=True),
bpy.props.BoolProperty(attr="EXP_LAMP", name="Lamps", description="Export lamp objects", default=True),
bpy.props.BoolProperty(attr="EXP_ARMATURE", name="Armatures", description="Export armature objects", default=True),
bpy.props.BoolProperty(attr="EXP_MESH", name="Meshes", description="Export mesh objects", default=True),
bpy.props.BoolProperty(attr="EXP_MESH_APPLY_MOD", name="Modifiers", description="Apply modifiers to mesh objects", default=True),
bpy.props.BoolProperty(attr="EXP_MESH_HQ_NORMALS", name="HQ Normals", description="Generate high quality normals", default=True),
bpy.props.BoolProperty(attr="EXP_IMAGE_COPY", name="Copy Image Files", description="Copy image files to the destination path", default=False),
# armature animation
bpy.props.BoolProperty(attr="ANIM_ENABLE", name="Enable Animation", description="Export keyframe animation", default=True),
bpy.props.BoolProperty(attr="ANIM_OPTIMIZE", name="Optimize Keyframes", description="Remove double keyframes", default=True),
bpy.props.FloatProperty(attr="ANIM_OPTIMIZE_PRECISSION", name="Precision", description="Tolerance for comparing double keyframes (higher for greater accuracy)", min=1, max=16, soft_min=1, soft_max=16, default=6.0),
# bpy.props.BoolProperty(attr="ANIM_ACTION_ALL", name="Current Action", description="Use actions currently applied to the armatures (use scene start/end frame)", default=True),
bpy.props.BoolProperty(attr="ANIM_ACTION_ALL", name="All Actions", description="Use all actions for armatures, if false, use current action", default=False),
# batch
bpy.props.BoolProperty(attr="BATCH_ENABLE", name="Enable Batch", description="Automate exporting multiple scenes or groups to files", default=False),
bpy.props.BoolProperty(attr="BATCH_GROUP", name="Group > File", description="Export each group as an FBX file, if false, export each scene as an FBX file", default=False),
bpy.props.BoolProperty(attr="BATCH_OWN_DIR", name="Own Dir", description="Create a dir for each exported file", default=True),
bpy.props.StringProperty(attr="BATCH_FILE_PREFIX", name="Prefix", description="Prefix each file with this name", maxlen= 1024, default=""),
]
def poll(self, context):
print("Poll")
return context.active_object != None
def execute(self, context):
if not self.filename:
raise Exception("filename not set")
GLOBAL_MATRIX = mtx4_identity
GLOBAL_MATRIX[0][0] = GLOBAL_MATRIX[1][1] = GLOBAL_MATRIX[2][2] = self._SCALE
if self._XROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_x90n
if self._YROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_y90n
if self._ZROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_z90n
write(self.filename,
None, # XXX
context,
self.EXP_OBS_SELECTED,
self.EXP_MESH,
self.EXP_MESH_APPLY_MOD,
# self.EXP_MESH_HQ_NORMALS,
self.EXP_ARMATURE,
self.EXP_LAMP,
self.EXP_CAMERA,
self.EXP_EMPTY,
self.EXP_IMAGE_COPY,
GLOBAL_MATRIX,
self.ANIM_ENABLE,
self.ANIM_OPTIMIZE,
self.ANIM_OPTIMIZE_PRECISSION,
self.ANIM_ACTION_ALL,
self.BATCH_ENABLE,
self.BATCH_GROUP,
self.BATCH_FILE_PREFIX,
self.BATCH_OWN_DIR)
return ('FINISHED',)
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self.__operator__)
return ('RUNNING_MODAL',)
bpy.ops.add(EXPORT_OT_fbx)
# if __name__ == "__main__":
# bpy.ops.EXPORT_OT_fbx(filename="/tmp/test.fbx")
# NOTES (all line numbers correspond to original export_fbx.py (under release/scripts)
# - Draw.PupMenu alternative in 2.5?, temporarily replaced PupMenu with print
# - get rid of cleanName somehow
# + fixed: isinstance(inst, bpy.types.*) doesn't work on RNA objects: line 565
# + get rid of BPyObject_getObjectArmature, move it in RNA?
# - BATCH_ENABLE and BATCH_GROUP options: line 327
# - implement all BPyMesh_* used here with RNA
# - getDerivedObjects is not fully replicated with .dupli* funcs
# - talk to Campbell, this code won't work? lines 1867-1875
# - don't know what those colbits are, do we need them? they're said to be deprecated in DNA_object_types.h: 1886-1893
# - no hq normals: 1900-1901
# TODO
# - bpy.data.remove_scene: line 366
# - bpy.sys.time move to bpy.sys.util?
# - new scene creation, activation: lines 327-342, 368
# - uses bpy.sys.expandpath, *.relpath - replace at least relpath
# SMALL or COSMETICAL
# - find a way to get blender version, and put it in bpy.util?, old was Blender.Get('version')

988
release/io/export_obj.py Normal file

@@ -0,0 +1,988 @@
#!BPY
"""
Name: 'Wavefront (.obj)...'
Blender: 248
Group: 'Export'
Tooltip: 'Save a Wavefront OBJ File'
"""
__author__ = "Campbell Barton, Jiri Hnidek, Paolo Ciccone"
__url__ = ['http://wiki.blender.org/index.php/Scripts/Manual/Export/wavefront_obj', 'www.blender.org', 'blenderartists.org']
__version__ = "1.21"
__bpydoc__ = """\
This script is an exporter to OBJ file format.
Usage:
Select the objects you wish to export and run this script from "File->Export" menu.
Selecting the default options from the popup box will be good in most cases.
All objects that can be represented as a mesh (mesh, curve, metaball, surface, text3d)
will be exported as mesh data.
"""
# --------------------------------------------------------------------------
# OBJ Export v1.1 by Campbell Barton (AKA Ideasman)
# --------------------------------------------------------------------------
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
# import math and other in functions that use them for the sake of fast Blender startup
# import math
import os
import bpy
import Mathutils
# Returns a tuple - path,extension.
# 'hello.obj' > ('hello', '.obj')
def splitExt(path):
dotidx = path.rfind('.')
if dotidx == -1:
return path, ''
else:
return path[:dotidx], path[dotidx:]
def fixName(name):
if name == None:
return 'None'
else:
return name.replace(' ', '_')
# this used to be in BPySys module
# frankly, I don't understand how it works
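# What it does, roughly: 'v' below lists the byte values of characters treated as
# invalid (ASCII control characters, most punctuation/symbols and bytes 128-254);
# every such character found in 'name' is replaced with an underscore,
# e.g. 'my object!' -> 'my_object_' (letters, digits, '-' and '_' survive).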
def BPySys_cleanName(name):
v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,58,59,60,61,62,63,64,91,92,93,94,96,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254]
invalid = ''.join([chr(i) for i in v])
for ch in invalid:
name = name.replace(ch, '_')
return name
# A Dict of Materials
# (material.name, image.name):matname_imagename # matname_imagename has gaps removed.
MTL_DICT = {}
def write_mtl(scene, filename, copy_images):
world = scene.world
worldAmb = world.ambient_color
dest_dir = os.path.dirname(filename)
def copy_image(image):
rel = image.get_export_path(dest_dir, True)
if copy_images:
import shutil # deferred import, matching the script's lazy-import convention
abspath = image.get_export_path(dest_dir, False)
if not os.path.exists(abspath):
shutil.copy(bpy.sys.expandpath(image.filename), abspath)
return rel
file = open(filename, "w")
# XXX
# file.write('# Blender3D MTL File: %s\n' % Blender.Get('filename').split('\\')[-1].split('/')[-1])
file.write('# Material Count: %i\n' % len(MTL_DICT))
# Write material/image combinations we have used.
for key, (mtl_mat_name, mat, img) in MTL_DICT.items():
# Get the Blender data for the material and the image.
# Having an image named 'None' will cause a bug, don't do it :)
file.write('newmtl %s\n' % mtl_mat_name) # Define a new material: matname_imgname
if mat:
file.write('Ns %.6f\n' % ((mat.specular_hardness-1) * 1.9607843137254901) ) # Hardness, convert blenders 1-511 to MTL's
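# 1.9607843137254901 is 1000/510: Blender hardness 1..511 maps onto the
# conventional MTL Ns range of 0..1000 (so hardness 511 -> Ns 1000.0).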
file.write('Ka %.6f %.6f %.6f\n' % tuple([c*mat.ambient for c in worldAmb]) ) # Ambient, uses mirror colour,
file.write('Kd %.6f %.6f %.6f\n' % tuple([c*mat.diffuse_intensity for c in mat.diffuse_color]) ) # Diffuse
file.write('Ks %.6f %.6f %.6f\n' % tuple([c*mat.specular_intensity for c in mat.specular_color]) ) # Specular
if hasattr(mat, "ior"):
file.write('Ni %.6f\n' % mat.ior) # Refraction index
else:
file.write('Ni %.6f\n' % 1.0)
file.write('d %.6f\n' % mat.alpha) # Alpha (obj uses 'd' for dissolve)
# 0 to disable lighting, 1 for ambient & diffuse only (specular color set to black), 2 for full lighting.
if mat.shadeless:
file.write('illum 0\n') # ignore lighting
elif mat.specular_intensity == 0:
file.write('illum 1\n') # no specular.
else:
file.write('illum 2\n') # light normally
else:
#write a dummy material here?
file.write('Ns 0\n')
file.write('Ka %.6f %.6f %.6f\n' % tuple([c for c in worldAmb]) ) # Ambient, uses mirror colour,
file.write('Kd 0.8 0.8 0.8\n')
file.write('Ks 0.8 0.8 0.8\n')
file.write('d 1\n') # No alpha
file.write('illum 2\n') # light normally
# Write images!
if img: # We have an image on the face!
# write relative image path
rel = copy_image(img)
file.write('map_Kd %s\n' % rel) # Diffuse mapping image
# file.write('map_Kd %s\n' % img.filename.split('\\')[-1].split('/')[-1]) # Diffuse mapping image
elif mat: # No face image. If we have a material, search for an MTex image.
for mtex in mat.textures:
if mtex and mtex.texture.type == 'IMAGE':
try:
filename = copy_image(mtex.texture.image)
# filename = mtex.texture.image.filename.split('\\')[-1].split('/')[-1]
file.write('map_Kd %s\n' % filename) # Diffuse mapping image
break
except:
# Texture has no image though it's an image type, best ignore.
pass
file.write('\n\n')
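# Example of the block written above for a typical material (illustrative values):
# newmtl Material_Tex
# Ns 96.078431
# Ka 0.000000 0.000000 0.000000
# Kd 0.640000 0.640000 0.640000
# Ks 0.500000 0.500000 0.500000
# Ni 1.000000
# d 1.000000
# illum 2
# map_Kd texture.png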
file.close()
# XXX not used
def copy_file(source, dest):
file = open(source, 'rb')
data = file.read()
file.close()
file = open(dest, 'wb')
file.write(data)
file.close()
# XXX not used
def copy_images(dest_dir):
if dest_dir[-1] != os.sep:
dest_dir += os.sep
# if dest_dir[-1] != sys.sep:
# dest_dir += sys.sep
# Get unique image names
uniqueImages = {}
for matname, mat, image in MTL_DICT.values(): # Only use image name
# Get Texface images
if image:
uniqueImages[image] = image # Should use sets here. wait until Python 2.4 is default.
# Get MTex images
if mat:
for mtex in mat.textures:
if mtex and mtex.texture.type == 'IMAGE':
image_tex = mtex.texture.image
if image_tex:
try:
uniqueImages[image_tex] = image_tex
except:
pass
# Now copy images
copyCount = 0
# for bImage in uniqueImages.values():
# image_path = bpy.sys.expandpath(bImage.filename)
# if bpy.sys.exists(image_path):
# # Make a name for the target path.
# dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1]
# if not bpy.sys.exists(dest_image_path): # Image isnt alredy there
# print('\tCopying "%s" > "%s"' % (image_path, dest_image_path))
# copy_file(image_path, dest_image_path)
# copyCount+=1
# paths= bpy.util.copy_images(uniqueImages.values(), dest_dir)
print('\tCopied %d images' % copyCount)
# print('\tCopied %d images' % copyCount)
# XXX not converted
def test_nurbs_compat(ob):
if ob.type != 'Curve':
return False
for nu in ob.data:
if (not nu.knotsV) and nu.type != 1: # not a surface and not bezier
return True
return False
# XXX not converted
def write_nurb(file, ob, ob_mat):
tot_verts = 0
cu = ob.data
# use negative indices
Vector = Blender.Mathutils.Vector
for nu in cu:
if nu.type==0: DEG_ORDER_U = 1
else: DEG_ORDER_U = nu.orderU-1 # Tested to be correct
if nu.type==1:
print("\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported")
continue
if nu.knotsV:
print("\tWarning, surface:", ob.name, "only poly and nurbs curves supported")
continue
if len(nu) <= DEG_ORDER_U:
print("\tWarning, orderU is lower than vert count, skipping:", ob.name)
continue
pt_num = 0
do_closed = (nu.flagU & 1)
do_endpoints = (do_closed==0) and (nu.flagU & 2)
for pt in nu:
pt = Vector(pt[0], pt[1], pt[2]) * ob_mat
file.write('v %.6f %.6f %.6f\n' % (pt[0], pt[1], pt[2]))
pt_num += 1
tot_verts += pt_num
file.write('g %s\n' % (fixName(ob.name))) # fixName(ob.getData(1)) could use the data name too
file.write('cstype bspline\n') # not ideal, hard coded
file.write('deg %d\n' % DEG_ORDER_U) # not used for curves but most files have it still
curve_ls = [-(i+1) for i in range(pt_num)]
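# OBJ allows negative (relative) vertex indices: -1 is the most recently written
# vertex, -2 the one before it, and so on. For pt_num == 4 this gives
# curve_ls == [-1, -2, -3, -4], i.e. the control points just written above.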
# 'curv' keyword
if do_closed:
if DEG_ORDER_U == 1:
pt_num += 1
curve_ls.append(-1)
else:
pt_num += DEG_ORDER_U
curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U]
file.write('curv 0.0 1.0 %s\n' % (' '.join( [str(i) for i in curve_ls] ))) # Blender has no U and V values for the curve
# 'parm' keyword
tot_parm = (DEG_ORDER_U + 1) + pt_num
tot_parm_div = float(tot_parm-1)
parm_ls = [(i/tot_parm_div) for i in range(tot_parm)]
if do_endpoints: # end points, force param
for i in range(DEG_ORDER_U+1):
parm_ls[i] = 0.0
parm_ls[-(1+i)] = 1.0
file.write('parm u %s\n' % ' '.join( [str(i) for i in parm_ls] ))
file.write('end\n')
return tot_verts
def write(filename, objects, scene,
EXPORT_TRI=False,
EXPORT_EDGES=False,
EXPORT_NORMALS=False,
EXPORT_NORMALS_HQ=False,
EXPORT_UV=True,
EXPORT_MTL=True,
EXPORT_COPY_IMAGES=False,
EXPORT_APPLY_MODIFIERS=True,
EXPORT_ROTX90=True,
EXPORT_BLEN_OBS=True,
EXPORT_GROUP_BY_OB=False,
EXPORT_GROUP_BY_MAT=False,
EXPORT_KEEP_VERT_ORDER=False,
EXPORT_POLYGROUPS=False,
EXPORT_CURVE_AS_NURBS=True):
'''
Basic write function. The context and options must already be set.
This can be accessed externally,
eg.
write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.
'''
# XXX
import math
def veckey3d(v):
return round(v.x, 6), round(v.y, 6), round(v.z, 6)
def veckey2d(v):
return round(v.x, 6), round(v.y, 6)
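# Rounding to 6 decimal places makes these tuples usable as dict keys, so
# normals/UVs that agree to within 1e-6 are written once and shared by index
# (see globalNormals and uv_dict below).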
def findVertexGroupName(face, vWeightMap):
"""
Searches the vertexDict to see what groups is assigned to a given face.
We use a frequency system in order to sort out the name because a given vertex can
belong to two or more groups at the same time. To find the right name for the face
we list all the possible vertex group names with their frequency and then sort by
frequency in descending order. The top element, the one shared by the highest number
of vertices, is the face's group.
"""
"""
weightDict = {}
for vert_index in face.verts:
# for vert in face:
vWeights = vWeightMap[vert_index]
# vWeights = vWeightMap[vert]
for vGroupName, weight in vWeights:
weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight
if weightDict:
alist = [(weight,vGroupName) for vGroupName, weight in weightDict.items()] # sort least to greatest amount of weight
alist.sort()
return(alist[-1][1]) # highest value last
else:
return '(null)'
# TODO: implement this in C? dunno how it should be called...
def getVertsFromGroup(me, group_index):
ret = []
for i, v in enumerate(me.verts):
for g in v.groups:
if g.group == group_index:
ret.append((i, g.weight))
return ret
print('OBJ Export path: "%s"' % filename)
temp_mesh_name = '~tmp-mesh'
time1 = bpy.sys.time()
# time1 = sys.time()
# scn = Scene.GetCurrent()
file = open(filename, "w")
# Write Header
version = "2.5"
file.write('# Blender3D v%s OBJ File: %s\n' % (version, bpy.data.filename.split('/')[-1].split('\\')[-1] ))
file.write('# www.blender3d.org\n')
# Tell the obj file what material file to use.
if EXPORT_MTL:
mtlfilename = '%s.mtl' % '.'.join(filename.split('.')[:-1])
file.write('mtllib %s\n' % ( mtlfilename.split('\\')[-1].split('/')[-1] ))
if EXPORT_ROTX90:
mat_xrot90= Mathutils.RotationMatrix(-math.pi/2, 4, 'x')
# Initialize totals, these are updated each object
totverts = totuvco = totno = 1
face_vert_index = 1
globalNormals = {}
# Get all meshes
for ob_main in objects:
# ignore dupli children
if ob_main.parent and ob_main.parent.dupli_type != 'NONE':
# XXX
print(ob_main.name, 'is a dupli child - ignoring')
continue
obs = []
if ob_main.dupli_type != 'NONE':
# XXX
print('creating dupli_list on', ob_main.name)
ob_main.create_dupli_list()
obs = [(dob.object, dob.matrix) for dob in ob_main.dupli_list]
# XXX debug print
print(ob_main.name, 'has', len(obs), 'dupli children')
else:
obs = [(ob_main, ob_main.matrix)]
for ob, ob_mat in obs:
# XXX postponed
# # Nurbs curve support
# if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
# if EXPORT_ROTX90:
# ob_mat = ob_mat * mat_xrot90
# totverts += write_nurb(file, ob, ob_mat)
# continue
# end nurbs
if ob.type != 'MESH':
continue
me = ob.create_mesh(EXPORT_APPLY_MODIFIERS, 'PREVIEW')
if EXPORT_ROTX90:
me.transform(ob_mat * mat_xrot90)
else:
me.transform(ob_mat)
# # Will work for non meshes now! :)
# me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, EXPORT_POLYGROUPS, scn)
# if not me:
# continue
if EXPORT_UV:
faceuv = len(me.uv_textures) > 0
else:
faceuv = False
# We have a valid mesh
if EXPORT_TRI and me.faces:
# Triangulate: if the mesh has quads, use a temporary object to convert them.
has_quads = False
for f in me.faces:
if f.verts[3] != 0:
has_quads = True
break
if has_quads:
newob = bpy.data.add_object('MESH', 'temp_object')
newob.data = me
# if we forget to set Object.data - crash
scene.add_object(newob)
newob.convert_to_triface(scene)
# mesh will still be there
scene.remove_object(newob)
# Make our own list so it can be sorted to reduce context switching
face_index_pairs = [ (face, index) for index, face in enumerate(me.faces)]
# faces = [ f for f in me.faces ]
if EXPORT_EDGES:
edges = me.edges
else:
edges = []
if not (len(face_index_pairs)+len(edges)+len(me.verts)): # Make sure there is something to write
# clean up
bpy.data.remove_mesh(me)
continue # don't bother with this mesh.
# XXX
# High Quality Normals
if EXPORT_NORMALS and face_index_pairs:
me.calc_normals()
# if EXPORT_NORMALS_HQ:
# BPyMesh.meshCalcNormals(me)
# else:
# # transforming normals is incorrect
# # when the matrix is scaled,
# # better to recalculate them
# me.calcNormals()
materials = me.materials
materialNames = []
materialItems = [m for m in materials]
if materials:
for mat in materials:
if mat: # !=None
materialNames.append(mat.name)
else:
materialNames.append(None)
# Can't use a list comprehension because some materials are None.
# materialNames = map(lambda mat: mat.name, materials) # Blender bug, doesn't account for null materials, still broken.
# Possibly there are null materials, which will mess up indices
# but at least it will export, wait until Blender gets fixed.
materialNames.extend((16-len(materialNames)) * [None])
materialItems.extend((16-len(materialItems)) * [None])
# Sort by Material, then images
# so we don't over context switch in the obj file.
if EXPORT_KEEP_VERT_ORDER:
pass
elif faceuv:
# XXX update
tface = me.active_uv_texture.data
# exception only raised if Python 2.3 or lower...
try:
face_index_pairs.sort(key = lambda a: (a[0].material_index, tface[a[1]].image, a[0].smooth))
except:
face_index_pairs.sort(lambda a,b: cmp((a[0].material_index, tface[a[1]].image, a[0].smooth),
(b[0].material_index, tface[b[1]].image, b[0].smooth)))
elif len(materials) > 1:
try:
face_index_pairs.sort(key = lambda a: (a[0].material_index, a[0].smooth))
except:
face_index_pairs.sort(lambda a,b: cmp((a[0].material_index, a[0].smooth),
(b[0].material_index, b[0].smooth)))
else:
# no materials
try:
face_index_pairs.sort(key = lambda a: a[0].smooth)
except:
face_index_pairs.sort(lambda a,b: cmp(a[0].smooth, b[0].smooth))
# if EXPORT_KEEP_VERT_ORDER:
# pass
# elif faceuv:
# try: faces.sort(key = lambda a: (a.mat, a.image, a.smooth))
# except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth)))
# elif len(materials) > 1:
# try: faces.sort(key = lambda a: (a.mat, a.smooth))
# except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth)))
# else:
# # no materials
# try: faces.sort(key = lambda a: a.smooth)
# except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth))
faces = [pair[0] for pair in face_index_pairs]
# Set the default mat to no material and no image.
contextMat = (0, 0) # Can never be this, so we will label a new material the first chance we get.
contextSmooth = None # Will either be true or false, set bad to force initialization switch.
if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
name1 = ob.name
name2 = ob.data.name
if name1 == name2:
obnamestring = fixName(name1)
else:
obnamestring = '%s_%s' % (fixName(name1), fixName(name2))
if EXPORT_BLEN_OBS:
file.write('o %s\n' % obnamestring) # Write Object name
else: # if EXPORT_GROUP_BY_OB:
file.write('g %s\n' % obnamestring)
# Vert
for v in me.verts:
file.write('v %.6f %.6f %.6f\n' % tuple(v.co))
# UV
if faceuv:
uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/
uv_dict = {} # could use a set() here
uv_layer = me.active_uv_texture
for f, f_index in face_index_pairs:
tface = uv_layer.data[f_index]
uvs = [tface.uv1, tface.uv2, tface.uv3]
# add another UV if it's a quad
if f.verts[3] != 0:
uvs.append(tface.uv4)
for uv_index, uv in enumerate(uvs):
uvkey = veckey2d(uv)
try:
uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
except:
uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
file.write('vt %.6f %.6f\n' % tuple(uv))
# uv_dict = {} # could use a set() here
# for f_index, f in enumerate(faces):
# for uv_index, uv in enumerate(f.uv):
# uvkey = veckey2d(uv)
# try:
# uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
# except:
# uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
# file.write('vt %.6f %.6f\n' % tuple(uv))
uv_unique_count = len(uv_dict)
# del uv, uvkey, uv_dict, f_index, uv_index
# Only need uv_unique_count and uv_face_mapping
# NORMAL, Smooth/Non smoothed.
if EXPORT_NORMALS:
for f in faces:
if f.smooth:
for v in f:
noKey = veckey3d(v.normal)
if noKey not in globalNormals:
globalNormals[noKey] = totno
totno +=1
file.write('vn %.6f %.6f %.6f\n' % noKey)
else:
# Hard, 1 normal from the face.
noKey = veckey3d(f.normal)
if noKey not in globalNormals:
globalNormals[noKey] = totno
totno +=1
file.write('vn %.6f %.6f %.6f\n' % noKey)
if not faceuv:
f_image = None
# XXX
if EXPORT_POLYGROUPS:
# Retrieve the list of vertex groups
# vertGroupNames = me.getVertGroupNames()
currentVGroup = ''
# Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
vgroupsMap = [[] for _i in range(len(me.verts))]
# vgroupsMap = [[] for _i in xrange(len(me.verts))]
for g in ob.vertex_groups:
# for vertexGroupName in vertGroupNames:
for vIdx, vWeight in getVertsFromGroup(me, g.index):
# for vIdx, vWeight in me.getVertsFromGroup(vertexGroupName, 1):
vgroupsMap[vIdx].append((g.name, vWeight))
for f_index, f in enumerate(faces):
f_v = [{"index": index, "vertex": me.verts[index]} for index in f.verts]
if f.verts[3] == 0:
f_v.pop()
# f_v= f.v
f_smooth= f.smooth
f_mat = min(f.material_index, len(materialNames)-1)
# f_mat = min(f.mat, len(materialNames)-1)
if faceuv:
tface = me.active_uv_texture.data[face_index_pairs[f_index][1]]
f_image = tface.image
f_uv= [tface.uv1, tface.uv2, tface.uv3]
if f.verts[3] != 0:
f_uv.append(tface.uv4)
# f_image = f.image
# f_uv= f.uv
# MAKE KEY
if faceuv and f_image: # Object is always true.
key = materialNames[f_mat], f_image.name
else:
key = materialNames[f_mat], None # No image, use None instead.
# Write the vertex group
if EXPORT_POLYGROUPS:
if len(ob.vertex_groups):
# find which vertex group the face belongs to
theVGroup = findVertexGroupName(f,vgroupsMap)
if theVGroup != currentVGroup:
currentVGroup = theVGroup
file.write('g %s\n' % theVGroup)
# # Write the vertex group
# if EXPORT_POLYGROUPS:
# if vertGroupNames:
# # find what vertext group the face belongs to
# theVGroup = findVertexGroupName(f,vgroupsMap)
# if theVGroup != currentVGroup:
# currentVGroup = theVGroup
# file.write('g %s\n' % theVGroup)
# CHECK FOR CONTEXT SWITCH
if key == contextMat:
pass # Context already switched, don't do anything
else:
if key[0] == None and key[1] == None:
# Write a null material, since we know the context has changed.
if EXPORT_GROUP_BY_MAT:
# can be mat_image or (null)
file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.data.name)) ) # can be mat_image or (null)
file.write('usemtl (null)\n') # mat, image
else:
mat_data= MTL_DICT.get(key)
if not mat_data:
# First add to global dict so we can export to mtl
# Then write mtl
# Make a new name from the material and image names,
# converting any spaces to underscores with fixName.
# If there's no image, don't bother adding it to the name
if key[1] == None:
mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image
else:
mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image
if EXPORT_GROUP_BY_MAT:
file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.data.name), mat_data[0]) ) # can be mat_image or (null)
file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null)
contextMat = key
if f_smooth != contextSmooth:
if f_smooth: # was off, now on
file.write('s 1\n')
contextSmooth = f_smooth
else: # was on, now off
file.write('s off\n')
contextSmooth = f_smooth
file.write('f')
if faceuv:
if EXPORT_NORMALS:
if f_smooth: # Smoothed, use vertex normals
for vi, v in enumerate(f_v):
file.write( ' %d/%d/%d' % \
(v["index"] + totverts,
totuvco + uv_face_mapping[f_index][vi],
globalNormals[ veckey3d(v["vertex"].normal) ]) ) # vert, uv, normal
else: # No smoothing, face normals
no = globalNormals[ veckey3d(f.normal) ]
for vi, v in enumerate(f_v):
file.write( ' %d/%d/%d' % \
(v["index"] + totverts,
totuvco + uv_face_mapping[f_index][vi],
no) ) # vert, uv, normal
else: # No Normals
for vi, v in enumerate(f_v):
file.write( ' %d/%d' % (\
v["index"] + totverts,\
totuvco + uv_face_mapping[f_index][vi])) # vert, uv
face_vert_index += len(f_v)
else: # No UV's
if EXPORT_NORMALS:
if f_smooth: # Smoothed, use vertex normals
for v in f_v:
file.write( ' %d//%d' %
(v["index"] + totverts, globalNormals[ veckey3d(v["vertex"].normal) ]) )
else: # No smoothing, face normals
no = globalNormals[ veckey3d(f.normal) ]
for v in f_v:
file.write( ' %d//%d' % (v["index"] + totverts, no) )
else: # No Normals
for v in f_v:
file.write( ' %d' % (v["index"] + totverts) )
file.write('\n')
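# Recap of the OBJ face syntax written above: 'f v', 'f v/vt', 'f v//vn' and
# 'f v/vt/vn', where v/vt/vn are 1-based global vertex/uv/normal indices,
# hence the totverts/totuvco offsets.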
# Write edges.
if EXPORT_EDGES:
for ed in edges:
if ed.loose:
file.write('f %d %d\n' % (ed.verts[0] + totverts, ed.verts[1] + totverts))
# Make the indices global rather than per mesh
totverts += len(me.verts)
if faceuv:
totuvco += uv_unique_count
# clean up
bpy.data.remove_mesh(me)
if ob_main.dupli_type != 'NONE':
ob_main.free_dupli_list()
file.close()
# Now we have all our materials, save them
if EXPORT_MTL:
write_mtl(scene, mtlfilename, EXPORT_COPY_IMAGES)
# if EXPORT_COPY_IMAGES:
# dest_dir = os.path.basename(filename)
# # dest_dir = filename
# # # Remove chars until we are just the path.
# # while dest_dir and dest_dir[-1] not in '\\/':
# # dest_dir = dest_dir[:-1]
# if dest_dir:
# copy_images(dest_dir)
# else:
# print('\tError: "%s" could not be used as a base for an image path.' % filename)
print("OBJ Export time: %.2f" % (bpy.sys.time() - time1))
# print "OBJ Export time: %.2f" % (sys.time() - time1)
def do_export(filename, context,
EXPORT_APPLY_MODIFIERS = True, # not used
EXPORT_ROTX90 = True, # wrong
EXPORT_TRI = False, # ok
EXPORT_EDGES = False,
EXPORT_NORMALS = False, # not yet
EXPORT_NORMALS_HQ = False, # not yet
EXPORT_UV = True, # ok
EXPORT_MTL = True,
EXPORT_SEL_ONLY = True, # ok
EXPORT_ALL_SCENES = False, # XXX not working atm
EXPORT_ANIMATION = False,
EXPORT_COPY_IMAGES = False,
EXPORT_BLEN_OBS = True,
EXPORT_GROUP_BY_OB = False,
EXPORT_GROUP_BY_MAT = False,
EXPORT_KEEP_VERT_ORDER = False,
EXPORT_POLYGROUPS = False,
EXPORT_CURVE_AS_NURBS = True):
# Window.EditMode(0)
# Window.WaitCursor(1)
base_name, ext = splitExt(filename)
context_name = [base_name, '', '', ext] # Base name, scene name, frame number, extension
orig_scene = context.scene
# if EXPORT_ALL_SCENES:
# export_scenes = bpy.data.scenes
# else:
# export_scenes = [orig_scene]
# XXX only exporting one scene atm since changing
# current scene is not possible.
# Brecht says that ideally in 2.5 we won't need such a function,
# allowing multiple scenes open at once.
export_scenes = [orig_scene]
# Export all scenes.
for scn in export_scenes:
# scn.makeCurrent() # If already current, this is not slow.
# context = scn.getRenderingContext()
orig_frame = scn.current_frame
if EXPORT_ALL_SCENES: # Add scene name into the context_name
context_name[1] = '_%s' % BPySys_cleanName(scn.name) # WARNING, it's possible that this could cause a collision. We could fix this if we were feeling paranoid.
# Export an animation?
if EXPORT_ANIMATION:
scene_frames = range(scn.start_frame, scn.end_frame+1) # Up to and including the end frame.
else:
scene_frames = [orig_frame] # Don't export an animation.
# Loop through all frames in the scene and export.
for frame in scene_frames:
if EXPORT_ANIMATION: # Add frame to the filename.
context_name[2] = '_%.6d' % frame
scn.current_frame = frame
if EXPORT_SEL_ONLY:
export_objects = context.selected_objects
else:
export_objects = scn.objects
full_path= ''.join(context_name)
# erm... bit of a problem here, this can overwrite files when exporting frames. not too bad.
# EXPORT THE FILE.
write(full_path, export_objects, scn,
EXPORT_TRI, EXPORT_EDGES, EXPORT_NORMALS,
EXPORT_NORMALS_HQ, EXPORT_UV, EXPORT_MTL,
EXPORT_COPY_IMAGES, EXPORT_APPLY_MODIFIERS,
EXPORT_ROTX90, EXPORT_BLEN_OBS,
EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER,
EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS)
scn.current_frame = orig_frame
# Restore old active scene.
# orig_scene.makeCurrent()
# Window.WaitCursor(0)
class EXPORT_OT_obj(bpy.types.Operator):
'''
Currently the exporter lacks these features:
* nurbs
* multiple scene export (only active scene is written)
* particles
'''
__idname__ = "export.obj"
__label__ = 'Export OBJ'
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
__props__ = [
bpy.props.StringProperty(attr="filename", name="File Name", description="File name used for exporting the OBJ file", maxlen= 1024, default= ""),
# context group
bpy.props.BoolProperty(attr="use_selection", name="Selection Only", description="", default= False),
bpy.props.BoolProperty(attr="use_all_scenes", name="All Scenes", description="", default= False),
bpy.props.BoolProperty(attr="use_animation", name="All Animation", description="", default= False),
# object group
bpy.props.BoolProperty(attr="use_modifiers", name="Apply Modifiers", description="", default= True),
bpy.props.BoolProperty(attr="use_rotate90", name="Rotate X90", description="", default= True),
# extra data group
bpy.props.BoolProperty(attr="use_edges", name="Edges", description="", default= True),
bpy.props.BoolProperty(attr="use_normals", name="Normals", description="", default= False),
bpy.props.BoolProperty(attr="use_hq_normals", name="High Quality Normals", description="", default= True),
bpy.props.BoolProperty(attr="use_uvs", name="UVs", description="", default= True),
bpy.props.BoolProperty(attr="use_materials", name="Materials", description="", default= True),
bpy.props.BoolProperty(attr="copy_images", name="Copy Images", description="", default= False),
bpy.props.BoolProperty(attr="use_triangles", name="Triangulate", description="", default= False),
bpy.props.BoolProperty(attr="use_vertex_groups", name="Polygroups", description="", default= False),
bpy.props.BoolProperty(attr="use_nurbs", name="Nurbs", description="", default= False),
# grouping group
bpy.props.BoolProperty(attr="use_blen_objects", name="Objects as OBJ Objects", description="", default= True),
bpy.props.BoolProperty(attr="group_by_object", name="Objects as OBJ Groups ", description="", default= False),
bpy.props.BoolProperty(attr="group_by_material", name="Material Groups", description="", default= False),
bpy.props.BoolProperty(attr="keep_vertex_order", name="Keep Vertex Order", description="", default= False)
]
def execute(self, context):
do_export(self.filename, context,
EXPORT_TRI=self.use_triangles,
EXPORT_EDGES=self.use_edges,
EXPORT_NORMALS=self.use_normals,
EXPORT_NORMALS_HQ=self.use_hq_normals,
EXPORT_UV=self.use_uvs,
EXPORT_MTL=self.use_materials,
EXPORT_COPY_IMAGES=self.copy_images,
EXPORT_APPLY_MODIFIERS=self.use_modifiers,
EXPORT_ROTX90=self.use_rotate90,
EXPORT_BLEN_OBS=self.use_blen_objects,
EXPORT_GROUP_BY_OB=self.group_by_object,
EXPORT_GROUP_BY_MAT=self.group_by_material,
EXPORT_KEEP_VERT_ORDER=self.keep_vertex_order,
EXPORT_POLYGROUPS=self.use_vertex_groups,
EXPORT_CURVE_AS_NURBS=self.use_nurbs,
EXPORT_SEL_ONLY=self.use_selection,
EXPORT_ALL_SCENES=self.use_all_scenes)
return ('FINISHED',)
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self.__operator__)
return ('RUNNING_MODAL',)
def poll(self, context): # Poll isn't working yet
print("Poll")
return context.active_object != None
bpy.ops.add(EXPORT_OT_obj)
if __name__ == "__main__":
bpy.ops.EXPORT_OT_obj(filename="/tmp/test.obj")
# CONVERSION ISSUES
# - matrix problem
# - duplis - only tested dupliverts
# - NURBS - needs API additions
# - all scenes export
# + normals calculation
# - get rid of cleanName somehow
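# A minimal usage sketch (not part of the original script): do_export() can also be
# driven directly from another script. "context" stands for whatever context object
# an operator's execute() receives; the path and flag values are illustrative only.
#
#   do_export("/tmp/test.obj", context, EXPORT_TRI=True, EXPORT_SEL_ONLY=False)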

View File

@@ -78,7 +78,7 @@ def write(filename, scene, ob, \
#mesh = BPyMesh.getMeshFromObject(ob, None, EXPORT_APPLY_MODIFIERS, False, scn) # XXX
if EXPORT_APPLY_MODIFIERS:
mesh = ob.create_render_mesh(scene)
mesh = ob.create_mesh(True, 'PREVIEW')
else:
mesh = ob.data

1239
release/io/export_x3d.py Normal file
View File

@@ -0,0 +1,1239 @@
#!BPY
""" Registration info for Blender menus:
Name: 'X3D Extensible 3D (.x3d)...'
Blender: 245
Group: 'Export'
Tooltip: 'Export selection to Extensible 3D file (.x3d)'
"""
__author__ = ("Bart", "Campbell Barton")
__email__ = ["Bart, bart:neeneenee*de"]
__url__ = ["Author's (Bart) homepage, http://www.neeneenee.de/vrml"]
__version__ = "2006/01/17"
__bpydoc__ = """\
This script exports to X3D format.
Usage:
Run this script from "File->Export" menu. A pop-up will ask whether you
want to export only selected or all relevant objects.
Known issues:<br>
Doesn't handle multiple materials (don't use material indices);<br>
Doesn't handle multiple UV textures on a single mesh (create a mesh for each texture);<br>
Can't get the texture array associated with a material (only face UV textures are used);
"""
# $Id: export_x3d.py 23222 2009-09-14 14:55:49Z kazanbas $
#
#------------------------------------------------------------------------
# X3D exporter for blender 2.36 or above
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
#
####################################
# Library dependencies
####################################
import math
import os
import bpy
import Mathutils
from export_3ds import create_derived_objects, free_derived_objects
# import Blender
# from Blender import Object, Lamp, Draw, Image, Text, sys, Mesh
# from Blender.Scene import Render
# import BPyObject
# import BPyMesh
#
DEG2RAD=0.017453292519943295
MATWORLD= Mathutils.RotationMatrix(-90, 4, 'x')
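# MATWORLD rotates -90 degrees about X so Blender's Z-up object matrices come out
# Y-up, as X3D/VRML expects, when they are multiplied by it further below.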
####################################
# Global Variables
####################################
filename = ""
# filename = Blender.Get('filename')
_safeOverwrite = True
extension = ''
##########################################################
# Functions for writing output file
##########################################################
class x3d_class:
def __init__(self, filename):
#--- public you can change these ---
self.writingcolor = 0
self.writingtexture = 0
self.writingcoords = 0
self.proto = 1
self.matonly = 0
self.share = 0
self.billnode = 0
self.halonode = 0
self.collnode = 0
self.tilenode = 0
self.verbose=2 # level of verbosity in console 0-none, 1-some, 2-most
self.cp=3 # decimals for material color values 0.000 - 1.000
self.vp=3 # decimals for vertex coordinate values 0.000 - n.000
self.tp=3 # decimals for texture coordinate values 0.000 - 1.000
self.it=3
#--- class private don't touch ---
self.texNames={} # dictionary of textureNames
self.matNames={} # dictionary of materialNames
self.meshNames={} # dictionary of meshNames
self.indentLevel=0 # keeps track of current indenting
self.filename=filename
self.file = None
if filename.lower().endswith('.x3dz'):
try:
import gzip
self.file = gzip.open(filename, "w")
except:
print("failed to import compression modules, exporting uncompressed")
self.filename = filename[:-1] # remove trailing z
if self.file == None:
self.file = open(self.filename, "w")
self.bNav=0
self.nodeID=0
self.namesReserved=[ "Anchor","Appearance","Arc2D","ArcClose2D","AudioClip","Background","Billboard",
"BooleanFilter","BooleanSequencer","BooleanToggle","BooleanTrigger","Box","Circle2D",
"Collision","Color","ColorInterpolator","ColorRGBA","component","Cone","connect",
"Contour2D","ContourPolyline2D","Coordinate","CoordinateDouble","CoordinateInterpolator",
"CoordinateInterpolator2D","Cylinder","CylinderSensor","DirectionalLight","Disk2D",
"ElevationGrid","EspduTransform","EXPORT","ExternProtoDeclare","Extrusion","field",
"fieldValue","FillProperties","Fog","FontStyle","GeoCoordinate","GeoElevationGrid",
"GeoLocationLocation","GeoLOD","GeoMetadata","GeoOrigin","GeoPositionInterpolator",
"GeoTouchSensor","GeoViewpoint","Group","HAnimDisplacer","HAnimHumanoid","HAnimJoint",
"HAnimSegment","HAnimSite","head","ImageTexture","IMPORT","IndexedFaceSet",
"IndexedLineSet","IndexedTriangleFanSet","IndexedTriangleSet","IndexedTriangleStripSet",
"Inline","IntegerSequencer","IntegerTrigger","IS","KeySensor","LineProperties","LineSet",
"LoadSensor","LOD","Material","meta","MetadataDouble","MetadataFloat","MetadataInteger",
"MetadataSet","MetadataString","MovieTexture","MultiTexture","MultiTextureCoordinate",
"MultiTextureTransform","NavigationInfo","Normal","NormalInterpolator","NurbsCurve",
"NurbsCurve2D","NurbsOrientationInterpolator","NurbsPatchSurface",
"NurbsPositionInterpolator","NurbsSet","NurbsSurfaceInterpolator","NurbsSweptSurface",
"NurbsSwungSurface","NurbsTextureCoordinate","NurbsTrimmedSurface","OrientationInterpolator",
"PixelTexture","PlaneSensor","PointLight","PointSet","Polyline2D","Polypoint2D",
"PositionInterpolator","PositionInterpolator2D","ProtoBody","ProtoDeclare","ProtoInstance",
"ProtoInterface","ProximitySensor","ReceiverPdu","Rectangle2D","ROUTE","ScalarInterpolator",
"Scene","Script","Shape","SignalPdu","Sound","Sphere","SphereSensor","SpotLight","StaticGroup",
"StringSensor","Switch","Text","TextureBackground","TextureCoordinate","TextureCoordinateGenerator",
"TextureTransform","TimeSensor","TimeTrigger","TouchSensor","Transform","TransmitterPdu",
"TriangleFanSet","TriangleSet","TriangleSet2D","TriangleStripSet","Viewpoint","VisibilitySensor",
"WorldInfo","X3D","XvlShell","VertexShader","FragmentShader","MultiShaderAppearance","ShaderAppearance" ]
self.namesStandard=[ "Empty","Empty.000","Empty.001","Empty.002","Empty.003","Empty.004","Empty.005",
"Empty.006","Empty.007","Empty.008","Empty.009","Empty.010","Empty.011","Empty.012",
"Scene.001","Scene.002","Scene.003","Scene.004","Scene.005","Scene.06","Scene.013",
"Scene.006","Scene.007","Scene.008","Scene.009","Scene.010","Scene.011","Scene.012",
"World","World.000","World.001","World.002","World.003","World.004","World.005" ]
self.namesFog=[ "","LINEAR","EXPONENTIAL","" ]
##########################################################
# Writing nodes routines
##########################################################
def writeHeader(self):
#bfile = sys.expandpath( Blender.Get('filename') ).replace('<', '&lt').replace('>', '&gt')
bfile = self.filename.replace('<', '&lt;').replace('>', '&gt;') # use outfile name
self.file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
self.file.write("<!DOCTYPE X3D PUBLIC \"ISO//Web3D//DTD X3D 3.0//EN\" \"http://www.web3d.org/specifications/x3d-3.0.dtd\">\n")
self.file.write("<X3D version=\"3.0\" profile=\"Immersive\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema-instance\" xsd:noNamespaceSchemaLocation=\"http://www.web3d.org/specifications/x3d-3.0.xsd\">\n")
self.file.write("<head>\n")
self.file.write("\t<meta name=\"filename\" content=\"%s\" />\n" % os.path.basename(bfile))
# self.file.write("\t<meta name=\"filename\" content=\"%s\" />\n" % sys.basename(bfile))
self.file.write("\t<meta name=\"generator\" content=\"Blender %s\" />\n" % '2.5')
# self.file.write("\t<meta name=\"generator\" content=\"Blender %s\" />\n" % Blender.Get('version'))
self.file.write("\t<meta name=\"translator\" content=\"X3D exporter v1.55 (2006/01/17)\" />\n")
self.file.write("</head>\n")
self.file.write("<Scene>\n")
# This functionality is poorly defined, disabling for now - campbell
'''
def writeInline(self):
inlines = Blender.Scene.Get()
allinlines = len(inlines)
if scene != inlines[0]:
return
else:
for i in xrange(allinlines):
nameinline=inlines[i].name
if (nameinline not in self.namesStandard) and (i > 0):
self.file.write("<Inline DEF=\"%s\" " % (self.cleanStr(nameinline)))
nameinline = nameinline+".x3d"
self.file.write("url=\"%s\" />" % nameinline)
self.file.write("\n\n")
def writeScript(self):
textEditor = Blender.Text.Get()
alltext = len(textEditor)
for i in xrange(alltext):
nametext = textEditor[i].name
nlines = textEditor[i].getNLines()
if (self.proto == 1):
if (nametext == "proto" or nametext == "proto.js" or nametext == "proto.txt") and (nlines != None):
nalllines = len(textEditor[i].asLines())
alllines = textEditor[i].asLines()
for j in xrange(nalllines):
self.writeIndented(alllines[j] + "\n")
elif (self.proto == 0):
if (nametext == "route" or nametext == "route.js" or nametext == "route.txt") and (nlines != None):
nalllines = len(textEditor[i].asLines())
alllines = textEditor[i].asLines()
for j in xrange(nalllines):
self.writeIndented(alllines[j] + "\n")
self.writeIndented("\n")
'''
def writeViewpoint(self, ob, mat, scene):
context = scene.render_data
# context = scene.render
ratio = float(context.resolution_x)/float(context.resolution_y)
# ratio = float(context.imageSizeY())/float(context.imageSizeX())
lens = (360* (math.atan(ratio *16 / ob.data.lens) / math.pi))*(math.pi/180)
# lens = (360* (math.atan(ratio *16 / ob.data.getLens()) / math.pi))*(math.pi/180)
lens = min(lens, math.pi)
# get the camera location, subtract 90 degrees from X to orient like X3D does
# mat = ob.matrixWorld - mat is now passed!
loc = self.rotatePointForVRML(mat.translationPart())
rot = mat.toEuler()
rot = (((rot[0]-90)), rot[1], rot[2])
# rot = (((rot[0]-90)*DEG2RAD), rot[1]*DEG2RAD, rot[2]*DEG2RAD)
nRot = self.rotatePointForVRML( rot )
# convert to Quaternion and to Angle Axis
Q = self.eulerToQuaternions(nRot[0], nRot[1], nRot[2])
Q1 = self.multiplyQuaternions(Q[0], Q[1])
Qf = self.multiplyQuaternions(Q1, Q[2])
angleAxis = self.quaternionToAngleAxis(Qf)
self.file.write("<Viewpoint DEF=\"%s\" " % (self.cleanStr(ob.name)))
self.file.write("description=\"%s\" " % (ob.name))
self.file.write("centerOfRotation=\"0 0 0\" ")
self.file.write("position=\"%3.2f %3.2f %3.2f\" " % (loc[0], loc[1], loc[2]))
self.file.write("orientation=\"%3.2f %3.2f %3.2f %3.2f\" " % (angleAxis[0], angleAxis[1], -angleAxis[2], angleAxis[3]))
self.file.write("fieldOfView=\"%.3f\" />\n\n" % (lens))
def writeFog(self, world):
if world:
mtype = world.mist.falloff
# mtype = world.getMistype()
mparam = world.mist
# mparam = world.getMist()
grd = world.horizon_color
# grd = world.getHor()
grd0, grd1, grd2 = grd[0], grd[1], grd[2]
else:
return
if (mtype == 'LINEAR' or mtype == 'INVERSE_QUADRATIC'):
mtype = 1 if mtype == 'LINEAR' else 2
# if (mtype == 1 or mtype == 2):
self.file.write("<Fog fogType=\"%s\" " % self.namesFog[mtype])
self.file.write("color=\"%s %s %s\" " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
self.file.write("visibilityRange=\"%s\" />\n\n" % round(mparam[2],self.cp))
else:
return
def writeNavigationInfo(self, scene):
self.file.write('<NavigationInfo headlight="FALSE" visibilityLimit="0.0" type=\'"EXAMINE","ANY"\' avatarSize="0.25, 1.75, 0.75" />\n')
def writeSpotLight(self, ob, mtx, lamp, world):
safeName = self.cleanStr(ob.name)
if world:
ambi = world.ambient_color
# ambi = world.amb
ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5
else:
ambi = 0
ambientIntensity = 0
# compute cutoff and beamwidth
intensity=min(lamp.energy/1.75,1.0)
beamWidth=((lamp.spot_size*math.pi)/180.0)*.37;
# beamWidth=((lamp.spotSize*math.pi)/180.0)*.37;
cutOffAngle=beamWidth*1.3
dx,dy,dz=self.computeDirection(mtx)
# note -dx seems to equal om[3][0]
# note -dz seems to equal om[3][1]
# note dy seems to equal om[3][2]
#location=(ob.matrixWorld*MATWORLD).translationPart() # now passed
location=(mtx*MATWORLD).translationPart()
radius = lamp.distance*math.cos(beamWidth)
# radius = lamp.dist*math.cos(beamWidth)
self.file.write("<SpotLight DEF=\"%s\" " % safeName)
self.file.write("radius=\"%s\" " % (round(radius,self.cp)))
self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity,self.cp)))
self.file.write("intensity=\"%s\" " % (round(intensity,self.cp)))
self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0],self.cp), round(lamp.color[1],self.cp), round(lamp.color[2],self.cp)))
# self.file.write("color=\"%s %s %s\" " % (round(lamp.col[0],self.cp), round(lamp.col[1],self.cp), round(lamp.col[2],self.cp)))
self.file.write("beamWidth=\"%s\" " % (round(beamWidth,self.cp)))
self.file.write("cutOffAngle=\"%s\" " % (round(cutOffAngle,self.cp)))
self.file.write("direction=\"%s %s %s\" " % (round(dx,3),round(dy,3),round(dz,3)))
self.file.write("location=\"%s %s %s\" />\n\n" % (round(location[0],3), round(location[1],3), round(location[2],3)))
def writeDirectionalLight(self, ob, mtx, lamp, world):
safeName = self.cleanStr(ob.name)
if world:
ambi = world.ambient_color
# ambi = world.amb
ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5
else:
ambi = 0
ambientIntensity = 0
intensity=min(lamp.energy/1.75,1.0)
(dx,dy,dz)=self.computeDirection(mtx)
self.file.write("<DirectionalLight DEF=\"%s\" " % safeName)
self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity,self.cp)))
self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0],self.cp), round(lamp.color[1],self.cp), round(lamp.color[2],self.cp)))
# self.file.write("color=\"%s %s %s\" " % (round(lamp.col[0],self.cp), round(lamp.col[1],self.cp), round(lamp.col[2],self.cp)))
self.file.write("intensity=\"%s\" " % (round(intensity,self.cp)))
self.file.write("direction=\"%s %s %s\" />\n\n" % (round(dx,4),round(dy,4),round(dz,4)))
def writePointLight(self, ob, mtx, lamp, world):
safeName = self.cleanStr(ob.name)
if world:
ambi = world.ambient_color
# ambi = world.amb
ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5
else:
ambi = 0
ambientIntensity = 0
# location=(ob.matrixWorld*MATWORLD).translationPart() # now passed
location= (mtx*MATWORLD).translationPart()
self.file.write("<PointLight DEF=\"%s\" " % safeName)
self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity,self.cp)))
self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0],self.cp), round(lamp.color[1],self.cp), round(lamp.color[2],self.cp)))
# self.file.write("color=\"%s %s %s\" " % (round(lamp.col[0],self.cp), round(lamp.col[1],self.cp), round(lamp.col[2],self.cp)))
self.file.write("intensity=\"%s\" " % (round( min(lamp.energy/1.75,1.0) ,self.cp)))
self.file.write("radius=\"%s\" " % lamp.distance )
# self.file.write("radius=\"%s\" " % lamp.dist )
self.file.write("location=\"%s %s %s\" />\n\n" % (round(location[0],3), round(location[1],3), round(location[2],3)))
'''
def writeNode(self, ob, mtx):
obname=str(ob.name)
if obname in self.namesStandard:
return
else:
dx,dy,dz = self.computeDirection(mtx)
# location=(ob.matrixWorld*MATWORLD).translationPart()
location=(mtx*MATWORLD).translationPart()
self.writeIndented("<%s\n" % obname,1)
self.writeIndented("direction=\"%s %s %s\"\n" % (round(dx,3),round(dy,3),round(dz,3)))
self.writeIndented("location=\"%s %s %s\"\n" % (round(location[0],3), round(location[1],3), round(location[2],3)))
self.writeIndented("/>\n",-1)
self.writeIndented("\n")
'''
def secureName(self, name):
name = name + str(self.nodeID)
self.nodeID=self.nodeID+1
if len(name) <= 3:
newname = "_" + str(self.nodeID)
return "%s" % (newname)
else:
for bad in ['"','#',"'",',','.','[','\\',']','{','}']:
name=name.replace(bad,'_')
if name in self.namesReserved:
newname = name[0:3] + "_" + str(self.nodeID)
return "%s" % (newname)
elif name[0].isdigit():
newname = "_" + name + str(self.nodeID)
return "%s" % (newname)
else:
newname = name
return "%s" % (newname)
def writeIndexedFaceSet(self, ob, mesh, mtx, world, EXPORT_TRI = False):
imageMap={} # set of used images
sided={} # 'one':cnt , 'two':cnt
vColors={} # 'multi':1
meshName = self.cleanStr(ob.name)
meshME = self.cleanStr(ob.data.name) # We don't care if it's the mesh name or not
# meshME = self.cleanStr(ob.getData(mesh=1).name) # We dont care if its the mesh name or not
if len(mesh.faces) == 0: return
mode = []
# mode = 0
if mesh.active_uv_texture:
# if mesh.faceUV:
for face in mesh.active_uv_texture.data:
# for face in mesh.faces:
if face.halo and 'HALO' not in mode:
mode += ['HALO']
if face.billboard and 'BILLBOARD' not in mode:
mode += ['BILLBOARD']
if face.object_color and 'OBJECT_COLOR' not in mode:
mode += ['OBJECT_COLOR']
if face.collision and 'COLLISION' not in mode:
mode += ['COLLISION']
# mode |= face.mode
if 'HALO' in mode and self.halonode == 0:
# if mode & Mesh.FaceModes.HALO and self.halonode == 0:
self.writeIndented("<Billboard axisOfRotation=\"0 0 0\">\n",1)
self.halonode = 1
elif 'BILLBOARD' in mode and self.billnode == 0:
# elif mode & Mesh.FaceModes.BILLBOARD and self.billnode == 0:
self.writeIndented("<Billboard axisOfRotation=\"0 1 0\">\n",1)
self.billnode = 1
elif 'OBJECT_COLOR' in mode and self.matonly == 0:
# elif mode & Mesh.FaceModes.OBCOL and self.matonly == 0:
self.matonly = 1
# TF_TILES is marked as deprecated in DNA_meshdata_types.h
# elif mode & Mesh.FaceModes.TILES and self.tilenode == 0:
# self.tilenode = 1
elif 'COLLISION' not in mode and self.collnode == 0:
# elif not mode & Mesh.FaceModes.DYNAMIC and self.collnode == 0:
self.writeIndented("<Collision enabled=\"false\">\n",1)
self.collnode = 1
nIFSCnt=self.countIFSSetsNeeded(mesh, imageMap, sided, vColors)
if nIFSCnt > 1:
self.writeIndented("<Group DEF=\"%s%s\">\n" % ("G_", meshName),1)
if 'two' in sided and sided['two'] > 0:
bTwoSided=1
else:
bTwoSided=0
# mtx = ob.matrixWorld * MATWORLD # mtx is now passed
mtx = mtx * MATWORLD
loc= mtx.translationPart()
sca= mtx.scalePart()
quat = mtx.toQuat()
rot= quat.axis
self.writeIndented('<Transform DEF="%s" translation="%.6f %.6f %.6f" scale="%.6f %.6f %.6f" rotation="%.6f %.6f %.6f %.6f">\n' % \
(meshName, loc[0], loc[1], loc[2], sca[0], sca[1], sca[2], rot[0], rot[1], rot[2], quat.angle) )
# self.writeIndented('<Transform DEF="%s" translation="%.6f %.6f %.6f" scale="%.6f %.6f %.6f" rotation="%.6f %.6f %.6f %.6f">\n' % \
# (meshName, loc[0], loc[1], loc[2], sca[0], sca[1], sca[2], rot[0], rot[1], rot[2], quat.angle*DEG2RAD) )
self.writeIndented("<Shape>\n",1)
maters=mesh.materials
hasImageTexture=0
issmooth=0
if len(maters) > 0 or mesh.active_uv_texture:
# if len(maters) > 0 or mesh.faceUV:
self.writeIndented("<Appearance>\n", 1)
# right now this script can only handle a single material per mesh.
if len(maters) >= 1:
mat=maters[0]
# matFlags = mat.getMode()
if not mat.face_texture:
# if not matFlags & Blender.Material.Modes['TEXFACE']:
self.writeMaterial(mat, self.cleanStr(mat.name,''), world)
# self.writeMaterial(mat, self.cleanStr(maters[0].name,''), world)
if len(maters) > 1:
print("Warning: mesh named %s has multiple materials" % meshName)
print("Warning: only one material per object handled")
#-- textures
face = None
if mesh.active_uv_texture:
# if mesh.faceUV:
for face in mesh.active_uv_texture.data:
# for face in mesh.faces:
if face.image:
# if (hasImageTexture == 0) and (face.image):
self.writeImageTexture(face.image)
# hasImageTexture=1 # keep track of face texture
break
if self.tilenode == 1 and face and face.image:
# if self.tilenode == 1:
self.writeIndented("<TextureTransform scale=\"%s %s\" />\n" % (face.image.xrep, face.image.yrep))
self.tilenode = 0
self.writeIndented("</Appearance>\n", -1)
#-- IndexedFaceSet or IndexedLineSet
# user selected BOUNDS=1, SOLID=3, SHARED=4, or TEXTURE=5
ifStyle="IndexedFaceSet"
# look up mesh name, use it if available
if meshME in self.meshNames:
self.writeIndented("<%s USE=\"ME_%s\">" % (ifStyle, meshME), 1)
self.meshNames[meshME]+=1
else:
if int(mesh.users) > 1:
self.writeIndented("<%s DEF=\"ME_%s\" " % (ifStyle, meshME), 1)
self.meshNames[meshME]=1
else:
self.writeIndented("<%s " % ifStyle, 1)
if bTwoSided == 1:
self.file.write("solid=\"false\" ")
else:
self.file.write("solid=\"true\" ")
for face in mesh.faces:
if face.smooth:
issmooth=1
break
if issmooth==1:
creaseAngle=(mesh.autosmooth_angle)*(math.pi/180.0)
# creaseAngle=(mesh.degr)*(math.pi/180.0)
self.file.write("creaseAngle=\"%s\" " % (round(creaseAngle,self.cp)))
#--- output textureCoordinates if UV texture used
if mesh.active_uv_texture:
# if mesh.faceUV:
if self.matonly == 1 and self.share == 1:
self.writeFaceColors(mesh)
elif hasImageTexture == 1:
self.writeTextureCoordinates(mesh)
#--- output coordinates
self.writeCoordinates(ob, mesh, meshName, EXPORT_TRI)
self.writingcoords = 1
self.writingtexture = 1
self.writingcolor = 1
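# Second pass: with the writing* flags set, the calls below emit the actual
# <Coordinate>, <TextureCoordinate> and <Color> data rather than the index lists
# written above.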
self.writeCoordinates(ob, mesh, meshName, EXPORT_TRI)
#--- output textureCoordinates if UV texture used
if mesh.active_uv_texture:
# if mesh.faceUV:
if hasImageTexture == 1:
self.writeTextureCoordinates(mesh)
elif self.matonly == 1 and self.share == 1:
self.writeFaceColors(mesh)
#--- output vertexColors
self.matonly = 0
self.share = 0
self.writingcoords = 0
self.writingtexture = 0
self.writingcolor = 0
#--- output closing braces
self.writeIndented("</%s>\n" % ifStyle, -1)
self.writeIndented("</Shape>\n", -1)
self.writeIndented("</Transform>\n", -1)
if self.halonode == 1:
self.writeIndented("</Billboard>\n", -1)
self.halonode = 0
if self.billnode == 1:
self.writeIndented("</Billboard>\n", -1)
self.billnode = 0
if self.collnode == 1:
self.writeIndented("</Collision>\n", -1)
self.collnode = 0
if nIFSCnt > 1:
self.writeIndented("</Group>\n", -1)
self.file.write("\n")
def writeCoordinates(self, ob, mesh, meshName, EXPORT_TRI = False):
# create vertex list and pre rotate -90 degrees X for VRML
if self.writingcoords == 0:
self.file.write('coordIndex="')
for face in mesh.faces:
fv = face.verts
# fv = face.v
if len(fv)==3:
# if len(face)==3:
self.file.write("%i %i %i -1, " % (fv[0], fv[1], fv[2]))
# self.file.write("%i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index))
else:
if EXPORT_TRI:
self.file.write("%i %i %i -1, " % (fv[0], fv[1], fv[2]))
# self.file.write("%i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index))
self.file.write("%i %i %i -1, " % (fv[0], fv[2], fv[3]))
# self.file.write("%i %i %i -1, " % (fv[0].index, fv[2].index, fv[3].index))
else:
self.file.write("%i %i %i %i -1, " % (fv[0], fv[1], fv[2], fv[3]))
# self.file.write("%i %i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index, fv[3].index))
self.file.write("\">\n")
else:
#-- vertices
# mesh.transform(ob.matrixWorld)
self.writeIndented("<Coordinate DEF=\"%s%s\" \n" % ("coord_",meshName), 1)
self.file.write("\t\t\t\tpoint=\"")
for v in mesh.verts:
self.file.write("%.6f %.6f %.6f, " % tuple(v.co))
self.file.write("\" />")
self.writeIndented("\n", -1)
def writeTextureCoordinates(self, mesh):
texCoordList=[]
texIndexList=[]
j=0
for face in mesh.active_uv_texture.data:
# for face in mesh.faces:
uvs = [face.uv1, face.uv2, face.uv3, face.uv4] if face.verts[3] else [face.uv1, face.uv2, face.uv3]
for uv in uvs:
# for uv in face.uv:
texIndexList.append(j)
texCoordList.append(uv)
j=j+1
texIndexList.append(-1)
if self.writingtexture == 0:
self.file.write("\n\t\t\ttexCoordIndex=\"")
texIndxStr=""
for i in range(len(texIndexList)):
texIndxStr = texIndxStr + "%d, " % texIndexList[i]
if texIndexList[i]==-1:
self.file.write(texIndxStr)
texIndxStr=""
self.file.write("\"\n\t\t\t")
else:
self.writeIndented("<TextureCoordinate point=\"", 1)
for i in range(len(texCoordList)):
self.file.write("%s %s, " % (round(texCoordList[i][0],self.tp), round(texCoordList[i][1],self.tp)))
self.file.write("\" />")
self.writeIndented("\n", -1)
def writeFaceColors(self, mesh):
if self.writingcolor == 0:
self.file.write("colorPerVertex=\"false\" ")
elif mesh.active_vertex_color:
# else:
self.writeIndented("<Color color=\"", 1)
for face in mesh.active_vertex_color.data:
c = face.color1
if self.verbose > 2:
print("Debug: face.col r=%d g=%d b=%d" % (c[0], c[1], c[2]))
# print("Debug: face.col r=%d g=%d b=%d" % (c.r, c.g, c.b))
aColor = self.rgbToFS(c)
self.file.write("%s, " % aColor)
# for face in mesh.faces:
# if face.col:
# c=face.col[0]
# if self.verbose > 2:
# print("Debug: face.col r=%d g=%d b=%d" % (c.r, c.g, c.b))
# aColor = self.rgbToFS(c)
# self.file.write("%s, " % aColor)
self.file.write("\" />")
self.writeIndented("\n",-1)
def writeMaterial(self, mat, matName, world):
# look up material name, use it if available
if matName in self.matNames:
self.writeIndented("<Material USE=\"MA_%s\" />\n" % matName)
self.matNames[matName]+=1
return;
self.matNames[matName]=1
ambient = mat.ambient/3
# ambient = mat.amb/3
diffuseR, diffuseG, diffuseB = tuple(mat.diffuse_color)
# diffuseR, diffuseG, diffuseB = mat.rgbCol[0], mat.rgbCol[1],mat.rgbCol[2]
if world:
ambi = world.ambient_color
# ambi = world.getAmb()
ambi0, ambi1, ambi2 = (ambi[0]*mat.ambient)*2, (ambi[1]*mat.ambient)*2, (ambi[2]*mat.ambient)*2
# ambi0, ambi1, ambi2 = (ambi[0]*mat.amb)*2, (ambi[1]*mat.amb)*2, (ambi[2]*mat.amb)*2
else:
ambi0, ambi1, ambi2 = 0, 0, 0
emisR, emisG, emisB = (diffuseR*mat.emit+ambi0)/2, (diffuseG*mat.emit+ambi1)/2, (diffuseB*mat.emit+ambi2)/2
shininess = mat.specular_hardness/512.0
# shininess = mat.hard/512.0
specR = (mat.specular_color[0]+0.001)/(1.25/(mat.specular_reflection+0.001))
# specR = (mat.specCol[0]+0.001)/(1.25/(mat.spec+0.001))
specG = (mat.specular_color[1]+0.001)/(1.25/(mat.specular_reflection+0.001))
# specG = (mat.specCol[1]+0.001)/(1.25/(mat.spec+0.001))
specB = (mat.specular_color[2]+0.001)/(1.25/(mat.specular_reflection+0.001))
# specB = (mat.specCol[2]+0.001)/(1.25/(mat.spec+0.001))
transp = 1-mat.alpha
# matFlags = mat.getMode()
if mat.shadeless:
# if matFlags & Blender.Material.Modes['SHADELESS']:
ambient = 1
shininess = 1
specR = emisR = diffuseR
specG = emisG = diffuseG
specB = emisB = diffuseB
self.writeIndented("<Material DEF=\"MA_%s\" " % matName, 1)
self.file.write("diffuseColor=\"%s %s %s\" " % (round(diffuseR,self.cp), round(diffuseG,self.cp), round(diffuseB,self.cp)))
self.file.write("specularColor=\"%s %s %s\" " % (round(specR,self.cp), round(specG,self.cp), round(specB,self.cp)))
self.file.write("emissiveColor=\"%s %s %s\" \n" % (round(emisR,self.cp), round(emisG,self.cp), round(emisB,self.cp)))
self.writeIndented("ambientIntensity=\"%s\" " % (round(ambient,self.cp)))
self.file.write("shininess=\"%s\" " % (round(shininess,self.cp)))
self.file.write("transparency=\"%s\" />" % (round(transp,self.cp)))
self.writeIndented("\n",-1)
def writeImageTexture(self, image):
name = image.name
filename = image.filename.split('/')[-1].split('\\')[-1]
if name in self.texNames:
self.writeIndented("<ImageTexture USE=\"%s\" />\n" % self.cleanStr(name))
self.texNames[name] += 1
return
else:
self.writeIndented("<ImageTexture DEF=\"%s\" " % self.cleanStr(name), 1)
self.file.write("url=\"%s\" />" % name)
self.writeIndented("\n",-1)
self.texNames[name] = 1
def writeBackground(self, world, alltextures):
if world: worldname = world.name
else: return
blending = (world.blend_sky, world.paper_sky, world.real_sky)
# blending = world.getSkytype()
grd = world.horizon_color
# grd = world.getHor()
grd0, grd1, grd2 = grd[0], grd[1], grd[2]
sky = world.zenith_color
# sky = world.getZen()
sky0, sky1, sky2 = sky[0], sky[1], sky[2]
mix0, mix1, mix2 = grd[0]+sky[0], grd[1]+sky[1], grd[2]+sky[2]
mix0, mix1, mix2 = mix0/2, mix1/2, mix2/2
self.file.write("<Background ")
if worldname not in self.namesStandard:
self.file.write("DEF=\"%s\" " % self.secureName(worldname))
# No Skytype - just Hor color
if blending == (0, 0, 0):
# if blending == 0:
self.file.write("groundColor=\"%s %s %s\" " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
self.file.write("skyColor=\"%s %s %s\" " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
# Blend Gradient
elif blending == (1, 0, 0):
# elif blending == 1:
self.file.write("groundColor=\"%s %s %s, " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
self.file.write("%s %s %s\" groundAngle=\"1.57, 1.57\" " %(round(mix0,self.cp), round(mix1,self.cp), round(mix2,self.cp)))
self.file.write("skyColor=\"%s %s %s, " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
self.file.write("%s %s %s\" skyAngle=\"1.57, 1.57\" " %(round(mix0,self.cp), round(mix1,self.cp), round(mix2,self.cp)))
# Blend+Real Gradient Inverse
elif blending == (1, 0, 1):
# elif blending == 3:
self.file.write("groundColor=\"%s %s %s, " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
self.file.write("%s %s %s\" groundAngle=\"1.57, 1.57\" " %(round(mix0,self.cp), round(mix1,self.cp), round(mix2,self.cp)))
self.file.write("skyColor=\"%s %s %s, " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
self.file.write("%s %s %s\" skyAngle=\"1.57, 1.57\" " %(round(mix0,self.cp), round(mix1,self.cp), round(mix2,self.cp)))
# Paper - just Zen Color
elif blending == (0, 0, 1):
# elif blending == 4:
self.file.write("groundColor=\"%s %s %s\" " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
self.file.write("skyColor=\"%s %s %s\" " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
# Blend+Real+Paper - complex gradient
elif blending == (1, 1, 1):
# elif blending == 7:
self.writeIndented("groundColor=\"%s %s %s, " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
self.writeIndented("%s %s %s\" groundAngle=\"1.57, 1.57\" " %(round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
self.writeIndented("skyColor=\"%s %s %s, " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
self.writeIndented("%s %s %s\" skyAngle=\"1.57, 1.57\" " %(round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
# Any Other two colors
else:
self.file.write("groundColor=\"%s %s %s\" " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
self.file.write("skyColor=\"%s %s %s\" " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
alltexture = len(alltextures)
for i in range(alltexture):
tex = alltextures[i]
if tex.type != 'IMAGE' or tex.image == None:
continue
namemat = tex.name
# namemat = alltextures[i].name
pic = tex.image
# using .expandpath just in case, os.path may not expect //
basename = os.path.basename(bpy.sys.expandpath(pic.filename))
pic = alltextures[i].image
# pic = alltextures[i].getImage()
if (namemat == "back") and (pic != None):
self.file.write("\n\tbackUrl=\"%s\" " % basename)
# self.file.write("\n\tbackUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
elif (namemat == "bottom") and (pic != None):
self.writeIndented("bottomUrl=\"%s\" " % basename)
# self.writeIndented("bottomUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
elif (namemat == "front") and (pic != None):
self.writeIndented("frontUrl=\"%s\" " % basename)
# self.writeIndented("frontUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
elif (namemat == "left") and (pic != None):
self.writeIndented("leftUrl=\"%s\" " % basename)
# self.writeIndented("leftUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
elif (namemat == "right") and (pic != None):
self.writeIndented("rightUrl=\"%s\" " % basename)
# self.writeIndented("rightUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
elif (namemat == "top") and (pic != None):
self.writeIndented("topUrl=\"%s\" " % basename)
# self.writeIndented("topUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
self.writeIndented("/>\n\n")
##########################################################
# export routine
##########################################################
def export(self, scene, world, alltextures,\
EXPORT_APPLY_MODIFIERS = False,\
EXPORT_TRI= False,\
):
print("Info: starting X3D export to " + self.filename + "...")
self.writeHeader()
# self.writeScript()
self.writeNavigationInfo(scene)
self.writeBackground(world, alltextures)
self.writeFog(world)
self.proto = 0
# # COPIED FROM OBJ EXPORTER
# if EXPORT_APPLY_MODIFIERS:
# temp_mesh_name = '~tmp-mesh'
# # Get the container mesh. - used for applying modifiers and non mesh objects.
# containerMesh = meshName = tempMesh = None
# for meshName in Blender.NMesh.GetNames():
# if meshName.startswith(temp_mesh_name):
# tempMesh = Mesh.Get(meshName)
# if not tempMesh.users:
# containerMesh = tempMesh
# if not containerMesh:
# containerMesh = Mesh.New(temp_mesh_name)
# --------------------------
for ob_main in [o for o in scene.objects if o.is_visible()]:
# for ob_main in scene.objects.context:
free, derived = create_derived_objects(ob_main)
if derived == None: continue
for ob, ob_mat in derived:
# for ob, ob_mat in BPyObject.getDerivedObjects(ob_main):
objType=ob.type
objName=ob.name
self.matonly = 0
if objType == "CAMERA":
# if objType == "Camera":
self.writeViewpoint(ob, ob_mat, scene)
elif objType in ("MESH", "CURVE", "SURF", "TEXT") :
# elif objType in ("Mesh", "Curve", "Surf", "Text") :
if EXPORT_APPLY_MODIFIERS or objType != 'MESH':
# if EXPORT_APPLY_MODIFIERS or objType != 'Mesh':
me = ob.create_mesh(EXPORT_APPLY_MODIFIERS, 'PREVIEW')
# me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, False, scene)
else:
me = ob.data
# me = ob.getData(mesh=1)
self.writeIndexedFaceSet(ob, me, ob_mat, world, EXPORT_TRI = EXPORT_TRI)
# free mesh created with create_mesh()
if me != ob.data:
bpy.data.remove_mesh(me)
elif objType == "LAMP":
# elif objType == "Lamp":
data= ob.data
datatype=data.type
if datatype == 'POINT':
# if datatype == Lamp.Types.Lamp:
self.writePointLight(ob, ob_mat, data, world)
elif datatype == 'SPOT':
# elif datatype == Lamp.Types.Spot:
self.writeSpotLight(ob, ob_mat, data, world)
elif datatype == 'SUN':
# elif datatype == Lamp.Types.Sun:
self.writeDirectionalLight(ob, ob_mat, data, world)
else:
self.writeDirectionalLight(ob, ob_mat, data, world)
# do you think x3d could document what to do with dummy objects?
#elif objType == "Empty" and objName != "Empty":
# self.writeNode(ob, ob_mat)
else:
#print "Info: Ignoring [%s], object type [%s] not handle yet" % (object.name,object.getType)
pass
if free:
free_derived_objects(ob_main)
self.file.write("\n</Scene>\n</X3D>")
# if EXPORT_APPLY_MODIFIERS:
# if containerMesh:
# containerMesh.verts = None
self.cleanup()
##########################################################
# Utility methods
##########################################################
def cleanup(self):
self.file.close()
self.texNames={}
self.matNames={}
self.indentLevel=0
print("Info: finished X3D export to %s\n" % self.filename)
def cleanStr(self, name, prefix='rsvd_'):
"""cleanStr(name,prefix) - try to create a valid VRML DEF name from object name"""
newName=name[:]
if len(newName) == 0:
self.nodeID+=1
return "%s%d" % (prefix, self.nodeID)
if newName in self.namesReserved:
newName='%s%s' % (prefix,newName)
if newName[0].isdigit():
newName='%s%s' % ('_',newName)
for bad in [' ','"','#',"'",',','.','[','\\',']','{','}']:
newName=newName.replace(bad,'_')
return newName
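# For example (illustrative): cleanStr("12 my.mesh") -> "_12_my_mesh", and
# cleanStr("Group") -> "rsvd_Group" because "Group" is a reserved X3D node name.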
def countIFSSetsNeeded(self, mesh, imageMap, sided, vColors):
"""
countIFSSetsNeeded() - should look at a blender mesh to determine
how many VRML IndexedFaceSets or IndexedLineSets are needed. A
new mesh is created under the following conditions:
o - split by UV Textures / one per mesh
o - split by face, one sided and two sided
o - split by smooth and flat faces
o - split when faces only have 2 vertices * needs to be an IndexedLineSet
"""
imageNameMap={}
faceMap={}
nFaceIndx=0
if mesh.active_uv_texture:
# if mesh.faceUV:
for face in mesh.active_uv_texture.data:
# for face in mesh.faces:
sidename='';
if face.twoside:
# if face.mode & Mesh.FaceModes.TWOSIDE:
sidename='two'
else:
sidename='one'
if sidename in sided:
sided[sidename]+=1
else:
sided[sidename]=1
image = face.image
if image:
faceName="%s_%s" % (face.image.name, sidename);
try:
imageMap[faceName].append(face)
except:
imageMap[faceName]=[face.image.name,sidename,face]
if self.verbose > 2:
for faceName in imageMap.keys():
ifs=imageMap[faceName]
print("Debug: faceName=%s image=%s, solid=%s facecnt=%d" % \
(faceName, ifs[0], ifs[1], len(ifs)-2))
return len(imageMap)
def faceToString(self,face):
print("Debug: face.flag=0x%x (bitflags)" % face.flag)
if face.sel:
print("Debug: face.sel=true")
print("Debug: face.mode=0x%x (bitflags)" % face.mode)
if face.mode & Mesh.FaceModes.TWOSIDE:
print("Debug: face.mode twosided")
print("Debug: face.transp=0x%x (enum)" % face.transp)
if face.transp == Mesh.FaceTranspModes.SOLID:
print("Debug: face.transp.SOLID")
if face.image:
print("Debug: face.image=%s" % face.image.name)
print("Debug: face.materialIndex=%d" % face.materialIndex)
# XXX not used
# def getVertexColorByIndx(self, mesh, indx):
# c = None
# for face in mesh.faces:
# j=0
# for vertex in face.v:
# if vertex.index == indx:
# c=face.col[j]
# break
# j=j+1
# if c: break
# return c
def meshToString(self,mesh):
# print("Debug: mesh.hasVertexUV=%d" % mesh.vertexColors)
print("Debug: mesh.faceUV=%d" % (len(mesh.uv_textures) > 0))
# print("Debug: mesh.faceUV=%d" % mesh.faceUV)
print("Debug: mesh.hasVertexColours=%d" % (len(mesh.vertex_colors) > 0))
# print("Debug: mesh.hasVertexColours=%d" % mesh.hasVertexColours())
print("Debug: mesh.verts=%d" % len(mesh.verts))
print("Debug: mesh.faces=%d" % len(mesh.faces))
print("Debug: mesh.materials=%d" % len(mesh.materials))
def rgbToFS(self, c):
s="%s %s %s" % (round(c[0]/255.0,self.cp),
round(c[1]/255.0,self.cp),
round(c[2]/255.0,self.cp))
# s="%s %s %s" % (
# round(c.r/255.0,self.cp),
# round(c.g/255.0,self.cp),
# round(c.b/255.0,self.cp))
return s
def computeDirection(self, mtx):
x,y,z=(0,-1.0,0) # point down
ax,ay,az = (mtx*MATWORLD).toEuler()
# ax *= DEG2RAD
# ay *= DEG2RAD
# az *= DEG2RAD
# rot X
x1=x
y1=y*math.cos(ax)-z*math.sin(ax)
z1=y*math.sin(ax)+z*math.cos(ax)
# rot Y
x2=x1*math.cos(ay)+z1*math.sin(ay)
y2=y1
z2=z1*math.cos(ay)-x1*math.sin(ay)
# rot Z
x3=x2*math.cos(az)-y2*math.sin(az)
y3=x2*math.sin(az)+y2*math.cos(az)
z3=z2
return [x3,y3,z3]
# swap Y and Z to handle axis difference between Blender and VRML
#------------------------------------------------------------------------
def rotatePointForVRML(self, v):
x = v[0]
y = v[2]
z = -v[1]
vrmlPoint=[x, y, z]
return vrmlPoint
# For writing well formed VRML code
#------------------------------------------------------------------------
def writeIndented(self, s, inc=0):
if inc < 1:
self.indentLevel = self.indentLevel + inc
spaces=""
for x in range(self.indentLevel):
spaces = spaces + "\t"
self.file.write(spaces + s)
if inc > 0:
self.indentLevel = self.indentLevel + inc
# Converts a Euler to three new Quaternions
# Angles of Euler are passed in as radians
#------------------------------------------------------------------------
def eulerToQuaternions(self, x, y, z):
Qx = [math.cos(x/2), math.sin(x/2), 0, 0]
Qy = [math.cos(y/2), 0, math.sin(y/2), 0]
Qz = [math.cos(z/2), 0, 0, math.sin(z/2)]
quaternionVec=[Qx,Qy,Qz]
return quaternionVec
# Multiply two Quaternions together to get a new Quaternion
#------------------------------------------------------------------------
def multiplyQuaternions(self, Q1, Q2):
result = [((Q1[0] * Q2[0]) - (Q1[1] * Q2[1]) - (Q1[2] * Q2[2]) - (Q1[3] * Q2[3])),
((Q1[0] * Q2[1]) + (Q1[1] * Q2[0]) + (Q1[2] * Q2[3]) - (Q1[3] * Q2[2])),
((Q1[0] * Q2[2]) + (Q1[2] * Q2[0]) + (Q1[3] * Q2[1]) - (Q1[1] * Q2[3])),
((Q1[0] * Q2[3]) + (Q1[3] * Q2[0]) + (Q1[1] * Q2[2]) - (Q1[2] * Q2[1]))]
return result
# Convert a Quaternion to an Angle Axis (ax, ay, az, angle)
# angle is in radians
#------------------------------------------------------------------------
def quaternionToAngleAxis(self, Qf):
scale = math.pow(Qf[1],2) + math.pow(Qf[2],2) + math.pow(Qf[3],2)
ax = Qf[1]
ay = Qf[2]
az = Qf[3]
if scale > .0001:
ax/=scale
ay/=scale
az/=scale
angle = 2 * math.acos(Qf[0])
result = [ax, ay, az, angle]
return result
##########################################################
# Callbacks, needed before Main
##########################################################
def x3d_export(filename,
context,
EXPORT_APPLY_MODIFIERS=False,
EXPORT_TRI=False,
EXPORT_GZIP=False):
if EXPORT_GZIP:
if not filename.lower().endswith('.x3dz'):
filename = '.'.join(filename.split('.')[:-1]) + '.x3dz'
else:
if not filename.lower().endswith('.x3d'):
filename = '.'.join(filename.split('.')[:-1]) + '.x3d'
scene = context.scene
# scene = Blender.Scene.GetCurrent()
world = scene.world
# XXX these are global textures while .Get() returned only scene's?
alltextures = bpy.data.textures
# alltextures = Blender.Texture.Get()
wrlexport=x3d_class(filename)
wrlexport.export(\
scene,\
world,\
alltextures,\
\
EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS,\
EXPORT_TRI = EXPORT_TRI,\
)
def x3d_export_ui(filename):
if not filename.endswith(extension):
filename += extension
#if _safeOverwrite and sys.exists(filename):
# result = Draw.PupMenu("File Already Exists, Overwrite?%t|Yes%x1|No%x0")
#if(result != 1):
# return
# Get user options
EXPORT_APPLY_MODIFIERS = Draw.Create(1)
EXPORT_TRI = Draw.Create(0)
EXPORT_GZIP = Draw.Create( filename.lower().endswith('.x3dz') )
# Get USER Options
pup_block = [\
('Apply Modifiers', EXPORT_APPLY_MODIFIERS, 'Use transformed mesh data from each object.'),\
('Triangulate', EXPORT_TRI, 'Triangulate quads.'),\
('Compress', EXPORT_GZIP, 'GZip the resulting file, requires a full python install'),\
]
if not Draw.PupBlock('Export...', pup_block):
return
Blender.Window.EditMode(0)
Blender.Window.WaitCursor(1)
x3d_export(filename,\
EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS.val,\
EXPORT_TRI = EXPORT_TRI.val,\
EXPORT_GZIP = EXPORT_GZIP.val\
)
Blender.Window.WaitCursor(0)
#########################################################
# main routine
#########################################################
# if __name__ == '__main__':
# Blender.Window.FileSelector(x3d_export_ui,"Export X3D", Blender.Get('filename').replace('.blend', '.x3d'))
class EXPORT_OT_x3d(bpy.types.Operator):
'''
X3D Exporter
'''
__idname__ = "export.x3d"
__label__ = 'Export X3D'
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
__props__ = [
bpy.props.StringProperty(attr="filename", name="File Name", description="File name used for exporting the X3D file", maxlen=1024, default=""),
bpy.props.BoolProperty(attr="apply_modifiers", name="Apply Modifiers", description="Use transformed mesh data from each object.", default=True),
bpy.props.BoolProperty(attr="triangulate", name="Triangulate", description="Triangulate quads.", default=False),
bpy.props.BoolProperty(attr="compress", name="Compress", description="GZip the resulting file, requires a full python install.", default=False),
]
def execute(self, context):
x3d_export(self.filename, context, self.apply_modifiers, self.triangulate, self.compress)
return ('FINISHED',)
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self.__operator__)
return ('RUNNING_MODAL',)
def poll(self, context): # Poll isn't working yet
print("Poll")
return context.active_object != None
bpy.ops.add(EXPORT_OT_x3d)
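# Illustrative test call, mirroring the OBJ exporter above (the path is an example only):
#
#   if __name__ == "__main__":
#       bpy.ops.EXPORT_OT_x3d(filename="/tmp/test.x3d")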
# NOTES
# - blender version is hardcoded

1166
release/io/import_3ds.py Normal file
View File

@@ -0,0 +1,1166 @@
#!BPY
"""
Name: '3D Studio (.3ds)...'
Blender: 244
Group: 'Import'
Tooltip: 'Import from 3DS file format (.3ds)'
"""
__author__= ['Bob Holcomb', 'Richard Lärkäng', 'Damien McGinnes', 'Campbell Barton', 'Mario Lapin']
__url__ = ("blenderartists.org", "www.blender.org", "www.gametutorials.com", "lib3ds.sourceforge.net/")
__version__= '0.996'
__bpydoc__= '''\
3ds Importer
This script imports a 3ds file and the materials into Blender for editing.
Loader is based on 3ds loader from www.gametutorials.com (Thanks DigiBen).
0.996 by Mario Lapin (mario.lapin@gmail.com) 13/04/200 <br>
- Implemented workaround to correct association between name, geometry and materials of
imported meshes.
Without this patch, version 0.995 of this importer would associate each mesh object with the
geometry and materials of the previously parsed mesh object. As a result, the name of the
first mesh object would be thrown away, and the name of the last mesh object would be
automatically suffixed with '.001'. No object would disappear, but object
names and materials would be completely jumbled.
0.995 by Campbell Barton<br>
- workaround for buggy mesh vert delete
- minor tweaks
0.99 by Bob Holcomb<br>
- added support for floating point color values that previously broke on import.
0.98 by Campbell Barton<br>
- import faces and verts to lists instead of a mesh, convert to a mesh later
- use new index mapping feature of mesh to re-map faces that were not added.
0.97 by Campbell Barton<br>
- Strip material names of spaces
- Added import as instance to import the 3ds into its own
scene and add a group instance to the current scene
- New option to scale down imported objects so they are within a limited bounding area.
0.96 by Campbell Barton<br>
- Added workaround for bug in setting UV's for Zero vert index UV faces.
- Removed unique name function, let blender make the names unique.
0.95 by Campbell Barton<br>
- Removed workarounds for Blender 2.41
- Mesh objects split by material - many 3ds objects use more than 16 materials per mesh.
- Removed a lot of unneeded variable creation.
0.94 by Campbell Barton<br>
- Face import tested to be about overall 16x speedup over 0.93.
- Material importing speedup.
- Tested with more models.
- Support some corrupt models.
0.93 by Campbell Barton<br>
- Tested with 400 3ds files from turbosquid and samples.
- Tactfully ignore faces that used the same verts twice.
- Rollback to 0.83 sloppy un-reorganized code, this broke UV coord loading.
- Converted from NMesh to Mesh.
- Faster and cleaner new names.
- Use external comprehensive image loader.
- Re-integrated 0.92 and 0.9 changes
- Fixes for 2.41 compat.
- Non textured faces do not use a texture flag.
0.92<br>
- Added support for diffuse, alpha, spec, bump maps in a single material
0.9<br>
- Reorganized code into object/material block functions<br>
- Use of Matrix() to copy matrix data<br>
- added support for material transparency<br>
0.83 2005-08-07: Campbell Barton
- Aggressive image finding and case insensitivity for POSIX systems.
0.82a 2005-07-22
- image texture loading (both for face uv and renderer)
0.82 - image texture loading (for face uv)
0.81a (fork- not 0.9) Campbell Barton 2005-06-08
- Simplified import code
- Never overwrite data
- Faster list handling
- Leaves import selected
0.81 Damien McGinnes 2005-01-09
- handle missing images better
0.8 Damien McGinnes 2005-01-08
- copies sticky UV coords to face ones
- handles images better
- Recommend that you run 'RemoveDoubles' on each imported mesh after using this script
'''
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Script copyright (C) Bob Holcomb
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
# Importing modules
import os
import time
import struct
from import_obj import unpack_face_list, load_image
import bpy
import Mathutils
# import Blender
# from Blender import Mesh, Object, Material, Image, Texture, Lamp, Mathutils
# from Blender.Mathutils import Vector
# import BPyImage
# import BPyMessages
# try:
# from struct import calcsize, unpack
# except:
# calcsize= unpack= None
# # If python version is less than 2.4, try to get set stuff from module
# try:
# set
# except:
# from sets import Set as set
BOUNDS_3DS = []
#this script imports uvcoords as sticky vertex coords
#this parameter enables copying these to face uv coords
#which should be more useful.
def createBlenderTexture(material, name, image):
texture = bpy.data.textures.new(name)
texture.setType('Image')
texture.image = image
material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL)
######################################################
# Data Structures
######################################################
#Some of the chunks that we will see
#----- Primary Chunk, at the beginning of each file
PRIMARY = int('0x4D4D',16)
#------ Main Chunks
OBJECTINFO = int('0x3D3D',16); #This gives the version of the mesh and is found right before the material and object information
VERSION = int('0x0002',16); #This gives the version of the .3ds file
EDITKEYFRAME= int('0xB000',16); #This is the header for all of the key frame info
#------ sub defines of OBJECTINFO
MATERIAL = 45055 #0xAFFF // This stored the texture info
OBJECT = 16384 #0x4000 // This stores the faces, vertices, etc...
#>------ sub defines of MATERIAL
#------ sub defines of MATERIAL_BLOCK
MAT_NAME = int('0xA000',16) # This holds the material name
MAT_AMBIENT = int('0xA010',16) # Ambient color of the object/material
MAT_DIFFUSE = int('0xA020',16) # This holds the color of the object/material
MAT_SPECULAR = int('0xA030',16) # SPecular color of the object/material
MAT_SHINESS = int('0xA040',16) # Shininess of the object/material
MAT_TRANSPARENCY= int('0xA050',16) # Transparency value of material
MAT_SELF_ILLUM = int('0xA080',16) # Self Illumination value of material
MAT_WIRE = int('0xA085',16) # Only renders wireframe
MAT_TEXTURE_MAP = int('0xA200',16) # This is a header for a new texture map
MAT_SPECULAR_MAP= int('0xA204',16) # This is a header for a new specular map
MAT_OPACITY_MAP = int('0xA210',16) # This is a header for a new opacity map
MAT_REFLECTION_MAP= int('0xA220',16) # This is a header for a new reflection map
MAT_BUMP_MAP = int('0xA230',16) # This is a header for a new bump map
MAT_MAP_FILENAME = int('0xA300',16) # This holds the file name of the texture
MAT_FLOAT_COLOR = int ('0x0010', 16) #color defined as 3 floats
MAT_24BIT_COLOR = int ('0x0011', 16) #color defined as 3 bytes
#>------ sub defines of OBJECT
OBJECT_MESH = int('0x4100',16); # This lets us know that we are reading a new object
OBJECT_LAMP = int('0x4600',16); # This lets us know we are reading a light object
OBJECT_LAMP_SPOT = int('0x4610',16); # The light is a spotlight.
OBJECT_LAMP_OFF = int('0x4620',16); # The light is off.
OBJECT_LAMP_ATTENUATE = int('0x4625',16);
OBJECT_LAMP_RAYSHADE = int('0x4627',16);
OBJECT_LAMP_SHADOWED = int('0x4630',16);
OBJECT_LAMP_LOCAL_SHADOW = int('0x4640',16);
OBJECT_LAMP_LOCAL_SHADOW2 = int('0x4641',16);
OBJECT_LAMP_SEE_CONE = int('0x4650',16);
OBJECT_LAMP_SPOT_RECTANGULAR = int('0x4651',16);
OBJECT_LAMP_SPOT_OVERSHOOT = int('0x4652',16);
OBJECT_LAMP_SPOT_PROJECTOR = int('0x4653',16);
OBJECT_LAMP_EXCLUDE = int('0x4654',16);
OBJECT_LAMP_RANGE = int('0x4655',16);
OBJECT_LAMP_ROLL = int('0x4656',16);
OBJECT_LAMP_SPOT_ASPECT = int('0x4657',16);
OBJECT_LAMP_RAY_BIAS = int('0x4658',16);
OBJECT_LAMP_INNER_RANGE = int('0x4659',16);
OBJECT_LAMP_OUTER_RANGE = int('0x465A',16);
OBJECT_LAMP_MULTIPLIER = int('0x465B',16);
OBJECT_LAMP_AMBIENT_LIGHT = int('0x4680',16);
OBJECT_CAMERA= int('0x4700',16); # This lets us know we are reading a camera object
#>------ sub defines of CAMERA
OBJECT_CAM_RANGES= int('0x4720',16); # The camera range values
#>------ sub defines of OBJECT_MESH
OBJECT_VERTICES = int('0x4110',16); # The object's vertices
OBJECT_FACES = int('0x4120',16); # The object's faces
OBJECT_MATERIAL = int('0x4130',16); # This is found if the object has a material, either texture map or color
OBJECT_UV = int('0x4140',16); # The UV texture coordinates
OBJECT_TRANS_MATRIX = int('0x4160',16); # The Object Matrix
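# Rough sketch of how these chunks nest in a typical .3ds file (IDs as defined above):
#
#   PRIMARY (0x4D4D)
#     VERSION (0x0002)
#     OBJECTINFO (0x3D3D)
#       MATERIAL (0xAFFF)
#         MAT_NAME, MAT_DIFFUSE, MAT_TEXTURE_MAP -> MAT_MAP_FILENAME, ...
#       OBJECT (0x4000)
#         OBJECT_MESH (0x4100)
#           OBJECT_VERTICES, OBJECT_FACES -> OBJECT_MATERIAL, OBJECT_UV, OBJECT_TRANS_MATRIX
#     EDITKEYFRAME (0xB000)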
global scn
scn = None
#the chunk class
class chunk:
ID = 0
length = 0
bytes_read = 0
#we don't read in the bytes_read, we compute that
binary_format='<HI'
def __init__(self):
self.ID = 0
self.length = 0
self.bytes_read = 0
def dump(self):
print('ID: ', self.ID)
print('ID in hex: ', hex(self.ID))
print('length: ', self.length)
print('bytes_read: ', self.bytes_read)
def read_chunk(file, chunk):
temp_data = file.read(struct.calcsize(chunk.binary_format))
data = struct.unpack(chunk.binary_format, temp_data)
chunk.ID = data[0]
chunk.length = data[1]
#update the bytes read function
chunk.bytes_read = 6
#if debugging
#chunk.dump()
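# Illustrative example of the 6-byte chunk header ('<HI' = unsigned short ID + unsigned int length);
# struct.calcsize('<HI') == 6, which is why bytes_read is set to 6 above:
#
#   struct.unpack('<HI', b'\x4d\x4d\x00\x01\x00\x00')  # -> (0x4D4D, 256)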
def read_string(file):
#read in the characters till we get a null character
s = b''
# s = ''
while not s.endswith(b'\x00'):
# while not s.endswith('\x00'):
s += struct.unpack('<c', file.read(1))[0]
# s += struct.unpack( '<c', file.read(1) )[0]
#print 'string: ',s
s = str(s[:-1], 'ASCII')
# print("read string", s)
#remove the null character from the string
return s
# return s[:-1]
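# For example (illustrative): the bytes b'Cube\x00' in the file yield the Python
# string 'Cube'; the trailing null byte is stripped before ASCII decoding.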
######################################################
# IMPORT
######################################################
def process_next_object_chunk(file, previous_chunk):
new_chunk = chunk()
temp_chunk = chunk()
while (previous_chunk.bytes_read < previous_chunk.length):
#read the next chunk
read_chunk(file, new_chunk)
def skip_to_end(file, skip_chunk):
buffer_size = skip_chunk.length - skip_chunk.bytes_read
binary_format='%ic' % buffer_size
temp_data = file.read(struct.calcsize(binary_format))
skip_chunk.bytes_read += buffer_size
def add_texture_to_material(image, texture, material, mapto):
# if mapto=='DIFFUSE':
# map = Texture.MapTo.COL
# elif mapto=='SPECULAR':
# map = Texture.MapTo.SPEC
# elif mapto=='OPACITY':
# map = Texture.MapTo.ALPHA
# elif mapto=='BUMP':
# map = Texture.MapTo.NOR
# else:
if mapto not in ("COLOR", "SPECULARITY", "ALPHA", "NORMAL"):
print('\tError: Cannot map to "%s"\n\tassuming diffuse color. modify material "%s" later.' % (mapto, material.name))
mapto = "COLOR"
# map = Texture.MapTo.COL
if image: texture.image = image
# if image: texture.setImage(image) # double check its an image.
material.add_texture(texture, "UV", mapto)
# free_tex_slots = [i for i, tex in enumerate( material.getTextures() ) if tex == None]
# if not free_tex_slots:
# print('/tError: Cannot add "%s" map. 10 Texture slots alredy used.' % mapto)
# else:
# material.setTexture(free_tex_slots[0],texture,Texture.TexCo.UV,map)
def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH):
#print previous_chunk.bytes_read, 'BYTES READ'
contextObName = None
contextLamp = [None, None] # object, Data
contextMaterial = None
contextMatrix_rot = None # Blender.Mathutils.Matrix(); contextMatrix.identity()
#contextMatrix_tx = None # Blender.Mathutils.Matrix(); contextMatrix.identity()
contextMesh_vertls = None
contextMesh_facels = None
contextMeshMaterials = {} # matname:[face_idxs]
contextMeshUV = None
TEXTURE_DICT = {}
MATDICT = {}
# TEXMODE = Mesh.FaceModes['TEX']
# Localspace variable names, faster.
STRUCT_SIZE_1CHAR = struct.calcsize('c')
STRUCT_SIZE_2FLOAT = struct.calcsize('2f')
STRUCT_SIZE_3FLOAT = struct.calcsize('3f')
STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize('H')
STRUCT_SIZE_4UNSIGNED_SHORT = struct.calcsize('4H')
STRUCT_SIZE_4x3MAT = struct.calcsize('ffffffffffff')
_STRUCT_SIZE_4x3MAT = struct.calcsize('fffffffffffff')
# STRUCT_SIZE_4x3MAT = calcsize('ffffffffffff')
# print STRUCT_SIZE_4x3MAT, ' STRUCT_SIZE_4x3MAT'
def putContextMesh(myContextMesh_vertls, myContextMesh_facels, myContextMeshMaterials):
materialFaces = set() # faces that have a material. Can optimize?
# Now make copies with assigned materials.
def makeMeshMaterialCopy(matName, faces):
'''
Make a new mesh with only the faces that use this material.
faces can be any iterable object - containing ints.
'''
faceVertUsers = [False] * len(myContextMesh_vertls)
ok = 0
for fIdx in faces:
for vindex in myContextMesh_facels[fIdx]:
faceVertUsers[vindex] = True
if matName != None: # if matName is none then this is a set(), meaning we are using the untextured faces and do not need to store textured faces.
materialFaces.add(fIdx)
ok = 1
if not ok:
return
myVertMapping = {}
vertMappingIndex = 0
vertsToUse = [i for i in range(len(myContextMesh_vertls)) if faceVertUsers[i]]
myVertMapping = dict( [ (ii, i) for i, ii in enumerate(vertsToUse) ] )
tempName= '%s_%s' % (contextObName, matName) # matName may be None.
bmesh = bpy.data.add_mesh(tempName)
# bmesh = bpy.data.meshes.new(tempName)
if matName == None:
img = None
else:
bmat = MATDICT[matName][1]
bmesh.add_material(bmat)
# bmesh.materials = [bmat]
try: img = TEXTURE_DICT[bmat.name]
except: img = None
# bmesh_verts = bmesh.verts
if len(vertsToUse):
bmesh.add_geometry(len(vertsToUse), 0, len(faces))
# XXX why add extra vertex?
# bmesh_verts.extend( [Vector()] )
bmesh.verts.foreach_set("co", [x for tup in [myContextMesh_vertls[i] for i in vertsToUse] for x in tup])
# bmesh_verts.extend( [myContextMesh_vertls[i] for i in vertsToUse] )
# +1 because of DUMMYVERT
bmesh.faces.foreach_set("verts", unpack_face_list([[myVertMapping[vindex] for vindex in myContextMesh_facels[fIdx]] for fIdx in faces]))
# face_mapping = bmesh.faces.extend( [ [ bmesh_verts[ myVertMapping[vindex]+1] for vindex in myContextMesh_facels[fIdx]] for fIdx in faces ], indexList=True )
if bmesh.faces and (contextMeshUV or img):
bmesh.add_uv_texture()
# bmesh.faceUV = 1
for ii, i in enumerate(faces):
# Mapped index- faces may have not been added- if so, then map to the correct index
# BUGGY API - face_mapping is not always the right length
# map_index = face_mapping[ii]
if 1:
# if map_index != None:
targetFace = bmesh.faces[ii]
# targetFace = bmesh.faces[map_index]
uf = bmesh.active_uv_texture.data[ii]
if contextMeshUV:
# v.index-1 because of the DUMMYVERT
uvs = [contextMeshUV[vindex] for vindex in myContextMesh_facels[i]]
if len(myContextMesh_facels[i]) == 3:
uf.uv1, uf.uv2, uf.uv3, uf.uv4 = uvs + [(0.0, 0.0)]
else:
uf.uv1, uf.uv2, uf.uv3, uf.uv4 = uvs
# targetFace.uv = [contextMeshUV[vindex] for vindex in myContextMesh_facels[i]]
if img:
uf.image = img
# targetFace.image = img
# bmesh.transform(contextMatrix)
ob = bpy.data.add_object("MESH", tempName)
ob.data = bmesh
SCN.add_object(ob)
# ob = SCN_OBJECTS.new(bmesh, tempName)
'''
if contextMatrix_tx:
ob.setMatrix(contextMatrix_tx)
'''
if contextMatrix_rot:
# ob.matrix = [x for row in contextMatrix_rot for x in row]
ob.matrix = contextMatrix_rot
# ob.setMatrix(contextMatrix_rot)
importedObjects.append(ob)
bmesh.update()
# bmesh.calcNormals()
for matName, faces in myContextMeshMaterials.items():
makeMeshMaterialCopy(matName, faces)
if len(materialFaces) != len(myContextMesh_facels):
# Invert material faces.
makeMeshMaterialCopy(None, set(range(len( myContextMesh_facels ))) - materialFaces)
#raise 'Some UnMaterialed faces', len(contextMesh.faces)
#a spare chunk
new_chunk = chunk()
temp_chunk = chunk()
CreateBlenderObject = False
def read_float_color(temp_chunk):
temp_data = file.read(struct.calcsize('3f'))
temp_chunk.bytes_read += 12
return [float(col) for col in struct.unpack('<3f', temp_data)]
def read_byte_color(temp_chunk):
temp_data = file.read(struct.calcsize('3B'))
temp_chunk.bytes_read += 3
return [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
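# Illustrative sketch, not part of the original importer: MAT_24BIT_COLOR chunks
# store a color as three unsigned bytes, which read_byte_color() above
# normalizes to floats in the 0.0-1.0 range, for example:
def _example_byte_color():
    raw = struct.pack('<3B', 255, 128, 0)                     # hypothetical orange, as bytes
    return [float(c)/255 for c in struct.unpack('<3B', raw)]  # -> [1.0, 0.501..., 0.0]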
def read_texture(new_chunk, temp_chunk, name, mapto):
new_texture = bpy.data.add_texture('Diffuse')
new_texture.type = 'IMAGE'
img = None
while (new_chunk.bytes_read < new_chunk.length):
#print 'MAT_TEXTURE_MAP..while', new_chunk.bytes_read, new_chunk.length
read_chunk(file, temp_chunk)
if (temp_chunk.ID == MAT_MAP_FILENAME):
texture_name = read_string(file)
img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
# add the map to the material in the right channel
if img:
add_texture_to_material(img, new_texture, contextMaterial, mapto)
dirname = os.path.dirname(FILENAME)
#loop through all the data for this chunk (previous chunk) and see what it is
while (previous_chunk.bytes_read < previous_chunk.length):
#print '\t', previous_chunk.bytes_read, 'keep going'
#read the next chunk
#print 'reading a chunk'
read_chunk(file, new_chunk)
#is it a Version chunk?
if (new_chunk.ID == VERSION):
#print 'if (new_chunk.ID == VERSION):'
#print 'found a VERSION chunk'
#read in the version of the file
#it's an unsigned short (H)
temp_data = file.read(struct.calcsize('I'))
version = struct.unpack('<I', temp_data)[0]
new_chunk.bytes_read += 4 #read the 4 bytes for the version number
#this loader works with version 3 and below, but may not with 4 and above
if (version > 3):
print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version)
#is it an object info chunk?
elif (new_chunk.ID == OBJECTINFO):
#print 'elif (new_chunk.ID == OBJECTINFO):'
# print 'found an OBJECTINFO chunk'
process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH)
#keep track of how much we read in the main chunk
new_chunk.bytes_read += temp_chunk.bytes_read
#is it an object chunk?
elif (new_chunk.ID == OBJECT):
if CreateBlenderObject:
putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
contextMesh_vertls = []; contextMesh_facels = []
## preparing to receive the next object
contextMeshMaterials = {} # matname:[face_idxs]
contextMeshUV = None
#contextMesh.vertexUV = 1 # Make sticky coords.
# Reset matrix
contextMatrix_rot = None
#contextMatrix_tx = None
CreateBlenderObject = True
tempName = read_string(file)
contextObName = tempName
new_chunk.bytes_read += len(tempName)+1
#is it a material chunk?
elif (new_chunk.ID == MATERIAL):
# print("read material")
#print 'elif (new_chunk.ID == MATERIAL):'
contextMaterial = bpy.data.add_material('Material')
# contextMaterial = bpy.data.materials.new('Material')
elif (new_chunk.ID == MAT_NAME):
#print 'elif (new_chunk.ID == MAT_NAME):'
material_name = read_string(file)
# print("material name", material_name)
#plus one for the null character that ended the string
new_chunk.bytes_read += len(material_name)+1
contextMaterial.name = material_name.rstrip() # remove trailing whitespace
MATDICT[material_name]= (contextMaterial.name, contextMaterial)
elif (new_chunk.ID == MAT_AMBIENT):
#print 'elif (new_chunk.ID == MAT_AMBIENT):'
read_chunk(file, temp_chunk)
if (temp_chunk.ID == MAT_FLOAT_COLOR):
contextMaterial.mirror_color = read_float_color(temp_chunk)
# temp_data = file.read(struct.calcsize('3f'))
# temp_chunk.bytes_read += 12
# contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
elif (temp_chunk.ID == MAT_24BIT_COLOR):
contextMaterial.mirror_color = read_byte_color(temp_chunk)
# temp_data = file.read(struct.calcsize('3B'))
# temp_chunk.bytes_read += 3
# contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
elif (new_chunk.ID == MAT_DIFFUSE):
#print 'elif (new_chunk.ID == MAT_DIFFUSE):'
read_chunk(file, temp_chunk)
if (temp_chunk.ID == MAT_FLOAT_COLOR):
contextMaterial.diffuse_color = read_float_color(temp_chunk)
# temp_data = file.read(struct.calcsize('3f'))
# temp_chunk.bytes_read += 12
# contextMaterial.rgbCol = [float(col) for col in struct.unpack('<3f', temp_data)]
elif (temp_chunk.ID == MAT_24BIT_COLOR):
contextMaterial.diffuse_color = read_byte_color(temp_chunk)
# temp_data = file.read(struct.calcsize('3B'))
# temp_chunk.bytes_read += 3
# contextMaterial.rgbCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
else:
skip_to_end(file, temp_chunk)
# print("read material diffuse color", contextMaterial.diffuse_color)
new_chunk.bytes_read += temp_chunk.bytes_read
elif (new_chunk.ID == MAT_SPECULAR):
#print 'elif (new_chunk.ID == MAT_SPECULAR):'
read_chunk(file, temp_chunk)
if (temp_chunk.ID == MAT_FLOAT_COLOR):
contextMaterial.specular_color = read_float_color(temp_chunk)
# temp_data = file.read(struct.calcsize('3f'))
# temp_chunk.bytes_read += 12
# contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
elif (temp_chunk.ID == MAT_24BIT_COLOR):
contextMaterial.specular_color = read_byte_color(temp_chunk)
# temp_data = file.read(struct.calcsize('3B'))
# temp_chunk.bytes_read += 3
# contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
elif (new_chunk.ID == MAT_TEXTURE_MAP):
read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR")
# #print 'elif (new_chunk.ID==MAT_TEXTURE_MAP):'
# new_texture= bpy.data.textures.new('Diffuse')
# new_texture.setType('Image')
# img = None
# while (new_chunk.bytes_read<new_chunk.length):
# #print 'MAT_TEXTURE_MAP..while', new_chunk.bytes_read, new_chunk.length
# read_chunk(file, temp_chunk)
# if (temp_chunk.ID==MAT_MAP_FILENAME):
# texture_name=read_string(file)
# #img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME)
# img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH)
# new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
# else:
# skip_to_end(file, temp_chunk)
# new_chunk.bytes_read+= temp_chunk.bytes_read
# #add the map to the material in the right channel
# if img:
# add_texture_to_material(img, new_texture, contextMaterial, 'DIFFUSE')
elif (new_chunk.ID == MAT_SPECULAR_MAP):
read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY")
# #print 'elif (new_chunk.ID == MAT_SPECULAR_MAP):'
# new_texture = bpy.data.textures.new('Specular')
# new_texture.setType('Image')
# img = None
# while (new_chunk.bytes_read < new_chunk.length):
# read_chunk(file, temp_chunk)
# if (temp_chunk.ID == MAT_MAP_FILENAME):
# texture_name = read_string(file)
# #img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME)
# img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH)
# new_chunk.bytes_read+= (len(texture_name)+1) #plus one for the null character that gets removed
# else:
# skip_to_end(file, temp_chunk)
# new_chunk.bytes_read += temp_chunk.bytes_read
# #add the map to the material in the right channel
# if img:
# add_texture_to_material(img, new_texture, contextMaterial, 'SPECULAR')
elif (new_chunk.ID == MAT_OPACITY_MAP):
read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA")
# #print 'new_texture = Blender.Texture.New('Opacity')'
# new_texture = bpy.data.textures.new('Opacity')
# new_texture.setType('Image')
# img = None
# while (new_chunk.bytes_read < new_chunk.length):
# read_chunk(file, temp_chunk)
# if (temp_chunk.ID == MAT_MAP_FILENAME):
# texture_name = read_string(file)
# #img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME)
# img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH)
# new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
# else:
# skip_to_end(file, temp_chunk)
# new_chunk.bytes_read += temp_chunk.bytes_read
# #add the map to the material in the right channel
# if img:
# add_texture_to_material(img, new_texture, contextMaterial, 'OPACITY')
elif (new_chunk.ID == MAT_BUMP_MAP):
read_texture(new_chunk, temp_chunk, "Bump", "NORMAL")
# #print 'elif (new_chunk.ID == MAT_BUMP_MAP):'
# new_texture = bpy.data.textures.new('Bump')
# new_texture.setType('Image')
# img = None
# while (new_chunk.bytes_read < new_chunk.length):
# read_chunk(file, temp_chunk)
# if (temp_chunk.ID == MAT_MAP_FILENAME):
# texture_name = read_string(file)
# #img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME)
# img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH)
# new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
# else:
# skip_to_end(file, temp_chunk)
# new_chunk.bytes_read += temp_chunk.bytes_read
# #add the map to the material in the right channel
# if img:
# add_texture_to_material(img, new_texture, contextMaterial, 'BUMP')
elif (new_chunk.ID == MAT_TRANSPARENCY):
#print 'elif (new_chunk.ID == MAT_TRANSPARENCY):'
read_chunk(file, temp_chunk)
temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
temp_chunk.bytes_read += 2
contextMaterial.alpha = 1-(float(struct.unpack('<H', temp_data)[0])/100)
new_chunk.bytes_read += temp_chunk.bytes_read
elif (new_chunk.ID == OBJECT_LAMP): # Basic lamp support.
temp_data = file.read(STRUCT_SIZE_3FLOAT)
x,y,z = struct.unpack('<3f', temp_data)
new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
ob = bpy.data.add_object("LAMP", "Lamp")
ob.data = bpy.data.add_lamp("Lamp")
SCN.add_object(ob)
contextLamp[1]= ob.data
# contextLamp[1]= bpy.data.lamps.new()
contextLamp[0]= ob
# contextLamp[0]= SCN_OBJECTS.new(contextLamp[1])
importedObjects.append(contextLamp[0])
#print 'number of faces: ', num_faces
#print x,y,z
contextLamp[0].location = (x, y, z)
# contextLamp[0].setLocation(x,y,z)
# Reset matrix
contextMatrix_rot = None
#contextMatrix_tx = None
#print contextLamp.name,
elif (new_chunk.ID == OBJECT_MESH):
# print 'Found an OBJECT_MESH chunk'
pass
elif (new_chunk.ID == OBJECT_VERTICES):
'''
Worldspace vertex locations
'''
# print 'elif (new_chunk.ID == OBJECT_VERTICES):'
temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
num_verts = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += 2
# print 'number of verts: ', num_verts
def getvert():
temp_data = struct.unpack('<3f', file.read(STRUCT_SIZE_3FLOAT))
new_chunk.bytes_read += STRUCT_SIZE_3FLOAT #12: 3 floats x 4 bytes each
return temp_data
#contextMesh.verts.extend( [Vector(),] ) # DUMMYVERT! - remove when blenders internals are fixed.
contextMesh_vertls = [getvert() for i in range(num_verts)]
#print 'object verts: bytes read: ', new_chunk.bytes_read
elif (new_chunk.ID == OBJECT_FACES):
# print 'elif (new_chunk.ID == OBJECT_FACES):'
temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
num_faces = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += 2
#print 'number of faces: ', num_faces
def getface():
# print '\ngetting a face'
temp_data = file.read(STRUCT_SIZE_4UNSIGNED_SHORT)
new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT #4 short ints x 2 bytes each
v1,v2,v3,dummy = struct.unpack('<4H', temp_data)
return v1, v2, v3
contextMesh_facels = [ getface() for i in range(num_faces) ]
elif (new_chunk.ID == OBJECT_MATERIAL):
# print 'elif (new_chunk.ID == OBJECT_MATERIAL):'
material_name = read_string(file)
new_chunk.bytes_read += len(material_name)+1 # remove 1 null character.
temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
num_faces_using_mat = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
def getmat():
temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
return struct.unpack('<H', temp_data)[0]
contextMeshMaterials[material_name]= [ getmat() for i in range(num_faces_using_mat) ]
#look up the material in all the materials
elif (new_chunk.ID == OBJECT_UV):
temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
num_uv = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += 2
def getuv():
temp_data = file.read(STRUCT_SIZE_2FLOAT)
new_chunk.bytes_read += STRUCT_SIZE_2FLOAT #2 float x 4 bytes each
return Mathutils.Vector( struct.unpack('<2f', temp_data) )
# return Vector( struct.unpack('<2f', temp_data) )
contextMeshUV = [ getuv() for i in range(num_uv) ]
elif (new_chunk.ID == OBJECT_TRANS_MATRIX):
# How do we know the matrix size? 54 == 4x4 48 == 4x3
temp_data = file.read(STRUCT_SIZE_4x3MAT)
data = list( struct.unpack('<ffffffffffff', temp_data) )
new_chunk.bytes_read += STRUCT_SIZE_4x3MAT
contextMatrix_rot = Mathutils.Matrix(\
# contextMatrix_rot = Blender.Mathutils.Matrix(\
data[:3] + [0],\
data[3:6] + [0],\
data[6:9] + [0],\
data[9:] + [1])
'''
contextMatrix_rot = Blender.Mathutils.Matrix(\
data[:3] + [0],\
data[3:6] + [0],\
data[6:9] + [0],\
[0,0,0,1])
'''
'''
contextMatrix_rot = Blender.Mathutils.Matrix(\
data[:3] ,\
data[3:6],\
data[6:9])
'''
'''
contextMatrix_rot = Blender.Mathutils.Matrix()
m = 0
for j in xrange(4):
for i in xrange(3):
contextMatrix_rot[j][i] = data[m]
m += 1
contextMatrix_rot[0][3]=0;
contextMatrix_rot[1][3]=0;
contextMatrix_rot[2][3]=0;
contextMatrix_rot[3][3]=1;
'''
#contextMatrix_rot.resize4x4()
#print "MTX"
#print contextMatrix_rot
contextMatrix_rot.invert()
#print contextMatrix_rot
#contextMatrix_tx = Blender.Mathutils.TranslationMatrix(0.5 * Blender.Mathutils.Vector(data[9:]))
#contextMatrix_tx.invert()
#tx.invert()
#contextMatrix = contextMatrix * tx
#contextMatrix = contextMatrix *tx
elif (new_chunk.ID == MAT_MAP_FILENAME):
texture_name = read_string(file)
try:
TEXTURE_DICT[contextMaterial.name]
except:
#img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME)
img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
# img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH)
new_chunk.bytes_read += len(texture_name)+1 #plus one for the null character that gets removed
else: #(new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL):
# print 'skipping to end of this chunk'
buffer_size = new_chunk.length - new_chunk.bytes_read
binary_format='%ic' % buffer_size
temp_data = file.read(struct.calcsize(binary_format))
new_chunk.bytes_read += buffer_size
#update the previous chunk bytes read
# print 'previous_chunk.bytes_read += new_chunk.bytes_read'
# print previous_chunk.bytes_read, new_chunk.bytes_read
previous_chunk.bytes_read += new_chunk.bytes_read
## print 'Bytes left in this chunk: ', previous_chunk.length - previous_chunk.bytes_read
# FINISHED LOOP
# There will be a number of objects still not added
if contextMesh_facels != None:
putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
def load_3ds(filename, context, IMPORT_CONSTRAIN_BOUNDS=10.0, IMAGE_SEARCH=True, APPLY_MATRIX=False):
global FILENAME, SCN
# global FILENAME, SCN_OBJECTS
# XXX
# if BPyMessages.Error_NoFile(filename):
# return
print('\n\nImporting 3DS: "%s"' % (bpy.sys.expandpath(filename)))
# print('\n\nImporting 3DS: "%s"' % (Blender.sys.expandpath(filename)))
time1 = time.clock()
# time1 = Blender.sys.time()
FILENAME = filename
current_chunk = chunk()
file = open(filename,'rb')
#here we go!
# print 'reading the first chunk'
read_chunk(file, current_chunk)
if (current_chunk.ID!=PRIMARY):
print('\tFatal Error: Not a valid 3ds file: ', filename)
file.close()
return
# IMPORT_AS_INSTANCE = Blender.Draw.Create(0)
# IMPORT_CONSTRAIN_BOUNDS = Blender.Draw.Create(10.0)
# IMAGE_SEARCH = Blender.Draw.Create(1)
# APPLY_MATRIX = Blender.Draw.Create(0)
# Get USER Options
# pup_block = [\
# ('Size Constraint:', IMPORT_CONSTRAIN_BOUNDS, 0.0, 1000.0, 'Scale the model by 10 until it reaches the size constraint. Zero Disables.'),\
# ('Image Search', IMAGE_SEARCH, 'Search subdirs for any associated images (Warning, may be slow)'),\
# ('Transform Fix', APPLY_MATRIX, 'Workaround for object transformations importing incorrectly'),\
# #('Group Instance', IMPORT_AS_INSTANCE, 'Import objects into a new scene and group, creating an instance in the current scene.'),\
# ]
# if PREF_UI:
# if not Blender.Draw.PupBlock('Import 3DS...', pup_block):
# return
# Blender.Window.WaitCursor(1)
# IMPORT_CONSTRAIN_BOUNDS = IMPORT_CONSTRAIN_BOUNDS.val
# # IMPORT_AS_INSTANCE = IMPORT_AS_INSTANCE.val
# IMAGE_SEARCH = IMAGE_SEARCH.val
# APPLY_MATRIX = APPLY_MATRIX.val
if IMPORT_CONSTRAIN_BOUNDS:
BOUNDS_3DS[:]= [1<<30, 1<<30, 1<<30, -1<<30, -1<<30, -1<<30]
else:
BOUNDS_3DS[:]= []
##IMAGE_SEARCH
scn = context.scene
# scn = bpy.data.scenes.active
SCN = scn
# SCN_OBJECTS = scn.objects
# SCN_OBJECTS.selected = [] # de select all
importedObjects = [] # Fill this list with objects
process_next_chunk(file, current_chunk, importedObjects, IMAGE_SEARCH)
# Link the objects into this scene.
# Layers = scn.Layers
# REMOVE DUMMYVERT, - remove this in the next release when blenders internal are fixed.
# for ob in importedObjects:
# if ob.type == 'MESH':
# # if ob.type=='Mesh':
# me = ob.getData(mesh=1)
# me.verts.delete([me.verts[0],])
# if not APPLY_MATRIX:
# me.transform(ob.matrixWorld.copy().invert())
# Done DUMMYVERT
"""
if IMPORT_AS_INSTANCE:
name = filename.split('\\')[-1].split('/')[-1]
# Create a group for this import.
group_scn = Scene.New(name)
for ob in importedObjects:
group_scn.link(ob) # dont worry about the layers
grp = Blender.Group.New(name)
grp.objects = importedObjects
grp_ob = Object.New('Empty', name)
grp_ob.enableDupGroup = True
grp_ob.DupGroup = grp
scn.link(grp_ob)
grp_ob.Layers = Layers
grp_ob.sel = 1
else:
# Select all imported objects.
for ob in importedObjects:
scn.link(ob)
ob.Layers = Layers
ob.sel = 1
"""
if 0:
# if IMPORT_CONSTRAIN_BOUNDS!=0.0:
# Set bounds from object bounding box
for ob in importedObjects:
if ob.type == 'MESH':
# if ob.type=='Mesh':
ob.makeDisplayList() # Why doesn't this update the bounds?
for v in ob.getBoundBox():
for i in (0,1,2):
if v[i] < BOUNDS_3DS[i]:
BOUNDS_3DS[i]= v[i] # min
if v[i] > BOUNDS_3DS[i + 3]:
BOUNDS_3DS[i + 3]= v[i] # max
# Get the max axis x/y/z
max_axis = max(BOUNDS_3DS[3]-BOUNDS_3DS[0], BOUNDS_3DS[4]-BOUNDS_3DS[1], BOUNDS_3DS[5]-BOUNDS_3DS[2])
# print max_axis
if max_axis < 1 << 30: # Should never be false but just make sure.
# Get a new scale factor if set as an option
SCALE = 1.0
while (max_axis * SCALE) > IMPORT_CONSTRAIN_BOUNDS:
SCALE/=10
# SCALE Matrix
SCALE_MAT = Mathutils.Matrix([SCALE,0,0,0],[0,SCALE,0,0],[0,0,SCALE,0],[0,0,0,1])
# SCALE_MAT = Blender.Mathutils.Matrix([SCALE,0,0,0],[0,SCALE,0,0],[0,0,SCALE,0],[0,0,0,1])
for ob in importedObjects:
ob.setMatrix(ob.matrixWorld * SCALE_MAT)
# Done constraining to bounds.
# Select all new objects.
print('finished importing: "%s" in %.4f sec.' % (filename, (time.clock()-time1)))
# print('finished importing: "%s" in %.4f sec.' % (filename, (Blender.sys.time()-time1)))
file.close()
# Blender.Window.WaitCursor(0)
DEBUG = False
# if __name__=='__main__' and not DEBUG:
# if calcsize == None:
# Blender.Draw.PupMenu('Error%t|a full python installation not found')
# else:
# Blender.Window.FileSelector(load_3ds, 'Import 3DS', '*.3ds')
# For testing compatibility
#load_3ds('/metavr/convert/vehicle/truck_002/TruckTanker1.3DS', False)
#load_3ds('/metavr/archive/convert/old/arranged_3ds_to_hpx-2/only-need-engine-trains/Engine2.3DS', False)
'''
else:
import os
# DEBUG ONLY
TIME = Blender.sys.time()
import os
print 'Searching for files'
os.system('find /metavr/ -iname "*.3ds" > /tmp/temp3ds_list')
# os.system('find /storage/ -iname "*.3ds" > /tmp/temp3ds_list')
print '...Done'
file = open('/tmp/temp3ds_list', 'r')
lines = file.readlines()
file.close()
# sort by filesize for faster testing
lines_size = [(os.path.getsize(f[:-1]), f[:-1]) for f in lines]
lines_size.sort()
lines = [f[1] for f in lines_size]
def between(v,a,b):
if v <= max(a,b) and v >= min(a,b):
return True
return False
for i, _3ds in enumerate(lines):
if between(i, 650,800):
#_3ds= _3ds[:-1]
print 'Importing', _3ds, '\nNUMBER', i, 'of', len(lines)
_3ds_file= _3ds.split('/')[-1].split('\\')[-1]
newScn = Blender.Scene.New(_3ds_file)
newScn.makeCurrent()
load_3ds(_3ds, False)
print 'TOTAL TIME: %.6f' % (Blender.sys.time() - TIME)
'''
class IMPORT_OT_3ds(bpy.types.Operator):
'''
3DS Importer
'''
__idname__ = "import.3ds"
__label__ = 'Import 3DS'
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
__props__ = [
bpy.props.StringProperty(attr="filename", name="File Name", description="File name used for importing the 3DS file", maxlen=1024, default= ""),
# bpy.props.FloatProperty(attr="size_constraint", name="Size Constraint", description="Scale the model by 10 until it reacehs the size constraint. Zero Disables.", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=10.0),
# bpy.props.BoolProperty(attr="search_images", name="Image Search", description="Search subdirectories for any assosiated images (Warning, may be slow)", default=True),
# bpy.props.BoolProperty(attr="apply_matrix", name="Transform Fix", description="Workaround for object transformations importing incorrectly", default=False),
]
def execute(self, context):
load_3ds(self.filename, context, 0.0, False, False)
return ('FINISHED',)
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self.__operator__)
return ('RUNNING_MODAL',)
def poll(self, context): # Poll isn't working yet
print("Poll")
return context.active_object != None
bpy.ops.add(IMPORT_OT_3ds)
# NOTES:
# why add 1 extra vertex? and remove it when done?
# disabled scaling to size, this requires exposing bb (easy) and understanding how it works (needs some time)
1633
release/io/import_obj.py Normal file
View File
@@ -0,0 +1,1633 @@
#!BPY
"""
Name: 'Wavefront (.obj)...'
Blender: 249
Group: 'Import'
Tooltip: 'Load a Wavefront OBJ File, Shift: batch import all dir.'
"""
__author__= "Campbell Barton", "Jiri Hnidek", "Paolo Ciccone"
__url__= ['http://wiki.blender.org/index.php/Scripts/Manual/Import/wavefront_obj', 'blender.org', 'blenderartists.org']
__version__= "2.11"
__bpydoc__= """\
This script imports Wavefront OBJ files into Blender.
Usage:
Run this script from "File->Import" menu and then load the desired OBJ file.
Note: this loads mesh objects and materials only; NURBS and curves are not supported.
"""
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Script copyright (C) Campbell J Barton 2007
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
import os
import bpy
import Mathutils
import Geometry
# from Blender import Mesh, Draw, Window, Texture, Material, sys
# # import BPyMesh
# import BPyImage
# import BPyMessages
# try: import os
# except: os= False
# Generic path functions
def stripFile(path):
'''Return directory, where the file is'''
lastSlash= max(path.rfind('\\'), path.rfind('/'))
if lastSlash != -1:
path= path[:lastSlash]
return '%s%s' % (path, os.sep)
# return '%s%s' % (path, sys.sep)
def stripPath(path):
'''Strips the slashes from the back of a string'''
return path.split('/')[-1].split('\\')[-1]
def stripExt(name): # name is a string
'''Strips the extension off the name before writing'''
index= name.rfind('.')
if index != -1:
return name[ : index ]
else:
return name
# end path funcs
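# Illustrative sketch, not part of the original importer: what the path helpers
# above return for a made-up POSIX path (stripFile appends os.sep, so the exact
# separator depends on the platform).
def _example_path_helpers():
    path = '/tmp/models/car.obj'
    return stripFile(path), stripPath(path), stripExt(stripPath(path))
    # -> ('/tmp/models/', 'car.obj', 'car') on a POSIX system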
def unpack_list(list_of_tuples):
l = []
for t in list_of_tuples:
l.extend(t)
return l
# same as above except that it adds 0 for triangle faces
def unpack_face_list(list_of_tuples):
l = []
for t in list_of_tuples:
face = [i for i in t]
if len(face) != 3 and len(face) != 4:
raise RuntimeError("{0} vertices in face.".format(len(face)))
if len(face) == 3:
face.append(0)
l.extend(face)
return l
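# Illustrative sketch, not part of the original importer: unpack_face_list()
# flattens mixed triangle/quad index lists, padding triangles with a trailing 0
# so every face occupies four slots in the flat list passed to foreach_set().
def _example_unpack_face_list():
    return unpack_face_list([(0, 1, 2), (2, 3, 4, 5)])  # -> [0, 1, 2, 0, 2, 3, 4, 5]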
def BPyMesh_ngon(from_data, indices, PREF_FIX_LOOPS= True):
'''
Takes a polyline of indices (fgon)
and returns a list of face index lists.
Designed to be used for importers that need indices for an fgon to create from existing verts.
from_data: either a mesh, or a list/tuple of vectors.
indices: a list of indices to use; this list is the ordered closed polyline to fill, and can be a subset of the data given.
PREF_FIX_LOOPS: If this is enabled, polylines that use loops to make multiple polylines are dealt with correctly.
'''
if not set: # Need sets for this, otherwise do a normal fill.
PREF_FIX_LOOPS= False
Vector= Mathutils.Vector
if not indices:
return []
# return []
def rvec(co): return round(co.x, 6), round(co.y, 6), round(co.z, 6)
def mlen(co): return abs(co[0])+abs(co[1])+abs(co[2]) # Manhattan length of a vector, faster than length
def vert_treplet(v, i):
return v, rvec(v), i, mlen(v)
def ed_key_mlen(v1, v2):
if v1[3] > v2[3]:
return v2[1], v1[1]
else:
return v1[1], v2[1]
if not PREF_FIX_LOOPS:
'''
Normal single concave loop filling
'''
if type(from_data) in (tuple, list):
verts= [Vector(from_data[i]) for ii, i in enumerate(indices)]
else:
verts= [from_data.verts[i].co for ii, i in enumerate(indices)]
for i in range(len(verts)-1, 0, -1): # same as reversed(xrange(1, len(verts))):
if verts[i][1]==verts[i-1][0]:
verts.pop(i-1)
fill= Geometry.PolyFill([verts])
else:
'''
Separate this loop into multiple loops by finding edges that are used twice
This is used by lightwave LWO files a lot
'''
if type(from_data) in (tuple, list):
verts= [vert_treplet(Vector(from_data[i]), ii) for ii, i in enumerate(indices)]
else:
verts= [vert_treplet(from_data.verts[i].co, ii) for ii, i in enumerate(indices)]
edges= [(i, i-1) for i in range(len(verts))]
if edges:
edges[0]= (0,len(verts)-1)
if not verts:
return []
edges_used= set()
edges_doubles= set()
# We need to check if any edges are used twice location based.
for ed in edges:
edkey= ed_key_mlen(verts[ed[0]], verts[ed[1]])
if edkey in edges_used:
edges_doubles.add(edkey)
else:
edges_used.add(edkey)
# Store a list of unconnected loop segments split by double edges.
# will join later
loop_segments= []
v_prev= verts[0]
context_loop= [v_prev]
loop_segments= [context_loop]
for v in verts:
if v!=v_prev:
# Are we crossing an edge we removed?
if ed_key_mlen(v, v_prev) in edges_doubles:
context_loop= [v]
loop_segments.append(context_loop)
else:
if context_loop and context_loop[-1][1]==v[1]:
#raise "as"
pass
else:
context_loop.append(v)
v_prev= v
# Now join loop segments
def join_seg(s1,s2):
if s2[-1][1]==s1[0][1]: #
s1,s2= s2,s1
elif s1[-1][1]==s2[0][1]:
pass
else:
return False
# If we're still here, s1 and s2 are 2 segments in the same polyline
s1.pop() # remove the last vert from s1
s1.extend(s2) # add segment 2 to segment 1
if s1[0][1]==s1[-1][1]: # remove endpoints double
s1.pop()
s2[:]= [] # Empty this segment s2 so we don't use it again.
return True
joining_segments= True
while joining_segments:
joining_segments= False
segcount= len(loop_segments)
for j in range(segcount-1, -1, -1): #reversed(range(segcount)):
seg_j= loop_segments[j]
if seg_j:
for k in range(j-1, -1, -1): # reversed(range(j)):
if not seg_j:
break
seg_k= loop_segments[k]
if seg_k and join_seg(seg_j, seg_k):
joining_segments= True
loop_list= loop_segments
for verts in loop_list:
while verts and verts[0][1]==verts[-1][1]:
verts.pop()
loop_list= [verts for verts in loop_list if len(verts)>2]
# DONE DEALING WITH LOOP FIXING
# vert mapping
vert_map= [None]*len(indices)
ii=0
for verts in loop_list:
if len(verts)>2:
for i, vert in enumerate(verts):
vert_map[i+ii]= vert[2]
ii+=len(verts)
fill= Geometry.PolyFill([ [v[0] for v in loop] for loop in loop_list ])
#draw_loops(loop_list)
#raise 'done loop'
# map to original indices
fill= [[vert_map[i] for i in reversed(f)] for f in fill]
if not fill:
print('Warning: cannot scanfill, falling back on a triangle fan.')
fill= [ [0, i-1, i] for i in range(2, len(indices)) ]
else:
# Use real scanfill.
# See if it's flipped the wrong way.
flip= None
for fi in fill:
if flip != None:
break
for i, vi in enumerate(fi):
if vi==0 and fi[i-1]==1:
flip= False
break
elif vi==1 and fi[i-1]==0:
flip= True
break
if not flip:
for i, fi in enumerate(fill):
fill[i]= tuple([ii for ii in reversed(fi)])
return fill
def line_value(line_split):
'''
Returns 1 string representing the value for this line
None will be returned if there's only 1 word
'''
length= len(line_split)
if length == 1:
return None
elif length == 2:
return line_split[1]
elif length > 2:
return ' '.join( line_split[1:] )
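# Illustrative sketch, not part of the original importer: line_value() returns
# everything after the keyword of an already-split OBJ/MTL line.
def _example_line_value():
    return (line_value('usemtl'.split()),              # -> None, keyword only
            line_value('o Cube'.split()),              # -> 'Cube'
            line_value('newmtl My Material'.split()))  # -> 'My Material'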
# limited replacement for BPyImage.comprehensiveImageLoad
def load_image(imagepath, dirname):
if os.path.exists(imagepath):
return bpy.data.add_image(imagepath)
variants = [os.path.join(dirname, imagepath), os.path.join(dirname, os.path.basename(imagepath))]
for path in variants:
if os.path.exists(path):
return bpy.data.add_image(path)
else:
print(path, "doesn't exist")
# TODO comprehensiveImageLoad also searched in bpy.config.textureDir
return None
def obj_image_load(imagepath, DIR, IMAGE_SEARCH):
if '_' in imagepath:
image= load_image(imagepath.replace('_', ' '), DIR)
if image: return image
return load_image(imagepath, DIR)
# def obj_image_load(imagepath, DIR, IMAGE_SEARCH):
# '''
# Mainly uses comprehensiveImageLoad
# but tries to replace '_' with ' ' because Max's exporter replaces spaces with underscores.
# '''
# if '_' in imagepath:
# image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH)
# if image: return image
# # Did the exporter rename the image?
# image= BPyImage.comprehensiveImageLoad(imagepath.replace('_', ' '), DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH)
# if image: return image
# # Return an image, placeholder if it doesn't exist
# image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= True, RECURSIVE= IMAGE_SEARCH)
# return image
def create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH):
'''
Create all the used materials in this obj,
assign colors and images to the materials from all referenced material libs
'''
DIR= stripFile(filepath)
#==================================================================================#
# This function sets textures defined in .mtl file #
#==================================================================================#
def load_material_image(blender_material, context_material_name, imagepath, type):
texture= bpy.data.add_texture(type)
texture.type= 'IMAGE'
# texture= bpy.data.textures.new(type)
# texture.setType('Image')
# Absolute path - c:\.. etc would work here
image= obj_image_load(imagepath, DIR, IMAGE_SEARCH)
has_data = image.has_data if image else False
if image:
texture.image = image
# Adds textures for materials (rendering)
if type == 'Kd':
if has_data and image.depth == 32:
# Image has alpha
# XXX bitmask won't work?
blender_material.add_texture(texture, "UV", ("COLOR", "ALPHA"))
texture.mipmap = True
texture.interpolation = True
texture.use_alpha = True
blender_material.z_transparency = True
blender_material.alpha = 0.0
# blender_material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL | Texture.MapTo.ALPHA)
# texture.setImageFlags('MipMap', 'InterPol', 'UseAlpha')
# blender_material.mode |= Material.Modes.ZTRANSP
# blender_material.alpha = 0.0
else:
blender_material.add_texture(texture, "UV", "COLOR")
# blender_material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL)
# adds textures to faces (Textured/Alt-Z mode)
# Only apply the diffuse texture to the face if the image has not been set with the inline usemat func.
unique_material_images[context_material_name]= image, has_data # set the texface image
elif type == 'Ka':
blender_material.add_texture(texture, "UV", "AMBIENT")
# blender_material.setTexture(1, texture, Texture.TexCo.UV, Texture.MapTo.CMIR) # TODO- Add AMB to BPY API
elif type == 'Ks':
blender_material.add_texture(texture, "UV", "SPECULARITY")
# blender_material.setTexture(2, texture, Texture.TexCo.UV, Texture.MapTo.SPEC)
elif type == 'Bump':
blender_material.add_texture(texture, "UV", "NORMAL")
# blender_material.setTexture(3, texture, Texture.TexCo.UV, Texture.MapTo.NOR)
elif type == 'D':
blender_material.add_texture(texture, "UV", "ALPHA")
blender_material.z_transparency = True
blender_material.alpha = 0.0
# blender_material.setTexture(4, texture, Texture.TexCo.UV, Texture.MapTo.ALPHA)
# blender_material.mode |= Material.Modes.ZTRANSP
# blender_material.alpha = 0.0
# TODO: unset diffuse material alpha if it has an alpha channel
elif type == 'refl':
blender_material.add_texture(texture, "UV", "REFLECTION")
# blender_material.setTexture(5, texture, Texture.TexCo.UV, Texture.MapTo.REF)
# Add an MTL with the same name as the obj if no MTLs are specified.
temp_mtl= stripExt(stripPath(filepath))+ '.mtl'
if os.path.exists(DIR + temp_mtl) and temp_mtl not in material_libs:
# if sys.exists(DIR + temp_mtl) and temp_mtl not in material_libs:
material_libs.append( temp_mtl )
del temp_mtl
#Create new materials
for name in unique_materials: # .keys()
if name != None:
unique_materials[name]= bpy.data.add_material(name)
# unique_materials[name]= bpy.data.materials.new(name)
unique_material_images[name]= None, False # assign None to all material images to start with, add to later.
unique_materials[None]= None
unique_material_images[None]= None, False
for libname in material_libs:
mtlpath= DIR + libname
if not os.path.exists(mtlpath):
# if not sys.exists(mtlpath):
#print '\tError Missing MTL: "%s"' % mtlpath
pass
else:
#print '\t\tloading mtl: "%s"' % mtlpath
context_material= None
mtl= open(mtlpath, 'rU')
for line in mtl: #.xreadlines():
if line.startswith('newmtl'):
context_material_name= line_value(line.split())
if context_material_name in unique_materials:
context_material = unique_materials[ context_material_name ]
else:
context_material = None
elif context_material:
# we need to make a material to assign properties to it.
line_split= line.split()
line_lower= line.lower().lstrip()
if line_lower.startswith('ka'):
context_material.mirror_color = (float(line_split[1]), float(line_split[2]), float(line_split[3]))
# context_material.setMirCol((float(line_split[1]), float(line_split[2]), float(line_split[3])))
elif line_lower.startswith('kd'):
context_material.diffuse_color = (float(line_split[1]), float(line_split[2]), float(line_split[3]))
# context_material.setRGBCol((float(line_split[1]), float(line_split[2]), float(line_split[3])))
elif line_lower.startswith('ks'):
context_material.specular_color = (float(line_split[1]), float(line_split[2]), float(line_split[3]))
# context_material.setSpecCol((float(line_split[1]), float(line_split[2]), float(line_split[3])))
elif line_lower.startswith('ns'):
context_material.specular_hardness = int((float(line_split[1])*0.51))
# context_material.setHardness( int((float(line_split[1])*0.51)) )
elif line_lower.startswith('ni'): # Refraction index
context_material.ior = max(1, min(float(line_split[1]), 3))
# context_material.setIOR( max(1, min(float(line_split[1]), 3))) # Between 1 and 3
elif line_lower.startswith('d') or line_lower.startswith('tr'):
context_material.alpha = float(line_split[1])
# context_material.setAlpha(float(line_split[1]))
elif line_lower.startswith('map_ka'):
img_filepath= line_value(line.split())
if img_filepath:
load_material_image(context_material, context_material_name, img_filepath, 'Ka')
elif line_lower.startswith('map_ks'):
img_filepath= line_value(line.split())
if img_filepath:
load_material_image(context_material, context_material_name, img_filepath, 'Ks')
elif line_lower.startswith('map_kd'):
img_filepath= line_value(line.split())
if img_filepath:
load_material_image(context_material, context_material_name, img_filepath, 'Kd')
elif line_lower.startswith('map_bump'):
img_filepath= line_value(line.split())
if img_filepath:
load_material_image(context_material, context_material_name, img_filepath, 'Bump')
elif line_lower.startswith('map_d') or line_lower.startswith('map_tr'): # Alpha map - Dissolve
img_filepath= line_value(line.split())
if img_filepath:
load_material_image(context_material, context_material_name, img_filepath, 'D')
elif line_lower.startswith('refl'): # Reflectionmap
img_filepath= line_value(line.split())
if img_filepath:
load_material_image(context_material, context_material_name, img_filepath, 'refl')
mtl.close()
def split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, SPLIT_MATERIALS):
'''
Takes vert_loc and faces, and seperates into multiple sets of
(verts_loc, faces, unique_materials, dataname)
This is done so objects do not overload the 16 material limit.
'''
filename = stripExt(stripPath(filepath))
if not SPLIT_OB_OR_GROUP and not SPLIT_MATERIALS:
# use the filename for the object name since we aren't chopping up the mesh.
return [(verts_loc, faces, unique_materials, filename)]
def key_to_name(key):
# if the key is a tuple, join it to make a string
if type(key) == tuple:
return '%s_%s' % key
elif not key:
return filename # assume it's a string. make sure this is true if the splitting code is changed
else:
return key
# Return a key that makes the faces unique.
if SPLIT_OB_OR_GROUP and not SPLIT_MATERIALS:
def face_key(face):
return face[4] # object
elif not SPLIT_OB_OR_GROUP and SPLIT_MATERIALS:
def face_key(face):
return face[2] # material
else: # Both
def face_key(face):
return face[4], face[2] # object,material
face_split_dict= {}
oldkey= -1 # initialize to a value that will never match the key
for face in faces:
key= face_key(face)
if oldkey != key:
# Check the key has changed.
try:
verts_split, faces_split, unique_materials_split, vert_remap= face_split_dict[key]
except KeyError:
faces_split= []
verts_split= []
unique_materials_split= {}
vert_remap= [-1]*len(verts_loc)
face_split_dict[key]= (verts_split, faces_split, unique_materials_split, vert_remap)
oldkey= key
face_vert_loc_indicies= face[0]
# Remap verts to new vert list and add where needed
for enum, i in enumerate(face_vert_loc_indicies):
if vert_remap[i] == -1:
new_index= len(verts_split)
vert_remap[i]= new_index # set the new remapped index so we only add once and can reference next time.
face_vert_loc_indicies[enum] = new_index # remap to the local index
verts_split.append( verts_loc[i] ) # add the vert to the local verts
else:
face_vert_loc_indicies[enum] = vert_remap[i] # remap to the local index
matname= face[2]
if matname and matname not in unique_materials_split:
unique_materials_split[matname] = unique_materials[matname]
faces_split.append(face)
# remove one of the items and reorder
return [(value[0], value[1], value[2], key_to_name(key)) for key, value in list(face_split_dict.items())]
def create_mesh(scn, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc, verts_tex, faces, unique_materials, unique_material_images, unique_smooth_groups, vertex_groups, dataname):
'''
Takes all the data gathered and generates a mesh, adding the new object to new_objects
deals with fgons, sharp edges and assigning materials
'''
if not has_ngons:
CREATE_FGONS= False
if unique_smooth_groups:
sharp_edges= {}
smooth_group_users= dict([ (context_smooth_group, {}) for context_smooth_group in list(unique_smooth_groups.keys()) ])
context_smooth_group_old= -1
# Split fgons into tri's
fgon_edges= {} # Used for storing fgon keys
if CREATE_EDGES:
edges= []
context_object= None
# reverse loop through face indices
for f_idx in range(len(faces)-1, -1, -1):
face_vert_loc_indicies,\
face_vert_tex_indicies,\
context_material,\
context_smooth_group,\
context_object= faces[f_idx]
len_face_vert_loc_indicies = len(face_vert_loc_indicies)
if len_face_vert_loc_indicies==1:
faces.pop(f_idx)# can't add single vert faces
elif not face_vert_tex_indicies or len_face_vert_loc_indicies == 2: # faces that have no texture coords are lines
if CREATE_EDGES:
# generators are better in python 2.4+ but can't be used in 2.3
# edges.extend( (face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in xrange(len_face_vert_loc_indicies-1) )
edges.extend( [(face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in range(len_face_vert_loc_indicies-1)] )
faces.pop(f_idx)
else:
# Smooth Group
if unique_smooth_groups and context_smooth_group:
# Is part of a smooth group and is a face
if context_smooth_group_old is not context_smooth_group:
edge_dict= smooth_group_users[context_smooth_group]
context_smooth_group_old= context_smooth_group
for i in range(len_face_vert_loc_indicies):
i1= face_vert_loc_indicies[i]
i2= face_vert_loc_indicies[i-1]
if i1>i2: i1,i2= i2,i1
try:
edge_dict[i1,i2]+= 1
except KeyError:
edge_dict[i1,i2]= 1
# FGons into triangles
if has_ngons and len_face_vert_loc_indicies > 4:
ngon_face_indices= BPyMesh_ngon(verts_loc, face_vert_loc_indicies)
faces.extend(\
[(\
[face_vert_loc_indicies[ngon[0]], face_vert_loc_indicies[ngon[1]], face_vert_loc_indicies[ngon[2]] ],\
[face_vert_tex_indicies[ngon[0]], face_vert_tex_indicies[ngon[1]], face_vert_tex_indicies[ngon[2]] ],\
context_material,\
context_smooth_group,\
context_object)\
for ngon in ngon_face_indices]\
)
# edges to make fgons
if CREATE_FGONS:
edge_users= {}
for ngon in ngon_face_indices:
for i in (0,1,2):
i1= face_vert_loc_indicies[ngon[i ]]
i2= face_vert_loc_indicies[ngon[i-1]]
if i1>i2: i1,i2= i2,i1
try:
edge_users[i1,i2]+=1
except KeyError:
edge_users[i1,i2]= 1
for key, users in edge_users.items():
if users>1:
fgon_edges[key]= None
# remove all after 3, means we don't have to pop this one.
faces.pop(f_idx)
# Build sharp edges
if unique_smooth_groups:
for edge_dict in list(smooth_group_users.values()):
for key, users in list(edge_dict.items()):
if users==1: # This edge is on the boundary of a group
sharp_edges[key]= None
# map the material names to an index
material_mapping= dict([(name, i) for i, name in enumerate(unique_materials)]) # enumerate over unique_materials keys()
materials= [None] * len(unique_materials)
for name, index in list(material_mapping.items()):
materials[index]= unique_materials[name]
me= bpy.data.add_mesh(dataname)
# me= bpy.data.meshes.new(dataname)
# make sure the list isn't too big
for material in materials[0:16]:
me.add_material(material)
# me.materials= materials[0:16] # make sure the list isnt too big.
#me.verts.extend([(0,0,0)]) # dummy vert
me.add_geometry(len(verts_loc), 0, len(faces))
# verts_loc is a list of (x, y, z) tuples
me.verts.foreach_set("co", unpack_list(verts_loc))
# me.verts.extend(verts_loc)
# faces is a list of (vert_indices, texco_indices, ...) tuples
# XXX faces should not contain edges
# XXX no check for valid face indices
me.faces.foreach_set("verts", unpack_face_list([f[0] for f in faces]))
# face_mapping= me.faces.extend([f[0] for f in faces], indexList=True)
if verts_tex and me.faces:
me.add_uv_texture()
# me.faceUV= 1
# TEXMODE= Mesh.FaceModes['TEX']
context_material_old= -1 # avoid a dict lookup
mat= 0 # rare case it may be un-initialized.
me_faces= me.faces
# ALPHA= Mesh.FaceTranspModes.ALPHA
for i, face in enumerate(faces):
if len(face[0]) < 2:
pass #raise "bad face"
elif len(face[0])==2:
if CREATE_EDGES:
edges.append(face[0])
else:
# face_index_map= face_mapping[i]
# since we use foreach_set to add faces, all of them are added
if 1:
# if face_index_map!=None: # None means the face wasnt added
blender_face = me.faces[i]
# blender_face= me_faces[face_index_map]
face_vert_loc_indicies,\
face_vert_tex_indicies,\
context_material,\
context_smooth_group,\
context_object= face
if context_smooth_group:
blender_face.smooth= True
if context_material:
if context_material_old is not context_material:
mat= material_mapping[context_material]
if mat>15:
mat= 15
context_material_old= context_material
blender_face.material_index= mat
# blender_face.mat= mat
if verts_tex:
blender_tface= me.uv_textures[0].data[i]
if context_material:
image, has_data= unique_material_images[context_material]
if image: # Can be none if the material doesn't have an image.
blender_tface.image= image
# blender_face.image= image
if has_data:
# if has_data and image.depth == 32:
blender_tface.transp = 'ALPHA'
# blender_face.transp |= ALPHA
# BUG - Evil eekadoodle problem where faces that have vert index 0 location at 3 or 4 are shuffled.
if len(face_vert_loc_indicies)==4:
if face_vert_loc_indicies[2]==0 or face_vert_loc_indicies[3]==0:
face_vert_tex_indicies= face_vert_tex_indicies[2], face_vert_tex_indicies[3], face_vert_tex_indicies[0], face_vert_tex_indicies[1]
else: # length of 3
if face_vert_loc_indicies[2]==0:
face_vert_tex_indicies= face_vert_tex_indicies[1], face_vert_tex_indicies[2], face_vert_tex_indicies[0]
# END EEEKADOODLE FIX
# assign material, uv's and image
blender_tface.uv1= verts_tex[face_vert_tex_indicies[0]]
blender_tface.uv2= verts_tex[face_vert_tex_indicies[1]]
blender_tface.uv3= verts_tex[face_vert_tex_indicies[2]]
if blender_face.verts[3] != 0:
blender_tface.uv4= verts_tex[face_vert_tex_indicies[3]]
# for ii, uv in enumerate(blender_face.uv):
# uv.x, uv.y= verts_tex[face_vert_tex_indicies[ii]]
del me_faces
# del ALPHA
if CREATE_EDGES:
me.add_geometry(0, len(edges), 0)
# edges should be a list of (a, b) tuples
me.edges.foreach_set("verts", unpack_list(edges))
# me_edges.extend( edges )
# del me_edges
# Add edge faces.
# me_edges= me.edges
def edges_match(e1, e2):
return (e1[0] == e2[0] and e1[1] == e2[1]) or (e1[0] == e2[1] and e1[1] == e2[0])
# XXX slow
# if CREATE_FGONS and fgon_edges:
# for fgon_edge in fgon_edges.keys():
# for ed in me.edges:
# if edges_match(fgon_edge, ed.verts):
# ed.fgon = True
# if CREATE_FGONS and fgon_edges:
# FGON= Mesh.EdgeFlags.FGON
# for ed in me.findEdges( fgon_edges.keys() ):
# if ed!=None:
# me_edges[ed].flag |= FGON
# del FGON
# XXX slow
# if unique_smooth_groups and sharp_edges:
# for sharp_edge in sharp_edges.keys():
# for ed in me.edges:
# if edges_match(sharp_edge, ed.verts):
# ed.sharp = True
# if unique_smooth_groups and sharp_edges:
# SHARP= Mesh.EdgeFlags.SHARP
# for ed in me.findEdges( sharp_edges.keys() ):
# if ed!=None:
# me_edges[ed].flag |= SHARP
# del SHARP
me.update()
# me.calcNormals()
ob= bpy.data.add_object("MESH", "Mesh")
ob.data= me
scn.add_object(ob)
# ob= scn.objects.new(me)
new_objects.append(ob)
# Create the vertex groups. No need to have the flag passed here since we test for the
# content of the vertex_groups. If the user selects to NOT have vertex groups saved then
# the following test will never run
for group_name, group_indicies in vertex_groups.items():
group= ob.add_vertex_group(group_name)
# me.addVertGroup(group_name)
for vertex_index in group_indicies:
ob.add_vertex_to_group(vertex_index, group, 1.0, 'REPLACE')
# me.assignVertsToGroup(group_name, group_indicies, 1.00, Mesh.AssignModes.REPLACE)
def create_nurbs(scn, context_nurbs, vert_loc, new_objects):
'''
Add nurbs object to blender, only support one type at the moment
'''
deg = context_nurbs.get('deg', (3,))
curv_range = context_nurbs.get('curv_range', None)
curv_idx = context_nurbs.get('curv_idx', [])
parm_u = context_nurbs.get('parm_u', [])
parm_v = context_nurbs.get('parm_v', [])
name = context_nurbs.get('name', 'ObjNurb')
cstype = context_nurbs.get('cstype', None)
if cstype == None:
print('\tWarning, cstype not found')
return
if cstype != 'bspline':
print('\tWarning, cstype is not supported (only bspline)')
return
if not curv_idx:
print('\tWarning, curv argument empty or not set')
return
if len(deg) > 1 or parm_v:
print('\tWarning, surfaces not supported')
return
cu = bpy.data.curves.new(name, 'Curve')
cu.flag |= 1 # 3D curve
nu = None
for pt in curv_idx:
pt = vert_loc[pt]
pt = (pt[0], pt[1], pt[2], 1.0)
if nu == None:
nu = cu.appendNurb(pt)
else:
nu.append(pt)
nu.orderU = deg[0]+1
# get the endpoint flag from the weighting
if curv_range and len(parm_u) > deg[0]+1:
do_endpoints = True
for i in range(deg[0]+1):
if abs(parm_u[i]-curv_range[0]) > 0.0001:
do_endpoints = False
break
if abs(parm_u[-(i+1)]-curv_range[1]) > 0.0001:
do_endpoints = False
break
else:
do_endpoints = False
if do_endpoints:
nu.flagU |= 2
# close
'''
do_closed = False
if len(parm_u) > deg[0]+1:
for i in xrange(deg[0]+1):
#print curv_idx[i], curv_idx[-(i+1)]
if curv_idx[i]==curv_idx[-(i+1)]:
do_closed = True
break
if do_closed:
nu.flagU |= 1
'''
ob = scn.objects.new(cu)
new_objects.append(ob)
def strip_slash(line_split):
if line_split[-1][-1]== '\\':
if len(line_split[-1])==1:
line_split.pop() # remove the \ item
else:
line_split[-1]= line_split[-1][:-1] # remove the \ from the end last number
return True
return False
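# Illustrative sketch, not part of the original importer: strip_slash() spots a
# trailing '\' continuation marker, removes it in place, and returns True so the
# caller knows the statement continues on the next line.
def _example_strip_slash():
    tokens = ['f', '1/1', '2/2', '\\']
    continued = strip_slash(tokens)  # True; tokens is now ['f', '1/1', '2/2']
    return continued, tokens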
def get_float_func(filepath):
'''
find the float function for this obj file
- whether to replace commas or not
'''
file= open(filepath, 'rU')
for line in file: #.xreadlines():
line = line.lstrip()
if line.startswith('v'): # vn vt v
if ',' in line:
return lambda f: float(f.replace(',', '.'))
elif '.' in line:
return float
# in case all vert values were ints
return float
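# Illustrative sketch, not part of the original importer: the callable returned
# by get_float_func() for comma-decimal files parses '0,5' the same way plain
# float() parses '0.5'.
def _example_comma_float():
    comma_float = lambda f: float(f.replace(',', '.'))  # same as the comma branch above
    return comma_float('0,5'), float('0.5')             # -> (0.5, 0.5)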
def load_obj(filepath,
context,
CLAMP_SIZE= 0.0,
CREATE_FGONS= True,
CREATE_SMOOTH_GROUPS= True,
CREATE_EDGES= True,
SPLIT_OBJECTS= True,
SPLIT_GROUPS= True,
SPLIT_MATERIALS= True,
ROTATE_X90= True,
IMAGE_SEARCH=True,
POLYGROUPS=False):
'''
Called by the user interface or another script.
load_obj(path) - should give acceptable results.
This function passes the file and sends the data off
to be split into objects and then converted into mesh objects
'''
print('\nimporting obj "%s"' % filepath)
if SPLIT_OBJECTS or SPLIT_GROUPS or SPLIT_MATERIALS:
POLYGROUPS = False
time_main= bpy.sys.time()
# time_main= sys.time()
verts_loc= []
verts_tex= []
faces= [] # tuples of the faces
material_libs= [] # filenames of material libs this uses
vertex_groups = {} # when POLYGROUPS is true
# Get the string to float conversion func for this file- is 'float' for almost all files.
float_func= get_float_func(filepath)
# Context variables
context_material= None
context_smooth_group= None
context_object= None
context_vgroup = None
# Nurbs
context_nurbs = {}
nurbs = []
context_parm = '' # used by nurbs too but could be used elsewhere
has_ngons= False
# has_smoothgroups= False - is explicit with len(unique_smooth_groups) being > 0
# Until we can use sets
unique_materials= {}
unique_material_images= {}
unique_smooth_groups= {}
# unique_obects= {} - no use for this variable since the objects are stored in the face.
# when there are faces that end with \
# it means they are multiline-
# since we use xreadline we can't skip to the next line
# so we need to know whether
context_multi_line= ''
print('\tparsing obj file "%s"...' % filepath)
time_sub= bpy.sys.time()
# time_sub= sys.time()
file= open(filepath, 'rU')
for line in file: #.xreadlines():
line = line.lstrip() # rare cases there is white space at the start of the line
if line.startswith('v '):
line_split= line.split()
# rotate X90: (x,-z,y)
verts_loc.append( (float_func(line_split[1]), -float_func(line_split[3]), float_func(line_split[2])) )
elif line.startswith('vn '):
pass
elif line.startswith('vt '):
line_split= line.split()
verts_tex.append( (float_func(line_split[1]), float_func(line_split[2])) )
# Handle face lines (as faces) and the second+ lines of a multiline face here
# use 'f' not 'f ' because some objs (very rare) have 'fo ' for faces
elif line.startswith('f') or context_multi_line == 'f':
if context_multi_line:
# reuse face_vert_loc_indicies and face_vert_tex_indicies defined for the first line of this face
line_split= line.split()
else:
line_split= line[2:].split()
face_vert_loc_indicies= []
face_vert_tex_indicies= []
# Instance a face
faces.append((\
face_vert_loc_indicies,\
face_vert_tex_indicies,\
context_material,\
context_smooth_group,\
context_object\
))
if strip_slash(line_split):
context_multi_line = 'f'
else:
context_multi_line = ''
for v in line_split:
obj_vert= v.split('/')
vert_loc_index= int(obj_vert[0])-1
# Add the vertex to the current group
# *warning*, this wont work for files that have groups defined around verts
if POLYGROUPS and context_vgroup:
vertex_groups[context_vgroup].append(vert_loc_index)
# Make relative negative vert indicies absolute
if vert_loc_index < 0:
vert_loc_index= len(verts_loc) + vert_loc_index + 1
face_vert_loc_indicies.append(vert_loc_index)
if len(obj_vert)>1 and obj_vert[1]:
# formatting for faces with normals and textures is
# loc_index/tex_index/nor_index
vert_tex_index= int(obj_vert[1])-1
# Make relative negative vert indicies absolute
if vert_tex_index < 0:
vert_tex_index= len(verts_tex) + vert_tex_index + 1
face_vert_tex_indicies.append(vert_tex_index)
else:
# dummy
face_vert_tex_indicies.append(0)
if len(face_vert_loc_indicies) > 4:
has_ngons= True
elif CREATE_EDGES and (line.startswith('l ') or context_multi_line == 'l'):
# very similar to the face load function above with some parts removed
if context_multi_line:
# reuse face_vert_loc_indicies and face_vert_tex_indicies defined for the first line of this element
line_split= line.split()
else:
line_split= line[2:].split()
face_vert_loc_indicies= []
face_vert_tex_indicies= []
# Instance a face
faces.append((\
face_vert_loc_indicies,\
face_vert_tex_indicies,\
context_material,\
context_smooth_group,\
context_object\
))
if strip_slash(line_split):
context_multi_line = 'l'
else:
context_multi_line = ''
isline= line.startswith('l')
for v in line_split:
vert_loc_index= int(v)-1
# Make relative negative vert indicies absolute
if vert_loc_index < 0:
vert_loc_index= len(verts_loc) + vert_loc_index + 1
face_vert_loc_indicies.append(vert_loc_index)
elif line.startswith('s'):
if CREATE_SMOOTH_GROUPS:
context_smooth_group= line_value(line.split())
if context_smooth_group=='off':
context_smooth_group= None
elif context_smooth_group: # is not None
unique_smooth_groups[context_smooth_group]= None
elif line.startswith('o'):
if SPLIT_OBJECTS:
context_object= line_value(line.split())
# unique_obects[context_object]= None
elif line.startswith('g'):
if SPLIT_GROUPS:
context_object= line_value(line.split())
# print 'context_object', context_object
# unique_obects[context_object]= None
elif POLYGROUPS:
context_vgroup = line_value(line.split())
if context_vgroup and context_vgroup != '(null)':
vertex_groups.setdefault(context_vgroup, [])
else:
context_vgroup = None # dont assign a vgroup
elif line.startswith('usemtl'):
context_material= line_value(line.split())
unique_materials[context_material]= None
elif line.startswith('mtllib'): # usemap or usemat
material_libs.extend( line.split()[1:] ) # can have multiple mtllib filenames per line
# Nurbs support
elif line.startswith('cstype '):
context_nurbs['cstype']= line_value(line.split()) # 'rat bspline' / 'bspline'
elif line.startswith('curv ') or context_multi_line == 'curv':
line_split= line.split()
curv_idx = context_nurbs['curv_idx'] = context_nurbs.get('curv_idx', []) # in case we're in a multi-line statement
if not context_multi_line:
context_nurbs['curv_range'] = float_func(line_split[1]), float_func(line_split[2])
line_split[0:3] = [] # remove first 3 items
if strip_slash(line_split):
context_multi_line = 'curv'
else:
context_multi_line = ''
for i in line_split:
vert_loc_index = int(i)-1
if vert_loc_index < 0:
vert_loc_index= len(verts_loc) + vert_loc_index + 1
curv_idx.append(vert_loc_index)
elif line.startswith('parm') or context_multi_line == 'parm':
line_split= line.split()
if context_multi_line:
context_multi_line = ''
else:
context_parm = line_split[1]
line_split[0:2] = [] # remove first 2
if strip_slash(line_split):
context_multi_line = 'parm'
else:
context_multi_line = ''
if context_parm.lower() == 'u':
context_nurbs.setdefault('parm_u', []).extend( [float_func(f) for f in line_split] )
elif context_parm.lower() == 'v': # surfaces not supported yet
context_nurbs.setdefault('parm_v', []).extend( [float_func(f) for f in line_split] )
# else: # may want to support other parm's ?
elif line.startswith('deg '):
context_nurbs['deg']= [int(i) for i in line.split()[1:]]
elif line.startswith('end'):
# Add the nurbs curve
if context_object:
context_nurbs['name'] = context_object
nurbs.append(context_nurbs)
context_nurbs = {}
context_parm = ''
''' # How to use usemap? deprecated?
elif line.startswith('usema'): # usemap or usemat
context_image= line_value(line.split())
'''
file.close()
time_new= bpy.sys.time()
# time_new= sys.time()
print('%.4f sec' % (time_new-time_sub))
time_sub= time_new
print('\tloading materials and images...')
create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH)
time_new= bpy.sys.time()
# time_new= sys.time()
print('%.4f sec' % (time_new-time_sub))
time_sub= time_new
if not ROTATE_X90:
verts_loc[:] = [(v[0], v[2], -v[1]) for v in verts_loc]
# deselect all
# if context.selected_objects:
# bpy.ops.OBJECT_OT_select_all_toggle()
scene = context.scene
# scn = bpy.data.scenes.active
# scn.objects.selected = []
new_objects= [] # put new objects here
print('\tbuilding geometry...\n\tverts:%i faces:%i materials: %i smoothgroups:%i ...' % ( len(verts_loc), len(faces), len(unique_materials), len(unique_smooth_groups) ))
# Split the mesh by objects/materials, may
if SPLIT_OBJECTS or SPLIT_GROUPS: SPLIT_OB_OR_GROUP = True
else: SPLIT_OB_OR_GROUP = False
for verts_loc_split, faces_split, unique_materials_split, dataname in split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, SPLIT_MATERIALS):
# Create meshes from the data, warning 'vertex_groups' wont support splitting
create_mesh(scene, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc_split, verts_tex, faces_split, unique_materials_split, unique_material_images, unique_smooth_groups, vertex_groups, dataname)
# nurbs support
# for context_nurbs in nurbs:
# create_nurbs(scn, context_nurbs, verts_loc, new_objects)
axis_min= [ 1000000000]*3
axis_max= [-1000000000]*3
# if CLAMP_SIZE:
# # Get all object bounds
# for ob in new_objects:
# for v in ob.getBoundBox():
# for axis, value in enumerate(v):
# if axis_min[axis] > value: axis_min[axis]= value
# if axis_max[axis] < value: axis_max[axis]= value
# # Scale objects
# max_axis= max(axis_max[0]-axis_min[0], axis_max[1]-axis_min[1], axis_max[2]-axis_min[2])
# scale= 1.0
# while CLAMP_SIZE < max_axis * scale:
# scale= scale/10.0
# for ob in new_objects:
# ob.setSize(scale, scale, scale)
# Better rotate the vert locations
#if not ROTATE_X90:
# for ob in new_objects:
# ob.RotX = -1.570796326794896558
time_new= bpy.sys.time()
# time_new= sys.time()
print('%.4f sec' % (time_new-time_sub))
print('finished importing: "%s" in %.4f sec.' % (filepath, (time_new-time_main)))
DEBUG= True
def load_obj_ui(filepath, BATCH_LOAD= False):
if BPyMessages.Error_NoFile(filepath):
return
global CREATE_SMOOTH_GROUPS, CREATE_FGONS, CREATE_EDGES, SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, CLAMP_SIZE, IMAGE_SEARCH, POLYGROUPS, KEEP_VERT_ORDER, ROTATE_X90
CREATE_SMOOTH_GROUPS= Draw.Create(0)
CREATE_FGONS= Draw.Create(1)
CREATE_EDGES= Draw.Create(1)
SPLIT_OBJECTS= Draw.Create(0)
SPLIT_GROUPS= Draw.Create(0)
SPLIT_MATERIALS= Draw.Create(0)
CLAMP_SIZE= Draw.Create(10.0)
IMAGE_SEARCH= Draw.Create(1)
POLYGROUPS= Draw.Create(0)
KEEP_VERT_ORDER= Draw.Create(1)
ROTATE_X90= Draw.Create(1)
# Get USER Options
# Note, Works but not pretty, instead use a more complicated GUI
'''
pup_block= [\
'Import...',\
('Smooth Groups', CREATE_SMOOTH_GROUPS, 'Surround smooth groups by sharp edges'),\
('Create FGons', CREATE_FGONS, 'Import faces with more than 4 verts as fgons.'),\
('Lines', CREATE_EDGES, 'Import lines and faces with 2 verts as edges'),\
'Separate objects from obj...',\
('Object', SPLIT_OBJECTS, 'Import OBJ Objects into Blender Objects'),\
('Group', SPLIT_GROUPS, 'Import OBJ Groups into Blender Objects'),\
('Material', SPLIT_MATERIALS, 'Import each material into a separate mesh (Avoids > 16 per mesh error)'),\
'Options...',\
('Keep Vert Order', KEEP_VERT_ORDER, 'Keep vert and face order, disables some other options.'),\
('Clamp Scale:', CLAMP_SIZE, 0.0, 1000.0, 'Clamp the size to this maximum (Zero to Disable)'),\
('Image Search', IMAGE_SEARCH, 'Search subdirs for any associated images (Warning, may be slow)'),\
]
if not Draw.PupBlock('Import OBJ...', pup_block):
return
if KEEP_VERT_ORDER.val:
SPLIT_OBJECTS.val = False
SPLIT_GROUPS.val = False
SPLIT_MATERIALS.val = False
'''
# BEGIN ALTERNATIVE UI *******************
if True:
EVENT_NONE = 0
EVENT_EXIT = 1
EVENT_REDRAW = 2
EVENT_IMPORT = 3
GLOBALS = {}
GLOBALS['EVENT'] = EVENT_REDRAW
#GLOBALS['MOUSE'] = Window.GetMouseCoords()
GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()]
def obj_ui_set_event(e,v):
GLOBALS['EVENT'] = e
def do_split(e,v):
global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER, POLYGROUPS
if SPLIT_OBJECTS.val or SPLIT_GROUPS.val or SPLIT_MATERIALS.val:
KEEP_VERT_ORDER.val = 0
POLYGROUPS.val = 0
else:
KEEP_VERT_ORDER.val = 1
def do_vertorder(e,v):
global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER
if KEEP_VERT_ORDER.val:
SPLIT_OBJECTS.val = SPLIT_GROUPS.val = SPLIT_MATERIALS.val = 0
else:
if not (SPLIT_OBJECTS.val or SPLIT_GROUPS.val or SPLIT_MATERIALS.val):
KEEP_VERT_ORDER.val = 1
def do_polygroups(e,v):
global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER, POLYGROUPS
if POLYGROUPS.val:
SPLIT_OBJECTS.val = SPLIT_GROUPS.val = SPLIT_MATERIALS.val = 0
def do_help(e,v):
url = __url__[0]
print('Trying to open web browser with documentation at this address...')
print('\t' + url)
try:
import webbrowser
webbrowser.open(url)
except:
print('...could not open a browser window.')
def obj_ui():
ui_x, ui_y = GLOBALS['MOUSE']
# Center based on overall pup size
ui_x -= 165
ui_y -= 90
global CREATE_SMOOTH_GROUPS, CREATE_FGONS, CREATE_EDGES, SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, CLAMP_SIZE, IMAGE_SEARCH, POLYGROUPS, KEEP_VERT_ORDER, ROTATE_X90
Draw.Label('Import...', ui_x+9, ui_y+159, 220, 21)
Draw.BeginAlign()
CREATE_SMOOTH_GROUPS = Draw.Toggle('Smooth Groups', EVENT_NONE, ui_x+9, ui_y+139, 110, 20, CREATE_SMOOTH_GROUPS.val, 'Surround smooth groups by sharp edges')
CREATE_FGONS = Draw.Toggle('NGons as FGons', EVENT_NONE, ui_x+119, ui_y+139, 110, 20, CREATE_FGONS.val, 'Import faces with more than 4 verts as fgons')
CREATE_EDGES = Draw.Toggle('Lines as Edges', EVENT_NONE, ui_x+229, ui_y+139, 110, 20, CREATE_EDGES.val, 'Import lines and faces with 2 verts as edges')
Draw.EndAlign()
Draw.Label('Separate objects by OBJ...', ui_x+9, ui_y+110, 220, 20)
Draw.BeginAlign()
SPLIT_OBJECTS = Draw.Toggle('Object', EVENT_REDRAW, ui_x+9, ui_y+89, 55, 21, SPLIT_OBJECTS.val, 'Import OBJ Objects into Blender Objects', do_split)
SPLIT_GROUPS = Draw.Toggle('Group', EVENT_REDRAW, ui_x+64, ui_y+89, 55, 21, SPLIT_GROUPS.val, 'Import OBJ Groups into Blender Objects', do_split)
SPLIT_MATERIALS = Draw.Toggle('Material', EVENT_REDRAW, ui_x+119, ui_y+89, 60, 21, SPLIT_MATERIALS.val, 'Import each material into a separate mesh (Avoids > 16 per mesh error)', do_split)
Draw.EndAlign()
# Only used for user feedback
KEEP_VERT_ORDER = Draw.Toggle('Keep Vert Order', EVENT_REDRAW, ui_x+184, ui_y+89, 113, 21, KEEP_VERT_ORDER.val, 'Keep vert and face order, disables split options, enable for morph targets', do_vertorder)
ROTATE_X90 = Draw.Toggle('-X90', EVENT_REDRAW, ui_x+302, ui_y+89, 38, 21, ROTATE_X90.val, 'Rotate X 90.')
Draw.Label('Options...', ui_x+9, ui_y+60, 211, 20)
CLAMP_SIZE = Draw.Number('Clamp Scale: ', EVENT_NONE, ui_x+9, ui_y+39, 130, 21, CLAMP_SIZE.val, 0.0, 1000.0, 'Clamp the size to this maximum (Zero to Disable)')
POLYGROUPS = Draw.Toggle('Poly Groups', EVENT_REDRAW, ui_x+144, ui_y+39, 90, 21, POLYGROUPS.val, 'Import OBJ groups as vertex groups.', do_polygroups)
IMAGE_SEARCH = Draw.Toggle('Image Search', EVENT_NONE, ui_x+239, ui_y+39, 100, 21, IMAGE_SEARCH.val, 'Search subdirs for any associated images (Warning, may be slow)')
Draw.BeginAlign()
Draw.PushButton('Online Help', EVENT_REDRAW, ui_x+9, ui_y+9, 110, 21, 'Load the wiki page for this script', do_help)
Draw.PushButton('Cancel', EVENT_EXIT, ui_x+119, ui_y+9, 110, 21, '', obj_ui_set_event)
Draw.PushButton('Import', EVENT_IMPORT, ui_x+229, ui_y+9, 110, 21, 'Import with these settings', obj_ui_set_event)
Draw.EndAlign()
# hack so the toggle buttons redraw. this is not nice at all
while GLOBALS['EVENT'] not in (EVENT_EXIT, EVENT_IMPORT):
Draw.UIBlock(obj_ui, 0)
if GLOBALS['EVENT'] != EVENT_IMPORT:
return
# END ALTERNATIVE UI *********************
Window.WaitCursor(1)
if BATCH_LOAD: # load the dir
try:
files= [ f for f in os.listdir(filepath) if f.lower().endswith('.obj') ]
except:
Window.WaitCursor(0)
Draw.PupMenu('Error%t|Could not open path ' + filepath)
return
if not files:
Window.WaitCursor(0)
Draw.PupMenu('Error%t|No files at path ' + filepath)
return
for f in files:
scn= bpy.data.scenes.new( stripExt(f) )
scn.makeCurrent()
load_obj(sys.join(filepath, f),\
CLAMP_SIZE.val,\
CREATE_FGONS.val,\
CREATE_SMOOTH_GROUPS.val,\
CREATE_EDGES.val,\
SPLIT_OBJECTS.val,\
SPLIT_GROUPS.val,\
SPLIT_MATERIALS.val,\
ROTATE_X90.val,\
IMAGE_SEARCH.val,\
POLYGROUPS.val
)
else: # Normal load
load_obj(filepath,\
CLAMP_SIZE.val,\
CREATE_FGONS.val,\
CREATE_SMOOTH_GROUPS.val,\
CREATE_EDGES.val,\
SPLIT_OBJECTS.val,\
SPLIT_GROUPS.val,\
SPLIT_MATERIALS.val,\
ROTATE_X90.val,\
IMAGE_SEARCH.val,\
POLYGROUPS.val
)
Window.WaitCursor(0)
def load_obj_ui_batch(file):
load_obj_ui(file, True)
DEBUG= False
# if __name__=='__main__' and not DEBUG:
# if os and Window.GetKeyQualifiers() & Window.Qual.SHIFT:
# Window.FileSelector(load_obj_ui_batch, 'Import OBJ Dir', '')
# else:
# Window.FileSelector(load_obj_ui, 'Import a Wavefront OBJ', '*.obj')
# For testing compatibility
'''
else:
# DEBUG ONLY
TIME= sys.time()
DIR = '/fe/obj'
import os
print 'Searching for files'
def fileList(path):
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
yield os.path.join(dirpath, filename)
files = [f for f in fileList(DIR) if f.lower().endswith('.obj')]
files.sort()
for i, obj_file in enumerate(files):
if 0 < i < 20:
print 'Importing', obj_file, '\nNUMBER', i, 'of', len(files)
newScn= bpy.data.scenes.new(os.path.basename(obj_file))
newScn.makeCurrent()
load_obj(obj_file, False, IMAGE_SEARCH=0)
print 'TOTAL TIME: %.6f' % (sys.time() - TIME)
'''
#load_obj('/test.obj')
#load_obj('/fe/obj/mba1.obj')
class IMPORT_OT_obj(bpy.types.Operator):
'''
Load a Wavefront OBJ file. This docstring is used for the operator tooltip and python docs.
'''
__idname__ = "import.obj"
__label__ = "Import OBJ"
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
__props__ = [
bpy.props.StringProperty(attr="filename", name="File Name", description="File path used for importing the OBJ file", maxlen= 1024, default= ""),
bpy.props.BoolProperty(attr="CREATE_SMOOTH_GROUPS", name="Smooth Groups", description="Surround smooth groups by sharp edges", default= True),
bpy.props.BoolProperty(attr="CREATE_FGONS", name="NGons as FGons", description="Import faces with more then 4 verts as fgons", default= True),
bpy.props.BoolProperty(attr="CREATE_EDGES", name="Lines as Edges", description="Import lines and faces with 2 verts as edge", default= True),
bpy.props.BoolProperty(attr="SPLIT_OBJECTS", name="Object", description="Import OBJ Objects into Blender Objects", default= True),
bpy.props.BoolProperty(attr="SPLIT_GROUPS", name="Group", description="Import OBJ Groups into Blender Objects", default= True),
bpy.props.BoolProperty(attr="SPLIT_MATERIALS", name="Material", description="Import each material into a seperate mesh (Avoids > 16 per mesh error)", default= True),
# old comment: only used for user feedback
# disabled this option because in old code a handler for it disabled SPLIT* params, it's not passed to load_obj
# bpy.props.BoolProperty(attr="KEEP_VERT_ORDER", name="Keep Vert Order", description="Keep vert and face order, disables split options, enable for morph targets", default= True),
bpy.props.BoolProperty(attr="ROTATE_X90", name="-X90", description="Rotate X 90.", default= True),
bpy.props.FloatProperty(attr="CLAMP_SIZE", name="Clamp Scale", description="Clamp the size to this maximum (Zero to Disable)", min=0.01, max=1000.0, soft_min=0.0, soft_max=1000.0, default=0.0),
bpy.props.BoolProperty(attr="POLYGROUPS", name="Poly Groups", description="Import OBJ groups as vertex groups.", default= True),
bpy.props.BoolProperty(attr="IMAGE_SEARCH", name="Image Search", description="Search subdirs for any assosiated images (Warning, may be slow)", default= True),
]
def poll(self, context):
print("Poll")
return context.active_object != None
def execute(self, context):
# print("Selected: " + context.active_object.name)
if not self.filename:
raise Exception("filename not set")
load_obj(self.filename,
context,
self.CLAMP_SIZE,
self.CREATE_FGONS,
self.CREATE_SMOOTH_GROUPS,
self.CREATE_EDGES,
self.SPLIT_OBJECTS,
self.SPLIT_GROUPS,
self.SPLIT_MATERIALS,
self.ROTATE_X90,
self.IMAGE_SEARCH,
self.POLYGROUPS)
return ('FINISHED',)
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self.__operator__)
return ('RUNNING_MODAL',)
bpy.ops.add(IMPORT_OT_obj)
# NOTES (all line numbers refer to 2.4x import_obj.py, not this file)
# check later: line 489
# can convert now: edge flags, edges: lines 508-528
# ngon (uses python module BPyMesh): 384-414
# nurbs: 947-
# NEXT clamp size: get bound box with RNA
# get back to l 140 (here)
# search image in bpy.config.textureDir - load_image
# replaced BPyImage.comprehensiveImageLoad with a simplified version that only checks additional directory specified, but doesn't search dirs recursively (obj_image_load)
# bitmask won't work? - 132
# uses operator bpy.ops.OBJECT_OT_select_all_toggle() to deselect all (not necessary?)
# uses bpy.sys.time()
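# A hedged sketch (for reference only, not executed) of driving the importer
# directly from another 2.5 script instead of going through the IMPORT_OT_obj
# operator above. Only the file path is hypothetical; the keyword arguments
# match the load_obj() defaults defined earlier in this file.
#   import bpy
#   load_obj('/tmp/example.obj', bpy.context, SPLIT_OBJECTS=False, SPLIT_GROUPS=False)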

View File

@@ -84,7 +84,9 @@ class INFO_MT_file_import(bpy.types.Menu):
def draw(self, context):
layout = self.layout
layout.itemL(text="Nothing yet")
layout.itemO("import.3ds", text="3DS")
layout.itemO("import.obj", text="OBJ")
class INFO_MT_file_export(bpy.types.Menu):
__space_type__ = 'INFO'
@@ -93,7 +95,12 @@ class INFO_MT_file_export(bpy.types.Menu):
def draw(self, context):
layout = self.layout
layout.itemO("export.3ds", text="3DS")
layout.itemO("export.fbx", text="FBX")
layout.itemO("export.obj", text="OBJ")
layout.itemO("export.ply", text="PLY")
layout.itemO("export.x3d", text="X3D")
class INFO_MT_file_external_data(bpy.types.Menu):
__space_type__ = 'INFO'

View File

@@ -39,14 +39,7 @@ struct PartEff;
struct Scene;
struct ListBase;
typedef struct DupliObject {
struct DupliObject *next, *prev;
struct Object *ob;
unsigned int origlay;
int index, no_draw, type, animated;
float mat[4][4], omat[4][4];
float orco[3], uv[2];
} DupliObject;
#include "DNA_object_types.h"
void free_path(struct Path *path);
void calc_curvepath(struct Object *ob);

View File

@@ -2175,3 +2175,103 @@ void BKE_image_user_calc_imanr(ImageUser *iuser, int cfra, int fieldnr)
}
}
/*
Produce image export path.
Fails returning 0 if image filename is empty or if destination path
matches image path (i.e. both are the same file).
Trailing slash in dest_dir is optional.
Logic:
- if an image is "below" current .blend file directory, rebuild the
same dir structure in dest_dir
For example //textures/foo/bar.png becomes
[dest_dir]/textures/foo/bar.png.
- if an image is not "below" the current .blend file directory,
disregard its path and copy it into the same directory where the 3D
file goes.
For example //../foo/bar.png becomes [dest_dir]/bar.png.
This logic helps ensure that all image paths are relative and
that the user gets their images in one place. It also provides
consistent behaviour across exporters.
*/
int BKE_get_image_export_path(struct Image *im, const char *dest_dir, char *abs, int abs_size, char *rel, int rel_size)
{
char path[FILE_MAX];
char dir[FILE_MAX];
char base[FILE_MAX];
char blend_dir[FILE_MAX]; /* directory, where current .blend file resides */
char dest_path[FILE_MAX];
char rel_dir[FILE_MAX];
int len;
if (abs)
abs[0]= 0;
if (rel)
rel[0]= 0;
BLI_split_dirfile_basic(G.sce, blend_dir, NULL);
if (!strlen(im->name)) {
if (G.f & G_DEBUG) printf("Invalid image type.\n");
return 0;
}
BLI_strncpy(path, im->name, sizeof(path));
/* expand "//" in filename and get absolute path */
BLI_convertstringcode(path, G.sce);
/* get the directory part */
BLI_split_dirfile_basic(path, dir, base);
len= strlen(blend_dir);
rel_dir[0] = 0;
/* if image is "below" current .blend file directory */
if (!strncmp(path, blend_dir, len)) {
/* if image is _in_ current .blend file directory */
if (!strcmp(dir, blend_dir)) {
BLI_join_dirfile(dest_path, dest_dir, base);
}
/* "below" */
else {
/* rel = image_path_dir - blend_dir */
BLI_strncpy(rel_dir, dir + len, sizeof(rel_dir));
BLI_join_dirfile(dest_path, dest_dir, rel_dir);
BLI_join_dirfile(dest_path, dest_path, base);
}
}
/* image is out of current directory */
else {
BLI_join_dirfile(dest_path, dest_dir, base);
}
if (abs)
BLI_strncpy(abs, dest_path, abs_size);
if (rel) {
strncat(rel, rel_dir, rel_size);
strncat(rel, base, rel_size);
}
/* return 2 if src=dest */
if (!strcmp(path, dest_path)) {
if (G.f & G_DEBUG) printf("%s and %s are the same file\n", path, dest_path);
return 2;
}
return 1;
}

View File

@@ -542,7 +542,8 @@ void set_mesh(Object *ob, Mesh *me)
if(ob->type==OB_MESH) {
old= ob->data;
old->id.us--;
if (old)
old->id.us--;
ob->data= me;
id_us_plus((ID *)me);
}

View File

@@ -118,6 +118,7 @@ void EM_select_face(struct EditFace *efa, int sel);
void EM_select_face_fgon(struct EditMesh *em, struct EditFace *efa, int val);
void EM_select_swap(struct EditMesh *em);
void EM_toggle_select_all(struct EditMesh *em);
void EM_select_all(struct EditMesh *em);
void EM_selectmode_flush(struct EditMesh *em);
void EM_deselect_flush(struct EditMesh *em);
void EM_selectmode_set(struct EditMesh *em);

View File

@@ -3293,6 +3293,11 @@ void EM_toggle_select_all(EditMesh *em) /* exported for UV */
EM_set_flag_all(em, SELECT);
}
void EM_select_all(EditMesh *em)
{
EM_set_flag_all(em, SELECT);
}
static int toggle_select_all_exec(bContext *C, wmOperator *op)
{
Object *obedit= CTX_data_edit_object(C);

View File

@@ -243,6 +243,7 @@ typedef struct Object {
ListBase gpulamp; /* runtime, for lamps only */
ListBase pc_ids;
ListBase *duplilist; /* for temporary dupli list storage, only for use by RNA API */
} Object;
/* Warning, this is not used anymore because hooks are now modifiers */
@@ -263,6 +264,14 @@ typedef struct ObHook {
float force;
} ObHook;
typedef struct DupliObject {
struct DupliObject *next, *prev;
struct Object *ob;
unsigned int origlay;
int index, no_draw, type, animated;
float mat[4][4], omat[4][4];
float orco[3], uv[2];
} DupliObject;
/* this work object is defined in object.c */
extern Object workob;

View File

@@ -1969,7 +1969,7 @@ RNAProcessItem PROCESS_ITEMS[]= {
{"rna_rna.c", NULL, RNA_def_rna},
{"rna_ID.c", NULL, RNA_def_ID},
{"rna_texture.c", NULL, RNA_def_texture},
{"rna_action.c", NULL, RNA_def_action},
{"rna_action.c", "rna_action_api.c", RNA_def_action},
{"rna_animation.c", "rna_animation_api.c", RNA_def_animation},
{"rna_actuator.c", NULL, RNA_def_actuator},
{"rna_armature.c", NULL, RNA_def_armature},
@@ -1986,12 +1986,12 @@ RNAProcessItem PROCESS_ITEMS[]= {
{"rna_fluidsim.c", NULL, RNA_def_fluidsim},
{"rna_gpencil.c", NULL, RNA_def_gpencil},
{"rna_group.c", NULL, RNA_def_group},
{"rna_image.c", NULL, RNA_def_image},
{"rna_image.c", "rna_image_api.c", RNA_def_image},
{"rna_key.c", NULL, RNA_def_key},
{"rna_lamp.c", NULL, RNA_def_lamp},
{"rna_lattice.c", NULL, RNA_def_lattice},
{"rna_main.c", "rna_main_api.c", RNA_def_main},
{"rna_material.c", NULL, RNA_def_material},
{"rna_material.c", "rna_material_api.c", RNA_def_material},
{"rna_mesh.c", "rna_mesh_api.c", RNA_def_mesh},
{"rna_meta.c", NULL, RNA_def_meta},
{"rna_modifier.c", NULL, RNA_def_modifier},
@@ -2001,7 +2001,7 @@ RNAProcessItem PROCESS_ITEMS[]= {
{"rna_object_force.c", NULL, RNA_def_object_force},
{"rna_packedfile.c", NULL, RNA_def_packedfile},
{"rna_particle.c", NULL, RNA_def_particle},
{"rna_pose.c", NULL, RNA_def_pose},
{"rna_pose.c", "rna_pose_api.c", RNA_def_pose},
{"rna_property.c", NULL, RNA_def_gameproperty},
{"rna_render.c", NULL, RNA_def_render},
{"rna_scene.c", "rna_scene_api.c", RNA_def_scene},

View File

@@ -100,6 +100,8 @@ static void rna_def_action(BlenderRNA *brna)
RNA_def_property_collection_sdna(prop, NULL, "markers", NULL);
RNA_def_property_struct_type(prop, "TimelineMarker");
RNA_def_property_ui_text(prop, "Pose Markers", "Markers specific to this Action, for labeling poses.");
RNA_api_action(srna);
}
/* --------- */

View File

@@ -0,0 +1,80 @@
/**
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The Original Code is Copyright (C) 2009 Blender Foundation.
* All rights reserved.
*
*
* Contributor(s): Arystanbek Dyussenov
*
* ***** END GPL LICENSE BLOCK *****
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include "RNA_define.h"
#include "RNA_types.h"
#include "DNA_action_types.h"
#ifdef RNA_RUNTIME
#include "BKE_action.h"
#include "DNA_anim_types.h"
#include "DNA_curve_types.h"
/* XXX disabled until RNA allows returning arrays */
#if 0
/* return frame range of all curves (min, max) or (0, 1) if there are no keys */
int *rna_Action_get_frame_range(bAction *act, int *ret_length)
{
int *ret;
float start, end;
calc_action_range(act, &start, &end, 1);
*ret_length= 2;
ret= MEM_callocN(*ret_length * sizeof(int), "rna_Action_get_frame_range");
ret[0]= (int)start;
ret[1]= (int)end;
return ret;
}
#endif
#else
void RNA_api_action(StructRNA *srna)
{
#if 0
FunctionRNA *func;
PropertyRNA *parm;
func= RNA_def_function(srna, "get_frame_range", "rna_Action_get_frame_range");
RNA_def_function_ui_description(func, "Get action frame range as a (min, max) tuple.");
parm= RNA_def_int_array(func, "frame_range", 1, NULL, 0, 0, "", "Action frame range.", 0, 0);
RNA_def_property_flag(parm, PROP_DYNAMIC_ARRAY);
RNA_def_function_return(func, parm);
#endif
}
#endif

View File

@@ -464,6 +464,37 @@ static void rna_def_bone(BlenderRNA *brna)
RNA_def_property_boolean_sdna(prop, NULL, "flag", BONE_SELECTED);
RNA_def_property_ui_text(prop, "Selected", "");
RNA_def_property_update(prop, 0, "rna_Armature_redraw_data");
/* XXX better matrix descriptions possible (Arystan) */
prop= RNA_def_property(srna, "matrix", PROP_FLOAT, PROP_MATRIX);
RNA_def_property_float_sdna(prop, NULL, "bone_mat");
RNA_def_property_array(prop, 9);
RNA_def_property_ui_text(prop, "Bone Matrix", "3x3 bone matrix.");
prop= RNA_def_property(srna, "armature_matrix", PROP_FLOAT, PROP_MATRIX);
RNA_def_property_float_sdna(prop, NULL, "arm_mat");
RNA_def_property_array(prop, 16);
RNA_def_property_ui_text(prop, "Bone Armature-Relative Matrix", "4x4 bone matrix relative to armature.");
prop= RNA_def_property(srna, "tail", PROP_FLOAT, PROP_TRANSLATION);
RNA_def_property_float_sdna(prop, NULL, "tail");
RNA_def_property_array(prop, 3);
RNA_def_property_ui_text(prop, "Tail", "Location of tail end of the bone.");
prop= RNA_def_property(srna, "armature_tail", PROP_FLOAT, PROP_TRANSLATION);
RNA_def_property_float_sdna(prop, NULL, "arm_tail");
RNA_def_property_array(prop, 3);
RNA_def_property_ui_text(prop, "Armature-Relative Tail", "Location of tail end of the bone relative to armature.");
prop= RNA_def_property(srna, "head", PROP_FLOAT, PROP_TRANSLATION);
RNA_def_property_float_sdna(prop, NULL, "head");
RNA_def_property_array(prop, 3);
RNA_def_property_ui_text(prop, "Head", "Location of head end of the bone.");
prop= RNA_def_property(srna, "armature_head", PROP_FLOAT, PROP_TRANSLATION);
RNA_def_property_float_sdna(prop, NULL, "arm_head");
RNA_def_property_array(prop, 3);
RNA_def_property_ui_text(prop, "Armature-Relative Head", "Location of head end of the bone relative to armature.");
}
static void rna_def_edit_bone(BlenderRNA *brna)

View File

@@ -144,6 +144,29 @@ static EnumPropertyItem *rna_Image_source_itemf(bContext *C, PointerRNA *ptr, in
return item;
}
static int rna_Image_has_data_get(PointerRNA *ptr)
{
Image *im= (Image*)ptr->data;
if (im->ibufs.first)
return 1;
return 0;
}
static int rna_Image_depth_get(PointerRNA *ptr)
{
Image *im= (Image*)ptr->data;
ImBuf *ibuf= BKE_image_get_ibuf(im, NULL);
if (!ibuf) return 0;
if (ibuf->rect_float)
return 128;
return ibuf->depth;
}
#else
static void rna_def_imageuser(BlenderRNA *brna)
@@ -356,6 +379,22 @@ static void rna_def_image(BlenderRNA *brna)
RNA_def_property_boolean_sdna(prop, NULL, "tpageflag", IMA_CLAMP_V);
RNA_def_property_ui_text(prop, "Clamp Y", "Disable texture repeating vertically.");
RNA_def_property_update(prop, NC_IMAGE|ND_DISPLAY, NULL);
/*
Image.has_data and Image.depth are temporary,
Update import_obj.py when they are replaced (Arystan)
*/
prop= RNA_def_property(srna, "has_data", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_funcs(prop, "rna_Image_has_data_get", NULL);
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
RNA_def_property_ui_text(prop, "Has data", "True if this image has data.");
prop= RNA_def_property(srna, "depth", PROP_INT, PROP_NONE);
RNA_def_property_int_funcs(prop, "rna_Image_depth_get", NULL, NULL);
RNA_def_property_ui_text(prop, "Depth", "Image bit depth.");
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
RNA_api_image(srna);
}
void RNA_def_image(BlenderRNA *brna)

View File

@@ -0,0 +1,82 @@
/**
* $Id$
*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The Original Code is Copyright (C) 2009 Blender Foundation.
* All rights reserved.
*
*
* Contributor(s): Arystanbek Dyussenov
*
* ***** END GPL LICENSE BLOCK *****
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include "RNA_define.h"
#include "RNA_types.h"
#include "DNA_object_types.h"
#ifdef RNA_RUNTIME
#include "BKE_utildefines.h"
#include "BKE_image.h"
#include "MEM_guardedalloc.h"
/*
User should check if returned path exists before copying a file there.
TODO: it would be better to return a (abs, rel) tuple.
*/
static char *rna_Image_get_export_path(Image *image, char *dest_dir, int rel)
{
int length = FILE_MAX;
char *path= MEM_callocN(length, "image file path");
if (!BKE_get_image_export_path(image, dest_dir, rel ? NULL : path, length, rel ? path : NULL, length )) {
MEM_freeN(path);
return NULL;
}
return path;
}
#else
void RNA_api_image(StructRNA *srna)
{
FunctionRNA *func;
PropertyRNA *parm;
func= RNA_def_function(srna, "get_export_path", "rna_Image_get_export_path");
RNA_def_function_ui_description(func, "Produce image export path.");
parm= RNA_def_string(func, "dest_dir", "", 0, "", "Destination directory.");
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_boolean(func, "get_rel_path", 1, "", "Return relative path if True.");
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_string(func, "path", "", 0, "", "Absolute export path.");
RNA_def_function_return(func, parm);
}
#endif
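A hedged sketch of how the Image.get_export_path() function defined above might be called from a 2.5 Python export script; the image variable and destination directory are assumptions, not part of this commit:

    abs_path = image.get_export_path("/tmp/export", False)  # absolute destination path
    rel_path = image.get_export_path("/tmp/export", True)   # path relative to dest_dir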

View File

@@ -200,15 +200,19 @@ void rna_Mesh_update_draw(struct bContext *C, struct PointerRNA *ptr);
/* API functions */
void RNA_api_action(StructRNA *srna);
void RNA_api_image(struct StructRNA *srna);
void RNA_api_keyingset(struct StructRNA *srna);
void RNA_api_main(struct StructRNA *srna);
void RNA_api_material(StructRNA *srna);
void RNA_api_mesh(struct StructRNA *srna);
void RNA_api_object(struct StructRNA *srna);
void RNA_api_scene(struct StructRNA *srna);
void RNA_api_text(struct StructRNA *srna);
void RNA_api_ui_layout(struct StructRNA *srna);
void RNA_api_wm(struct StructRNA *srna);
/* ID Properties */
extern StringPropertyRNA rna_IDProperty_string;

View File

@@ -32,13 +32,21 @@
#include "RNA_define.h"
#include "RNA_types.h"
#include "DNA_object_types.h"
#include "DNA_material_types.h"
#include "DNA_mesh_types.h"
#ifdef RNA_RUNTIME
#include "BKE_main.h"
#include "BKE_mesh.h"
#include "BKE_library.h"
#include "BKE_object.h"
#include "BKE_material.h"
#include "BKE_image.h"
#include "BKE_texture.h"
#include "DNA_mesh_types.h"
#include "DNA_lamp_types.h"
static Mesh *rna_Main_add_mesh(Main *main, char *name)
{
@@ -57,24 +65,145 @@ static void rna_Main_remove_mesh(Main *main, ReportList *reports, Mesh *me)
/* XXX python now has invalid pointer? */
}
static Lamp *rna_Main_add_lamp(Main *main, char *name)
{
Lamp *la= add_lamp(name);
la->id.us--;
return la;
}
/*
static void rna_Main_remove_lamp(Main *main, ReportList *reports, Lamp *la)
{
if(la->id.us == 0)
free_libblock(&main->lamp, la);
else
BKE_report(reports, RPT_ERROR, "Lamp must have zero users to be removed.");
}
*/
static Object* rna_Main_add_object(Main *main, int type, char *name)
{
Object *ob= add_only_object(type, name);
ob->id.us--;
return ob;
}
/*
NOTE: the following example shows when this function should _not_ be called
ob = bpy.data.add_object()
scene.add_object(ob)
# ob is freed here
scene.remove_object(ob)
# don't do this since ob is already freed!
bpy.data.remove_object(ob)
*/
static void rna_Main_remove_object(Main *main, ReportList *reports, Object *ob)
{
if(ob->id.us == 0)
free_libblock(&main->object, ob);
else
BKE_report(reports, RPT_ERROR, "Object must have zero users to be removed.");
}
static Material *rna_Main_add_material(Main *main, char *name)
{
return add_material(name);
}
/* TODO: remove material? */
struct Tex *rna_Main_add_texture(Main *main, char *name)
{
return add_texture(name);
}
/* TODO: remove texture? */
struct Image *rna_Main_add_image(Main *main, char *filename)
{
return BKE_add_image_file(filename, 0);
}
#else
void RNA_api_main(StructRNA *srna)
{
FunctionRNA *func;
PropertyRNA *prop;
PropertyRNA *parm;
/* copied from rna_def_object */
static EnumPropertyItem object_type_items[] = {
{OB_EMPTY, "EMPTY", 0, "Empty", ""},
{OB_MESH, "MESH", 0, "Mesh", ""},
{OB_CURVE, "CURVE", 0, "Curve", ""},
{OB_SURF, "SURFACE", 0, "Surface", ""},
{OB_FONT, "TEXT", 0, "Text", ""},
{OB_MBALL, "META", 0, "Meta", ""},
{OB_LAMP, "LAMP", 0, "Lamp", ""},
{OB_CAMERA, "CAMERA", 0, "Camera", ""},
{OB_WAVE, "WAVE", 0, "Wave", ""},
{OB_LATTICE, "LATTICE", 0, "Lattice", ""},
{OB_ARMATURE, "ARMATURE", 0, "Armature", ""},
{0, NULL, 0, NULL, NULL}};
func= RNA_def_function(srna, "add_object", "rna_Main_add_object");
RNA_def_function_ui_description(func, "Add a new object.");
parm= RNA_def_enum(func, "type", object_type_items, 0, "", "Type of Object.");
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_string(func, "name", "Object", 0, "", "New name for the datablock.");
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_pointer(func, "object", "Object", "", "New object.");
RNA_def_function_return(func, parm);
func= RNA_def_function(srna, "remove_object", "rna_Main_remove_object");
RNA_def_function_flag(func, FUNC_USE_REPORTS);
RNA_def_function_ui_description(func, "Remove an object if it has zero users.");
parm= RNA_def_pointer(func, "object", "Object", "", "Object to remove.");
RNA_def_property_flag(parm, PROP_REQUIRED);
func= RNA_def_function(srna, "add_mesh", "rna_Main_add_mesh");
RNA_def_function_ui_description(func, "Add a new mesh.");
prop= RNA_def_string(func, "name", "Mesh", 0, "", "New name for the datablock.");
prop= RNA_def_pointer(func, "mesh", "Mesh", "", "New mesh.");
RNA_def_function_return(func, prop);
parm= RNA_def_string(func, "name", "Mesh", 0, "", "New name for the datablock.");
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_pointer(func, "mesh", "Mesh", "", "New mesh.");
RNA_def_function_return(func, parm);
func= RNA_def_function(srna, "remove_mesh", "rna_Main_remove_mesh");
RNA_def_function_flag(func, FUNC_USE_REPORTS);
RNA_def_function_ui_description(func, "Remove a mesh if it has zero users.");
prop= RNA_def_pointer(func, "mesh", "Mesh", "", "Mesh to remove.");
RNA_def_property_flag(prop, PROP_REQUIRED);
parm= RNA_def_pointer(func, "mesh", "Mesh", "", "Mesh to remove.");
RNA_def_property_flag(parm, PROP_REQUIRED);
func= RNA_def_function(srna, "add_lamp", "rna_Main_add_lamp");
RNA_def_function_ui_description(func, "Add a new lamp.");
parm= RNA_def_string(func, "name", "Lamp", 0, "", "New name for the datablock.");
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_pointer(func, "mesh", "Lamp", "", "New lamp.");
RNA_def_function_return(func, parm);
func= RNA_def_function(srna, "add_material", "rna_Main_add_material");
RNA_def_function_ui_description(func, "Add a new material.");
parm= RNA_def_string(func, "name", "Material", 0, "", "New name for the datablock."); /* optional */
parm= RNA_def_pointer(func, "material", "Material", "", "New material.");
RNA_def_function_return(func, parm);
func= RNA_def_function(srna, "add_texture", "rna_Main_add_texture");
RNA_def_function_ui_description(func, "Add a new texture.");
parm= RNA_def_string(func, "name", "Tex", 0, "", "New name for the datablock."); /* optional */
parm= RNA_def_pointer(func, "texture", "Texture", "", "New texture.");
RNA_def_function_return(func, parm);
func= RNA_def_function(srna, "add_image", "rna_Main_add_image");
RNA_def_function_ui_description(func, "Add a new image.");
parm= RNA_def_string(func, "filename", "", 0, "", "Filename to load image from.");
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_pointer(func, "image", "Image", "", "New image.");
RNA_def_function_return(func, parm);
}
#endif
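A hedged sketch of the Main API defined above as it might be called through bpy.data; the datablock names and image path are illustrative only:

    ob = bpy.data.add_object('MESH', "MyObject")
    me = bpy.data.add_mesh("MyMesh")
    ma = bpy.data.add_material("MyMaterial")
    tex = bpy.data.add_texture("MyTexture")
    img = bpy.data.add_image("/tmp/example.png")
    bpy.data.remove_object(ob)   # only valid while the object has no users, see the NOTE above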

View File

@@ -1688,6 +1688,8 @@ void RNA_def_material(BlenderRNA *brna)
rna_def_material_mtex(brna);
rna_def_material_strand(brna);
rna_def_material_physics(brna);
RNA_api_material(srna);
}
void rna_def_mtex_common(StructRNA *srna, const char *begin, const char *activeget, const char *activeset, const char *structname)

View File

@@ -0,0 +1,126 @@
/**
*
*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The Original Code is Copyright (C) 2009 Blender Foundation.
* All rights reserved.
*
*
* Contributor(s): Blender Foundation
*
* ***** END GPL LICENSE BLOCK *****
*/
#include <stdlib.h>
#include <stdio.h>
#include "RNA_define.h"
#include "RNA_types.h"
#include "DNA_material_types.h"
#ifdef RNA_RUNTIME
#include "BKE_material.h"
#include "BKE_texture.h"
/*
Adds a texture to the material's first free texture slot.
If all slots are busy, replaces the first.
*/
static void rna_Material_add_texture(Material *ma, Tex *tex, int texco, int mapto)
{
int i;
MTex *mtex;
int slot= -1;
for (i= 0; i < MAX_MTEX; i++) {
if (!ma->mtex[i]) {
slot= i;
break;
}
}
if (slot == -1)
slot= 0;
if (ma->mtex[slot]) {
ma->mtex[slot]->tex->id.us--;
}
else {
ma->mtex[slot]= add_mtex();
}
mtex= ma->mtex[slot];
mtex->tex= tex;
id_us_plus(&tex->id);
mtex->texco= mapto;
mtex->mapto= texco;
}
#else
void RNA_api_material(StructRNA *srna)
{
FunctionRNA *func;
PropertyRNA *parm;
/* copied from rna_def_material_mtex (rna_material.c) */
static EnumPropertyItem prop_texture_coordinates_items[] = {
{TEXCO_GLOB, "GLOBAL", 0, "Global", "Uses global coordinates for the texture coordinates."},
{TEXCO_OBJECT, "OBJECT", 0, "Object", "Uses linked object's coordinates for texture coordinates."},
{TEXCO_UV, "UV", 0, "UV", "Uses UV coordinates for texture coordinates."},
{TEXCO_ORCO, "ORCO", 0, "Generated", "Uses the original undeformed coordinates of the object."},
{TEXCO_STRAND, "STRAND", 0, "Strand", "Uses normalized strand texture coordinate (1D)."},
{TEXCO_STICKY, "STICKY", 0, "Sticky", "Uses mesh's sticky coordinates for the texture coordinates."},
{TEXCO_WINDOW, "WINDOW", 0, "Window", "Uses screen coordinates as texture coordinates."},
{TEXCO_NORM, "NORMAL", 0, "Normal", "Uses normal vector as texture coordinates."},
{TEXCO_REFL, "REFLECTION", 0, "Reflection", "Uses reflection vector as texture coordinates."},
{TEXCO_STRESS, "STRESS", 0, "Stress", "Uses the difference of edge lengths compared to original coordinates of the mesh."},
{TEXCO_TANGENT, "TANGENT", 0, "Tangent", "Uses the optional tangent vector as texture coordinates."},
{0, NULL, 0, NULL, NULL}};
static EnumPropertyItem prop_texture_mapto_items[] = {
{MAP_COL, "COLOR", 0, "Color", "Causes the texture to affect basic color of the material"},
{MAP_NORM, "NORMAL", 0, "Normal", "Causes the texture to affect the rendered normal"},
{MAP_COLSPEC, "SPECULAR_COLOR", 0, "Specularity Color", "Causes the texture to affect the specularity color"},
{MAP_COLMIR, "MIRROR", 0, "Mirror", "Causes the texture to affect the mirror color"},
{MAP_REF, "REFLECTION", 0, "Reflection", "Causes the texture to affect the value of the materials reflectivity"},
{MAP_SPEC, "SPECULARITY", 0, "Specularity", "Causes the texture to affect the value of specularity"},
{MAP_EMIT, "EMIT", 0, "Emit", "Causes the texture to affect the emit value"},
{MAP_ALPHA, "ALPHA", 0, "Alpha", "Causes the texture to affect the alpha value"},
{MAP_HAR, "HARDNESS", 0, "Hardness", "Causes the texture to affect the hardness value"},
{MAP_RAYMIRR, "RAY_MIRROR", 0, "Ray-Mirror", "Causes the texture to affect the ray-mirror value"},
{MAP_TRANSLU, "TRANSLUCENCY", 0, "Translucency", "Causes the texture to affect the translucency value"},
{MAP_AMB, "AMBIENT", 0, "Ambient", "Causes the texture to affect the value of ambient"},
{MAP_DISPLACE, "DISPLACEMENT", 0, "Displacement", "Let the texture displace the surface"},
{MAP_WARP, "WARP", 0, "Warp", "Let the texture warp texture coordinates of next channels"},
{0, NULL, 0, NULL, NULL}};
func= RNA_def_function(srna, "add_texture", "rna_Material_add_texture");
RNA_def_function_ui_description(func, "Add a texture to material's free texture slot.");
parm= RNA_def_pointer(func, "texture", "Texture", "", "Texture to add.");
parm= RNA_def_enum(func, "texture_coordinates", prop_texture_coordinates_items, TEXCO_UV, "", "Source of texture coordinate information."); /* optional */
parm= RNA_def_enum(func, "map_to", prop_texture_mapto_items, MAP_COL, "", "Controls which material property the texture affects."); /* optional */
}
#endif
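A hedged sketch of Material.add_texture() as defined above; the enum identifiers come from the item lists in this file, while the material and texture variables are assumptions:

    ma.add_texture(tex, "UV", "COLOR")   # map tex through UV coordinates to diffuse color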

View File

@@ -953,6 +953,16 @@ static void rna_def_medge(BlenderRNA *brna)
RNA_def_property_boolean_sdna(prop, NULL, "flag", ME_SHARP);
RNA_def_property_ui_text(prop, "Sharp", "Sharp edge for the EdgeSplit modifier");
RNA_def_property_update(prop, 0, "rna_Mesh_update_data");
prop= RNA_def_property(srna, "loose", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, NULL, "flag", ME_LOOSEEDGE);
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
RNA_def_property_ui_text(prop, "Loose", "Loose edge");
prop= RNA_def_property(srna, "fgon", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, NULL, "flag", ME_FGON);
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
RNA_def_property_ui_text(prop, "Fgon", "Fgon edge");
}
static void rna_def_mface(BlenderRNA *brna)
@@ -968,10 +978,13 @@ static void rna_def_mface(BlenderRNA *brna)
// XXX allows creating invalid meshes
prop= RNA_def_property(srna, "verts", PROP_INT, PROP_UNSIGNED);
RNA_def_property_int_sdna(prop, NULL, "v1");
RNA_def_property_array(prop, 4);
/*
RNA_def_property_flag(prop, PROP_DYNAMIC);
RNA_def_property_dynamic_array_funcs(prop, "rna_MeshFace_verts_get_length");
RNA_def_property_int_funcs(prop, "rna_MeshFace_verts_get", "rna_MeshFace_verts_set", NULL);
*/
RNA_def_property_ui_text(prop, "Vertices", "Vertex indices");
prop= RNA_def_property(srna, "material_index", PROP_INT, PROP_UNSIGNED);

View File

@@ -42,12 +42,19 @@
#include "BKE_DerivedMesh.h"
#include "BKE_main.h"
#include "BKE_mesh.h"
#include "BKE_material.h"
#include "DNA_mesh_types.h"
#include "DNA_scene_types.h"
#include "BLI_arithb.h"
#include "BLI_edgehash.h"
#include "WM_api.h"
#include "WM_types.h"
#include "MEM_guardedalloc.h"
static void rna_Mesh_calc_edges(Mesh *mesh)
{
CustomData edata;
@@ -102,6 +109,9 @@ static void rna_Mesh_calc_edges(Mesh *mesh)
static void rna_Mesh_update(Mesh *mesh, bContext *C)
{
Main *bmain= CTX_data_main(C);
Object *ob;
if(mesh->totface && mesh->totedge == 0)
rna_Mesh_calc_edges(mesh);
@@ -111,6 +121,18 @@ static void rna_Mesh_update(Mesh *mesh, bContext *C)
WM_event_add_notifier(C, NC_GEOM|ND_DATA, mesh);
}
static void rna_Mesh_transform(Mesh *me, float *mat)
{
/* TODO: old API transform had recalc_normals option */
int i;
MVert *mvert= me->mvert;
for(i= 0; i < me->totvert; i++, mvert++) {
Mat4MulVecfl((float (*)[4])mat, mvert->co);
}
}
static void rna_Mesh_add_verts(Mesh *mesh, int len)
{
CustomData vdata;
@@ -141,6 +163,14 @@ static void rna_Mesh_add_verts(Mesh *mesh, int len)
mesh->totvert= totvert;
}
Mesh *rna_Mesh_create_copy(Mesh *me)
{
Mesh *ret= copy_mesh(me);
ret->id.us--;
return ret;
}
static void rna_Mesh_add_edges(Mesh *mesh, int len)
{
CustomData edata;
@@ -201,6 +231,12 @@ static void rna_Mesh_add_faces(Mesh *mesh, int len)
mesh->totface= totface;
}
/*
static void rna_Mesh_add_faces(Mesh *mesh)
{
}
*/
static void rna_Mesh_add_geometry(Mesh *mesh, int verts, int edges, int faces)
{
if(verts)
@@ -211,6 +247,39 @@ static void rna_Mesh_add_geometry(Mesh *mesh, int verts, int edges, int faces)
rna_Mesh_add_faces(mesh, faces);
}
static void rna_Mesh_add_uv_texture(Mesh *me)
{
me->mtface= CustomData_add_layer(&me->fdata, CD_MTFACE, CD_DEFAULT, NULL, me->totface);
}
static void rna_Mesh_calc_normals(Mesh *me)
{
mesh_calc_normals(me->mvert, me->totvert, me->mface, me->totface, NULL);
}
static void rna_Mesh_add_material(Mesh *me, Material *ma)
{
int i;
int totcol = me->totcol + 1;
Material **mat;
/* don't add if mesh already has it */
for (i = 0; i < me->totcol; i++)
if (me->mat[i] == ma)
return;
mat= MEM_callocN(sizeof(void*) * totcol, "newmatar");
if (me->totcol) memcpy(mat, me->mat, sizeof(void*) * me->totcol);
if (me->mat) MEM_freeN(me->mat);
me->mat = mat;
me->mat[me->totcol++] = ma;
ma->id.us++;
test_object_materials((ID*)me);
}
#else
void RNA_api_mesh(StructRNA *srna)
@@ -218,6 +287,11 @@ void RNA_api_mesh(StructRNA *srna)
FunctionRNA *func;
PropertyRNA *parm;
func= RNA_def_function(srna, "transform", "rna_Mesh_transform");
RNA_def_function_ui_description(func, "Transform mesh vertices by a matrix.");
parm= RNA_def_float_matrix(func, "matrix", 4, 4, NULL, 0.0f, 0.0f, "", "Matrix.", 0.0f, 0.0f);
RNA_def_property_flag(parm, PROP_REQUIRED);
func= RNA_def_function(srna, "add_geometry", "rna_Mesh_add_geometry");
parm= RNA_def_int(func, "verts", 0, 0, INT_MAX, "Number", "Number of vertices to add.", 0, INT_MAX);
RNA_def_property_flag(parm, PROP_REQUIRED);
@@ -226,8 +300,24 @@ void RNA_api_mesh(StructRNA *srna)
parm= RNA_def_int(func, "faces", 0, 0, INT_MAX, "Number", "Number of faces to add.", 0, INT_MAX);
RNA_def_property_flag(parm, PROP_REQUIRED);
func= RNA_def_function(srna, "create_copy", "rna_Mesh_create_copy");
RNA_def_function_ui_description(func, "Create a copy of this Mesh datablock.");
parm= RNA_def_pointer(func, "mesh", "Mesh", "", "Mesh, remove it if it is only used for export.");
RNA_def_function_return(func, parm);
func= RNA_def_function(srna, "add_uv_texture", "rna_Mesh_add_uv_texture");
RNA_def_function_ui_description(func, "Add a UV texture layer to Mesh.");
func= RNA_def_function(srna, "calc_normals", "rna_Mesh_calc_normals");
RNA_def_function_ui_description(func, "Calculate vertex normals.");
func= RNA_def_function(srna, "update", "rna_Mesh_update");
RNA_def_function_flag(func, FUNC_USE_CONTEXT);
func= RNA_def_function(srna, "add_material", "rna_Mesh_add_material");
RNA_def_function_ui_description(func, "Add a new material to Mesh.");
parm= RNA_def_pointer(func, "material", "Material", "", "Material to add.");
RNA_def_property_flag(parm, PROP_REQUIRED);
}
#endif
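A hedged sketch of the Mesh API registered above, roughly as an exporter or importer might call it; the counts and the 4x4 matrix are placeholders:

    me = bpy.data.add_mesh("MyMesh")
    me.add_geometry(4, 0, 1)        # 4 verts, 0 edges, 1 face
    me.add_uv_texture()
    me.add_material(ma)             # 'ma' is an existing Material
    me.calc_normals()
    me.transform(identity_matrix)   # 'identity_matrix' stands in for a 4x4 float matrix
    me.update()                     # context is supplied by RNA (FUNC_USE_CONTEXT)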

View File

@@ -103,6 +103,12 @@ void rna_Object_update(bContext *C, PointerRNA *ptr)
DAG_id_flush_update(ptr->id.data, OB_RECALC_OB);
}
void rna_Object_matrix_update(bContext *C, PointerRNA *ptr)
{
ED_object_apply_obmat(ptr->id.data);
rna_Object_update(C, ptr);
}
void rna_Object_update_data(bContext *C, PointerRNA *ptr)
{
DAG_id_flush_update(ptr->id.data, OB_RECALC_DATA);
@@ -1212,6 +1218,7 @@ static void rna_def_object(BlenderRNA *brna)
RNA_def_property_float_sdna(prop, NULL, "obmat");
RNA_def_property_multi_array(prop, 2, matrix_dimsize);
RNA_def_property_ui_text(prop, "Matrix", "Transformation matrix.");
RNA_def_property_update(prop, NC_OBJECT|ND_TRANSFORM, "rna_Object_matrix_update");
/* collections */
prop= RNA_def_property(srna, "constraints", PROP_COLLECTION, PROP_NONE);
@@ -1524,12 +1531,43 @@ static void rna_def_object(BlenderRNA *brna)
RNA_api_object(srna);
}
static void rna_def_dupli_object(BlenderRNA *brna)
{
StructRNA *srna;
PropertyRNA *prop;
srna= RNA_def_struct(brna, "DupliObject", NULL);
RNA_def_struct_sdna(srna, "DupliObject");
RNA_def_struct_ui_text(srna, "Dupli Object", "Dupli Object data.");
/* RNA_def_struct_ui_icon(srna, ICON_OBJECT_DATA); */
prop= RNA_def_property(srna, "object", PROP_POINTER, PROP_NONE);
/* RNA_def_property_struct_type(prop, "Object"); */
RNA_def_property_pointer_sdna(prop, NULL, "ob");
/* RNA_def_property_pointer_funcs(prop, "rna_DupliObject_object_get", NULL, NULL); */
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
RNA_def_property_ui_text(prop, "Object", "Object this DupliObject represents.");
prop= RNA_def_property(srna, "ob_matrix", PROP_FLOAT, PROP_MATRIX);
RNA_def_property_float_sdna(prop, NULL, "omat");
RNA_def_property_array(prop, 16);
RNA_def_property_ui_text(prop, "Object Matrix", "Object transformation matrix.");
prop= RNA_def_property(srna, "matrix", PROP_FLOAT, PROP_MATRIX);
RNA_def_property_float_sdna(prop, NULL, "mat");
RNA_def_property_array(prop, 16);
RNA_def_property_ui_text(prop, "DupliObject Matrix", "DupliObject transformation matrix.");
/* TODO: DupliObject has more properties that can be wrapped */
}
void RNA_def_object(BlenderRNA *brna)
{
rna_def_object(brna);
rna_def_object_game_settings(brna);
rna_def_vertex_group(brna);
rna_def_material_slot(brna);
rna_def_dupli_object(brna);
}
#endif

View File

@@ -28,43 +28,64 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include "RNA_define.h"
#include "RNA_types.h"
#include "DNA_object_types.h"
#include "BLO_sys_types.h" /* needed for intptr_t used in ED_mesh.h */
#include "ED_mesh.h"
#ifdef RNA_RUNTIME
#include "BKE_main.h"
#include "BKE_global.h"
#include "BKE_context.h"
#include "BKE_report.h"
#include "BKE_object.h"
#include "BKE_mesh.h"
#include "BKE_DerivedMesh.h"
#include "BKE_customdata.h"
#include "BKE_anim.h"
#include "BKE_depsgraph.h"
#include "BKE_displist.h"
#include "BKE_font.h"
#include "BKE_mball.h"
#include "BLI_arithb.h"
#include "DNA_mesh_types.h"
#include "DNA_scene_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_curve_types.h"
#include "DNA_modifier_types.h"
#include "MEM_guardedalloc.h"
#include "BKE_customdata.h"
#include "BKE_DerivedMesh.h"
#include "BKE_displist.h"
#include "BKE_object.h"
#include "BKE_mball.h"
#include "BKE_main.h"
#include "DNA_mesh_types.h"
#include "DNA_curve_types.h"
#include "DNA_scene_types.h"
/* copied from init_render_mesh (render code) */
static Mesh *rna_Object_create_render_mesh(Object *ob, bContext *C, Scene *scene)
/* copied from Mesh_getFromObject and adapted to RNA interface */
/* settings: 0 - preview, 1 - render */
static Mesh *rna_Object_create_mesh(Object *ob, bContext *C, ReportList *reports, int apply_modifiers, int settings)
{
CustomDataMask mask = CD_MASK_BAREMESH|CD_MASK_MTFACE|CD_MASK_MCOL;
Mesh *tmpmesh;
Curve *tmpcu = NULL;
Object *tmpobj = NULL;
DerivedMesh *dm;
Mesh *me;
switch(ob->type) {
int render = settings, i;
int cage = !apply_modifiers;
Scene *sce = CTX_data_scene(C);
/* perform the mesh extraction based on type */
switch (ob->type) {
case OB_FONT:
case OB_CURVE:
case OB_SURF:
{
int cage = 0; //XXX -todo
Curve *tmpcu = NULL;
/* copies object and modifiers (but not the data) */
tmpobj= copy_object( ob );
tmpobj= copy_object(ob);
tmpcu = (Curve *)tmpobj->data;
tmpcu->id.us--;
@@ -88,80 +109,352 @@ static Mesh *rna_Object_create_render_mesh(Object *ob, bContext *C, Scene *scene
#endif
/* get updated display list, and convert to a mesh */
makeDispListCurveTypes( scene, tmpobj, 0 );
makeDispListCurveTypes( sce, tmpobj, 0 );
nurbs_to_mesh( tmpobj );
/* nurbs_to_mesh changes the type tp a mesh, check it worked */
/* nurbs_to_mesh changes the type to a mesh, check it worked */
if (tmpobj->type != OB_MESH) {
free_libblock_us( &(CTX_data_main(C)->object), tmpobj );
printf("cant convert curve to mesh. Does the curve have any segments?" ); // XXX use report api
free_libblock_us( &G.main->object, tmpobj );
BKE_report(reports, RPT_ERROR, "cant convert curve to mesh. Does the curve have any segments?");
return NULL;
}
me = tmpobj->data;
free_libblock_us( &(CTX_data_main(C)->object), tmpobj );
break;
}
tmpmesh = tmpobj->data;
free_libblock_us( &G.main->object, tmpobj );
break;
case OB_MBALL:
/* metaballs don't have modifiers, so just convert to mesh */
ob = find_basis_mball(scene, ob);
/* todo, re-generatre for render-res */
// metaball_polygonize(scene, ob)
me = add_mesh("Mesh");
mball_to_mesh( &ob->disp, me );
ob = find_basis_mball( sce, ob );
tmpmesh = add_mesh("Mesh");
mball_to_mesh( &ob->disp, tmpmesh );
break;
case OB_MESH:
/* copies object and modifiers (but not the data) */
if (cage) {
/* copies the data */
tmpmesh = copy_mesh( ob->data );
/* if not getting the original caged mesh, get final derived mesh */
} else {
/* Make a dummy mesh, saves copying */
DerivedMesh *dm;
/* CustomDataMask mask = CD_MASK_BAREMESH|CD_MASK_MTFACE|CD_MASK_MCOL; */
CustomDataMask mask = CD_MASK_MESH; /* this seems more suitable, exporter,
for example, needs CD_MASK_MDEFORMVERT */
/* Write the display mesh into the dummy mesh */
if (render)
dm = mesh_create_derived_render( sce, ob, mask );
else
dm = mesh_create_derived_view( sce, ob, mask );
tmpmesh = add_mesh( "Mesh" );
DM_to_mesh( dm, tmpmesh );
dm->release( dm );
}
break;
case OB_MESH:
{
dm= mesh_create_derived_render(scene, ob, mask);
// dm= mesh_create_derived_view(scene, ob, mask);
default:
BKE_report(reports, RPT_ERROR, "Object does not have geometry data");
return NULL;
}
if(!dm)
return NULL;
/* Copy materials to new mesh */
switch (ob->type) {
case OB_SURF:
tmpmesh->totcol = tmpcu->totcol;
/* free old material list (if it exists) and adjust user counts */
if( tmpcu->mat ) {
for( i = tmpcu->totcol; i-- > 0; ) {
/* are we an object material or data based? */
if (ob->colbits & 1<<i)
tmpmesh->mat[i] = ob->mat[i];
else
tmpmesh->mat[i] = tmpcu->mat[i];
me= add_mesh("tmp_render_mesh");
me->id.us--; /* we don't assign it to anything */
DM_to_mesh(dm, me);
dm->release(dm);
break;
}
default:
return NULL;
}
{ /* update the material */
short i, *totcol =give_totcolp(ob);
/* free the current material list */
if(me->mat)
MEM_freeN((void *)me->mat);
me->mat= (Material **)MEM_callocN(sizeof(void *)*(*totcol), "matarray");
for(i=0; i<*totcol; i++) {
Material *mat= give_current_material(ob, i+1);
if(mat) {
me->mat[i]= mat;
mat->id.us++;
if (tmpmesh->mat[i])
tmpmesh->mat[i]->id.us++;
}
}
break;
#if 0
/* Crashes when assigning the new material, not sure why */
case OB_MBALL:
tmpmb = (MetaBall *)ob->data;
tmpmesh->totcol = tmpmb->totcol;
/* free old material list (if it exists) and adjust user counts */
if( tmpmb->mat ) {
for( i = tmpmb->totcol; i-- > 0; ) {
tmpmesh->mat[i] = tmpmb->mat[i]; /* CRASH HERE ??? */
if (tmpmesh->mat[i]) {
tmpmb->mat[i]->id.us++;
}
}
}
break;
#endif
case OB_MESH:
if (!cage) {
Mesh *origmesh= ob->data;
tmpmesh->flag= origmesh->flag;
tmpmesh->mat = MEM_dupallocN(origmesh->mat);
tmpmesh->totcol = origmesh->totcol;
tmpmesh->smoothresh= origmesh->smoothresh;
if( origmesh->mat ) {
for( i = origmesh->totcol; i-- > 0; ) {
/* are we an object material or data based? */
if (ob->colbits & 1<<i)
tmpmesh->mat[i] = ob->mat[i];
else
tmpmesh->mat[i] = origmesh->mat[i];
if (tmpmesh->mat[i])
tmpmesh->mat[i]->id.us++;
}
}
}
break;
} /* end copy materials */
/* we don't assign it to anything */
tmpmesh->id.us--;
/* make sure materials get updated in objects */
test_object_materials( ( ID * ) tmpmesh );
return tmpmesh;
}
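For orientation, a minimal Python-side sketch of how an exporter might call the create_mesh() function defined above; the object lookup and the printed attributes are illustrative assumptions, not part of this commit.

import bpy

# any object with geometry will do; scenes[0].objects[0] is only a placeholder
ob = bpy.data.scenes[0].objects[0]

# apply_modifiers=True with 'RENDER' settings takes the mesh_create_derived_render() path
me = ob.create_mesh(True, 'RENDER')

if me:
    # the returned datablock already has its user count decremented;
    # drop it once the exporter is done with it
    print("temporary mesh:", me.name, "verts:", len(me.verts))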
/* When no longer needed, duplilist should be freed with Object.free_duplilist */
static void rna_Object_create_duplilist(Object *ob, bContext *C, ReportList *reports)
{
if (!(ob->transflag & OB_DUPLI)) {
BKE_report(reports, RPT_ERROR, "Object does not have duplis.");
return;
}
/* free duplilist if a user forgets to */
if (ob->duplilist) {
BKE_reportf(reports, RPT_WARNING, "Object.dupli_list has not been freed.");
free_object_duplilist(ob->duplilist);
ob->duplilist= NULL;
}
ob->duplilist= object_duplilist(CTX_data_scene(C), ob);
/* ob->duplilist should now be freed with Object.free_duplilist */
}
static void rna_Object_free_duplilist(Object *ob, ReportList *reports)
{
if (ob->duplilist) {
free_object_duplilist(ob->duplilist);
ob->duplilist= NULL;
}
}
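A hedged sketch of the intended call pattern for the two dupli functions above (the object choice and the collection-length access are assumptions):

import bpy

ob = bpy.data.scenes[0].objects[0]  # placeholder: any object with duplis enabled

if ob.dupli_type != 'NONE':
    ob.create_dupli_list()
    print("object generates", len(ob.dupli_list), "duplis")
    ob.free_dupli_list()  # must be freed explicitly, as the warning above reports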
static void rna_Object_convert_to_triface(Object *ob, bContext *C, ReportList *reports, Scene *sce)
{
Mesh *me;
int ob_editing = CTX_data_edit_object(C) == ob;
if (ob->type != OB_MESH) {
BKE_report(reports, RPT_ERROR, "Object should be of type MESH.");
return;
}
me= (Mesh*)ob->data;
if (!ob_editing)
make_editMesh(sce, ob);
/* select all */
EM_select_all(me->edit_mesh);
convert_to_triface(me->edit_mesh, 0);
load_editMesh(sce, ob);
if (!ob_editing)
free_editMesh(me->edit_mesh);
DAG_id_flush_update(&ob->id, OB_RECALC_DATA);
}
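A short Python usage sketch for convert_to_triface(); the scene and object lookups are assumptions:

import bpy

sce = bpy.data.scenes[0]
ob = sce.objects[0]  # assumed to be a MESH object

# splits all faces into triangles via a temporary edit-mesh and flushes the DAG
ob.convert_to_triface(sce)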
static bDeformGroup *rna_Object_add_vertex_group(Object *ob, char *group_name)
{
return ED_vgroup_add_name(ob, group_name);
}
static void rna_Object_add_vertex_to_group(Object *ob, int vertex_index, bDeformGroup *def, float weight, int assignmode)
{
/* creates dverts if needed */
ED_vgroup_vert_add(ob, def, vertex_index, weight, assignmode);
}
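A Python-side sketch of the two vertex group helpers above; the group name, vertex index and weight are arbitrary examples:

import bpy

ob = bpy.data.scenes[0].objects[0]  # assumed MESH object

group = ob.add_vertex_group("Exported")           # name argument is optional
ob.add_vertex_to_group(0, group, 1.0, 'REPLACE')  # vertex 0, full weight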
/* copied from old API Object.makeDisplayList (Object.c) */
static void rna_Object_make_display_list(Object *ob, bContext *C)
{
Scene *sce= CTX_data_scene(C);
if (ob->type == OB_FONT) {
Curve *cu = ob->data;
freedisplist(&cu->disp);
BKE_text_to_curve(sce, ob, CU_LEFT);
}
DAG_id_flush_update(&ob->id, OB_RECALC_DATA);
}
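A minimal sketch of make_display_list() from Python, assuming the object is a Text (FONT) object:

import bpy

ob = bpy.data.scenes[0].objects[0]  # assumed to be a FONT object

# rebuilds the curve display list from the text and tags the object for recalculation
ob.make_display_list()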
static Object *rna_Object_find_armature(Object *ob)
{
Object *ob_arm = NULL;
if (ob->type != OB_MESH) return NULL;
if (ob->parent && ob->partype == PARSKEL && ob->parent->type == OB_ARMATURE) {
ob_arm = ob->parent;
}
else {
ModifierData *mod = (ModifierData*)ob->modifiers.first;
while (mod) {
if (mod->type == eModifierType_Armature) {
ob_arm = ((ArmatureModifierData*)mod)->object;
}
mod = mod->next;
}
}
return me;
return ob_arm;
}
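And a sketch for find_armature(), assuming a skinned mesh:

import bpy

ob = bpy.data.scenes[0].objects[0]  # assumed to be a mesh deformed by an armature

arm_ob = ob.find_armature()
if arm_ob:
    print("deformed by armature:", arm_ob.name)
else:
    print("no armature parent or Armature modifier found")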
int rna_Object_is_visible(Object *ob, bContext *C)
{
return ob->lay & CTX_data_scene(C)->lay;
}
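is_visible() takes its context implicitly, so the Python call has no arguments; a sketch:

import bpy

for ob in bpy.data.objects:
    if ob.is_visible():
        print(ob.name, "is on a layer visible in the active scene")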
/*
static void rna_Mesh_assign_verts_to_group(Object *ob, bDeformGroup *group, int *indices, int totindex, float weight, int assignmode)
{
if (ob->type != OB_MESH) {
BKE_report(reports, RPT_ERROR, "Object should be of MESH type.");
return;
}
Mesh *me = (Mesh*)ob->data;
int group_index = get_defgroup_num(ob, group);
if (group_index == -1) {
BKE_report(reports, RPT_ERROR, "No deform groups assigned to mesh.");
return;
}
if (assignmode != WEIGHT_REPLACE && assignmode != WEIGHT_ADD && assignmode != WEIGHT_SUBTRACT) {
BKE_report(reports, RPT_ERROR, "Bad assignment mode." );
return;
}
// makes a set of dVerts corresponding to the mVerts
if (!me->dvert)
create_dverts(&me->id);
// loop list adding verts to group
for (i= 0; i < totindex; i++) {
if(i < 0 || i >= me->totvert) {
BKE_report(reports, RPT_ERROR, "Bad vertex index in list.");
return;
}
add_vert_defnr(ob, group_index, i, weight, assignmode);
}
}
*/
#else
void RNA_api_object(StructRNA *srna)
{
FunctionRNA *func;
PropertyRNA *prop;
PropertyRNA *parm;
func= RNA_def_function(srna, "create_render_mesh", "rna_Object_create_render_mesh");
RNA_def_function_ui_description(func, "Create a Mesh datablock with all modifiers applied.");
static EnumPropertyItem mesh_type_items[] = {
{0, "PREVIEW", 0, "Preview", "Apply modifier preview settings."},
{1, "RENDER", 0, "Render", "Apply modifier render settings."},
{0, NULL, 0, NULL, NULL}
};
static EnumPropertyItem assign_mode_items[] = {
{WEIGHT_REPLACE, "REPLACE", 0, "Replace", "Replace."}, /* TODO: more meaningful descriptions */
{WEIGHT_ADD, "ADD", 0, "Add", "Add."},
{WEIGHT_SUBTRACT, "SUBTRACT", 0, "Subtract", "Subtract."},
{0, NULL, 0, NULL, NULL}
};
/* mesh */
func= RNA_def_function(srna, "create_mesh", "rna_Object_create_mesh");
RNA_def_function_ui_description(func, "Create a Mesh datablock with modifiers applied.");
RNA_def_function_flag(func, FUNC_USE_CONTEXT|FUNC_USE_REPORTS);
parm= RNA_def_boolean(func, "apply_modifiers", 0, "", "Apply modifiers.");
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_enum(func, "settings", mesh_type_items, 0, "", "Modifier settings to apply.");
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_pointer(func, "mesh", "Mesh", "", "Mesh created from object, remove it if it is only used for export.");
RNA_def_function_return(func, parm);
func= RNA_def_function(srna, "convert_to_triface", "rna_Object_convert_to_triface");
RNA_def_function_ui_description(func, "Convert all mesh faces to triangles.");
RNA_def_function_flag(func, FUNC_USE_CONTEXT|FUNC_USE_REPORTS);
parm= RNA_def_pointer(func, "scene", "Scene", "", "Scene where the object belongs.");
RNA_def_property_flag(parm, PROP_REQUIRED);
/* duplis */
func= RNA_def_function(srna, "create_dupli_list", "rna_Object_create_duplilist");
RNA_def_function_ui_description(func, "Create a list of dupli objects for this object, needs to be freed manually with free_dupli_list.");
RNA_def_function_flag(func, FUNC_USE_CONTEXT|FUNC_USE_REPORTS);
func= RNA_def_function(srna, "free_dupli_list", "rna_Object_free_duplilist");
RNA_def_function_ui_description(func, "Free the list of dupli objects.");
RNA_def_function_flag(func, FUNC_USE_REPORTS);
/* vertex groups */
func= RNA_def_function(srna, "add_vertex_group", "rna_Object_add_vertex_group");
RNA_def_function_ui_description(func, "Add vertex group to object.");
parm= RNA_def_string(func, "name", "Group", 0, "", "Vertex group name."); /* optional */
parm= RNA_def_pointer(func, "group", "VertexGroup", "", "New vertex group.");
RNA_def_function_return(func, parm);
func= RNA_def_function(srna, "add_vertex_to_group", "rna_Object_add_vertex_to_group");
RNA_def_function_ui_description(func, "Add vertex to a vertex group.");
parm= RNA_def_int(func, "vertex_index", 0, 0, 0, "", "Vertex index.", 0, 0);
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_pointer(func, "group", "VertexGroup", "", "Vertex group to add vertex to.");
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_float(func, "weight", 0, 0.0f, 1.0f, "", "Vertex weight.", 0.0f, 1.0f);
RNA_def_property_flag(parm, PROP_REQUIRED);
parm= RNA_def_enum(func, "type", assign_mode_items, 0, "", "Vertex assign mode.");
RNA_def_property_flag(parm, PROP_REQUIRED);
/* Armature */
func= RNA_def_function(srna, "find_armature", "rna_Object_find_armature");
RNA_def_function_ui_description(func, "Find armature influencing this object as a parent or via a modifier.");
parm= RNA_def_pointer(func, "ob_arm", "Object", "", "Armature object influencing this object or NULL.");
RNA_def_function_return(func, parm);
/* DAG */
func= RNA_def_function(srna, "make_display_list", "rna_Object_make_display_list");
RNA_def_function_ui_description(func, "Update object's display data."); /* XXX describe better */
RNA_def_function_flag(func, FUNC_USE_CONTEXT);
prop= RNA_def_pointer(func, "scene", "Scene", "", "");
RNA_def_property_flag(prop, PROP_REQUIRED);
prop= RNA_def_pointer(func, "mesh", "Mesh", "", "Mesh created from object, remove it if it is only used for export.");
RNA_def_function_return(func, prop);
/* View */
func= RNA_def_function(srna, "is_visible", "rna_Object_is_visible");
RNA_def_function_ui_description(func, "Determine if object is visible in active scene.");
RNA_def_function_flag(func, FUNC_USE_CONTEXT);
parm= RNA_def_boolean(func, "is_visible", 0, "", "Object visibility.");
RNA_def_function_return(func, parm);
}
#endif

View File

@@ -547,21 +547,25 @@ static void rna_def_pose_channel(BlenderRNA *brna)
RNA_def_property_update(prop, NC_OBJECT|ND_POSE, "rna_Pose_update");
/* These three matrix properties await an implementation of the PROP_MATRIX subtype, which currently doesn't exist. */
/* prop= RNA_def_property(srna, "channel_matrix", PROP_FLOAT, PROP_MATRIX);
RNA_def_property_struct_type(prop, "chan_mat");
prop= RNA_def_property(srna, "channel_matrix", PROP_FLOAT, PROP_MATRIX);
RNA_def_property_float_sdna(prop, NULL, "chan_mat");
RNA_def_property_array(prop, 16);
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
RNA_def_property_ui_text(prop, "Channel Matrix", "4x4 matrix, before constraints.");*/
RNA_def_property_ui_text(prop, "Channel Matrix", "4x4 matrix, before constraints.");
/* kaito says this should not be user-editable; I disagree; power users should be able to force this in python; he's the boss. */
/* prop= RNA_def_property(srna, "pose_matrix", PROP_FLOAT, PROP_MATRIX);
RNA_def_property_struct_type(prop, "pose_mat");
prop= RNA_def_property(srna, "pose_matrix", PROP_FLOAT, PROP_MATRIX);
RNA_def_property_float_sdna(prop, NULL, "pose_mat");
RNA_def_property_array(prop, 16);
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
RNA_def_property_ui_text(prop, "Pose Matrix", "Final 4x4 matrix for this channel.");
/*
prop= RNA_def_property(srna, "constraint_inverse_matrix", PROP_FLOAT, PROP_MATRIX);
RNA_def_property_struct_type(prop, "constinv");
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
RNA_def_property_ui_text(prop, "Constraint Inverse Matrix", "4x4 matrix, defines transform from final position to unconstrained position."); */
RNA_def_property_ui_text(prop, "Constraint Inverse Matrix", "4x4 matrix, defines transform from final position to unconstrained position.");
*/
/* Head/Tail Coordinates (in Pose Space) - Automatically calculated... */
prop= RNA_def_property(srna, "pose_head", PROP_FLOAT, PROP_TRANSLATION);
@@ -757,6 +761,8 @@ static void rna_def_pose(BlenderRNA *brna)
RNA_def_property_int_funcs(prop, "rna_Pose_active_bone_group_index_get", "rna_Pose_active_bone_group_index_set", "rna_Pose_active_bone_group_index_range");
RNA_def_property_ui_text(prop, "Active Bone Group Index", "Active index in bone groups array.");
RNA_def_property_update(prop, NC_OBJECT|ND_POSE, "rna_Pose_update");
/* RNA_api_pose(srna); */
}
void RNA_def_pose(BlenderRNA *brna)

View File

@@ -0,0 +1,56 @@
/**
* $Id$
*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The Original Code is Copyright (C) 2009 Blender Foundation.
* All rights reserved.
*
*
* Contributor(s): Blender Foundation
*
* ***** END GPL LICENSE BLOCK *****
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include "RNA_define.h"
#include "RNA_types.h"
#include "DNA_object_types.h"
/* #include "BLO_sys_types.h" */
#ifdef RNA_RUNTIME
/* #include "DNA_anim_types.h" */
#include "DNA_action_types.h" /* bPose */
#else
void RNA_api_pose(StructRNA *srna)
{
/* FunctionRNA *func; */
/* PropertyRNA *parm; */
}
#endif

View File

@@ -1,5 +1,5 @@
/**
* $Id:
* $Id$
*
* ***** BEGIN GPL LICENSE BLOCK *****
*
@@ -21,7 +21,7 @@
* All rights reserved.
*
*
* Contributor(s): Joshua Leung
* Contributor(s): Joshua Leung, Arystanbek Dyussenov
*
* ***** END GPL LICENSE BLOCK *****
*/
@@ -40,9 +40,49 @@
#ifdef RNA_RUNTIME
#include "BKE_animsys.h"
#include "BKE_scene.h"
#include "BKE_depsgraph.h"
// Scene API stuff from kazanbas branch here...
#include "ED_object.h"
#include "WM_api.h"
static void rna_Scene_add_object(Scene *sce, ReportList *reports, Object *ob)
{
Base *base= object_in_scene(ob, sce);
if (base) {
BKE_report(reports, RPT_ERROR, "Object is already in this scene.");
return;
}
base= scene_add_base(sce, ob);
ob->id.us++;
/* this is similar to what object_add_type and add_object do */
ob->lay= base->lay= sce->lay;
ob->recalc |= OB_RECALC;
DAG_scene_sort(sce);
}
static void rna_Scene_remove_object(Scene *sce, ReportList *reports, Object *ob)
{
Base *base= object_in_scene(ob, sce);
if (!base) {
BKE_report(reports, RPT_ERROR, "Object is not in this scene.");
return;
}
/* as long as ED_base_object_free_and_unlink calls free_libblock_us, we don't have to decrement ob->id.us */
ED_base_object_free_and_unlink(sce, base);
}
static void rna_Scene_set_frame(Scene *sce, bContext *C, int frame)
{
sce->r.cfra= frame;
CLAMP(sce->r.cfra, MINAFRAME, MAXFRAME);
scene_update_for_newframe(sce, (1<<20) - 1);
WM_event_add_notifier(C, NC_SCENE|ND_FRAME, sce);
}
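A hedged Python sketch of the new Scene functions; the object name "Cube" is only an example:

import bpy

sce = bpy.data.scenes[0]
ob = bpy.data.objects["Cube"]  # hypothetical object name

sce.add_object(ob)             # reports an error if the object is already linked
sce.set_frame(1)               # clamps the frame, updates all objects, notifies the UI
sce.remove_object(ob)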
static KeyingSet *rna_Scene_add_keying_set(Scene *sce, ReportList *reports,
char name[], int absolute, int insertkey_needed, int insertkey_visual)
@@ -77,21 +117,37 @@ void RNA_api_scene(StructRNA *srna)
{
FunctionRNA *func;
PropertyRNA *parm;
// Scene API stuff from kazanbas branch here...
func= RNA_def_function(srna, "add_object", "rna_Scene_add_object");
RNA_def_function_ui_description(func, "Add object to scene.");
RNA_def_function_flag(func, FUNC_USE_REPORTS);
parm= RNA_def_pointer(func, "object", "Object", "", "Object to add to scene.");
RNA_def_property_flag(parm, PROP_REQUIRED);
func= RNA_def_function(srna, "remove_object", "rna_Scene_remove_object");
RNA_def_function_ui_description(func, "Remove object from scene.");
RNA_def_function_flag(func, FUNC_USE_REPORTS);
parm= RNA_def_pointer(func, "object", "Object", "", "Object to remove from scene.");
RNA_def_property_flag(parm, PROP_REQUIRED);
func= RNA_def_function(srna, "set_frame", "rna_Scene_set_frame");
RNA_def_function_flag(func, FUNC_USE_CONTEXT);
RNA_def_function_ui_description(func, "Set scene frame updating all objects immediately.");
parm= RNA_def_int(func, "frame", 0, MINAFRAME, MAXFRAME, "", "Frame number to set.", MINAFRAME, MAXFRAME);
RNA_def_property_flag(parm, PROP_REQUIRED);
/* Add Keying Set */
func= RNA_def_function(srna, "add_keying_set", "rna_Scene_add_keying_set");
RNA_def_function_ui_description(func, "Add a new Keying Set to Scene.");
RNA_def_function_flag(func, FUNC_USE_REPORTS);
/* returns the new KeyingSet */
parm= RNA_def_pointer(func, "keyingset", "KeyingSet", "", "Newly created Keying Set.");
RNA_def_function_return(func, parm);
/* name */
RNA_def_string(func, "name", "KeyingSet", 64, "Name", "Name of Keying Set");
/* flags */
RNA_def_boolean(func, "absolute", 1, "Absolute", "Keying Set defines specific paths/settings to be keyframed (i.e. is not reliant on context info)");
/* keying flags */
RNA_def_boolean(func, "insertkey_needed", 0, "Insert Keyframes - Only Needed", "Only insert keyframes where they're needed in the relevant F-Curves.");
RNA_def_boolean(func, "insertkey_visual", 0, "Insert Keyframes - Visual", "Insert keyframes based on 'visual transforms'.");
}

View File

@@ -345,6 +345,27 @@ static char *pyrna_enum_as_string(PointerRNA *ptr, PropertyRNA *prop)
return result;
}
static int pyrna_string_to_enum(PyObject *item, PointerRNA *ptr, PropertyRNA *prop, int *val, const char *error_prefix)
{
char *param= _PyUnicode_AsString(item);
if (param==NULL) {
char *enum_str= pyrna_enum_as_string(ptr, prop);
PyErr_Format(PyExc_TypeError, "%.200s expected a string enum type in (%.200s)", error_prefix, enum_str);
MEM_freeN(enum_str);
return 0;
} else {
if (!RNA_property_enum_value(BPy_GetContext(), ptr, prop, param, val)) {
char *enum_str= pyrna_enum_as_string(ptr, prop);
PyErr_Format(PyExc_TypeError, "%.200s enum \"%.200s\" not found in (%.200s)", error_prefix, param, enum_str);
MEM_freeN(enum_str);
return 0;
}
}
return 1;
}
PyObject * pyrna_prop_to_py(PointerRNA *ptr, PropertyRNA *prop)
{
PyObject *ret;
@@ -603,25 +624,34 @@ int pyrna_py_to_prop(PointerRNA *ptr, PropertyRNA *prop, void *data, PyObject *v
}
case PROP_ENUM:
{
char *param = _PyUnicode_AsString(value);
if (param==NULL) {
char *enum_str= pyrna_enum_as_string(ptr, prop);
PyErr_Format(PyExc_TypeError, "%.200s expected a string enum type in (%.200s)", error_prefix, enum_str);
MEM_freeN(enum_str);
return -1;
} else {
int val;
if (RNA_property_enum_value(BPy_GetContext(), ptr, prop, param, &val)) {
if(data) *((int*)data)= val;
else RNA_property_enum_set(ptr, prop, val);
} else {
char *enum_str= pyrna_enum_as_string(ptr, prop);
PyErr_Format(PyExc_TypeError, "%.200s enum \"%.200s\" not found in (%.200s)", error_prefix, param, enum_str);
MEM_freeN(enum_str);
int val, i;
if (PyUnicode_Check(value)) {
if (!pyrna_string_to_enum(value, ptr, prop, &val, error_prefix))
return -1;
}
else if (PyTuple_Check(value)) {
/* tuple of enum items, concatenate all values with OR */
val= 0;
for (i= 0; i < PyTuple_Size(value); i++) {
int tmpval;
/* PyTuple_GET_ITEM returns a borrowed reference */
if (!pyrna_string_to_enum(PyTuple_GET_ITEM(value, i), ptr, prop, &tmpval, error_prefix))
return -1;
val |= tmpval;
}
}
else {
char *enum_str= pyrna_enum_as_string(ptr, prop);
PyErr_Format(PyExc_TypeError, "%.200s expected a string enum or a tuple of strings in (%.200s)", error_prefix, enum_str);
MEM_freeN(enum_str);
return -1;
}
if(data) *((int*)data)= val;
else RNA_property_enum_set(ptr, prop, val);
break;
}
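To illustrate what the tuple branch above computes, here is a standalone Python snippet mimicking the OR-concatenation for a hypothetical flag enum (the names and values are made up):

# each string is resolved to its enum value, then the values are OR-ed together
values = {'A': 1, 'B': 2, 'C': 4}

val = 0
for name in ('A', 'C'):  # e.g. assigning some_prop = ('A', 'C') on a flag enum
    val |= values[name]

assert val == 5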