Pare down assimp thirdparty library

Dane Johnson
2021-01-20 12:53:51 -06:00
parent 68f373bf69
commit f95ba69047
244 changed files with 0 additions and 91776 deletions

View File

@@ -1 +0,0 @@
from .core import *

View File

@@ -1,546 +0,0 @@
"""
PyAssimp
This is the main module of PyAssimp.
"""
import sys
if sys.version_info < (2,6):
raise RuntimeError('pyassimp: need python 2.6 or newer')
# xrange was renamed range in Python 3 and the original range from Python 2 was removed.
# To keep compatibility with both Python 2 and 3, xrange is set to range for version 3.0 and up.
if sys.version_info >= (3,0):
xrange = range
try: import numpy
except ImportError: numpy = None
import logging
import ctypes
logger = logging.getLogger("pyassimp")
# attach default null handler to logger so it doesn't complain
# even if you don't attach another handler to logger
logger.addHandler(logging.NullHandler())
from . import structs
from . import helper
from . import postprocess
from .errors import AssimpError
class AssimpLib(object):
"""
Assimp-Singleton
"""
load, load_mem, export, export_blob, release, dll = helper.search_library()
_assimp_lib = AssimpLib()
def make_tuple(ai_obj, type = None):
res = None
#notes:
# ai_obj._fields_ = [ ("attr", c_type), ... ]
# getattr(ai_obj, e[0]).__class__ == float
if isinstance(ai_obj, structs.Matrix4x4):
if numpy:
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((4,4))
else:
res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_]
res = [res[i:i+4] for i in xrange(0,16,4)]
elif isinstance(ai_obj, structs.Matrix3x3):
if numpy:
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((3,3))
else:
res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_]
res = [res[i:i+3] for i in xrange(0,9,3)]
else:
if numpy:
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_])
else:
res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_]
return res
# Returns unicode object for Python 2, and str object for Python 3.
def _convert_assimp_string(assimp_string):
if sys.version_info >= (3, 0):
return str(assimp_string.data, errors='ignore')
else:
return unicode(assimp_string.data, errors='ignore')
# It is faster and more correct to have an init function for each assimp class
def _init_face(aiFace):
aiFace.indices = [aiFace.mIndices[i] for i in range(aiFace.mNumIndices)]
assimp_struct_inits = { structs.Face : _init_face }
def call_init(obj, caller = None):
if helper.hasattr_silent(obj,'contents'): #pointer
_init(obj.contents, obj, caller)
else:
_init(obj,parent=caller)
def _is_init_type(obj):
if obj and helper.hasattr_silent(obj,'contents'): #pointer
return _is_init_type(obj[0])
# null-pointer case that arises when we reach a mesh attribute
    # like mBitangents, which uses mNumVertices rather than mNumBitangents,
    # so it breaks the 'is iterable' check.
# Basically:
# FIXME!
elif not bool(obj):
return False
tname = obj.__class__.__name__
return not (tname[:2] == 'c_' or tname == 'Structure' \
or tname == 'POINTER') and not isinstance(obj, (int, str, bytes))
def _init(self, target = None, parent = None):
"""
Custom initialize() for C structs, adds safely accessible member functionality.
:param target: set the object which receive the added methods. Useful when manipulating
pointers, to skip the intermediate 'contents' deferencing.
"""
if not target:
target = self
dirself = dir(self)
for m in dirself:
if m.startswith("_"):
continue
if m.startswith('mNum'):
if 'm' + m[4:] in dirself:
continue # will be processed later on
else:
name = m[1:].lower()
obj = getattr(self, m)
setattr(target, name, obj)
continue
if m == 'mName':
target.name = str(_convert_assimp_string(self.mName))
target.__class__.__repr__ = lambda x: str(x.__class__) + "(" + getattr(x, 'name','') + ")"
target.__class__.__str__ = lambda x: getattr(x, 'name', '')
continue
name = m[1:].lower()
obj = getattr(self, m)
# Create tuples
if isinstance(obj, structs.assimp_structs_as_tuple):
setattr(target, name, make_tuple(obj))
logger.debug(str(self) + ": Added array " + str(getattr(target, name)) + " as self." + name.lower())
continue
if m.startswith('m'):
if name == "parent":
setattr(target, name, parent)
logger.debug("Added a parent as self." + name)
continue
if helper.hasattr_silent(self, 'mNum' + m[1:]):
length = getattr(self, 'mNum' + m[1:])
# -> special case: properties are
# stored as a dict.
if m == 'mProperties':
setattr(target, name, _get_properties(obj, length))
continue
if not length: # empty!
setattr(target, name, [])
logger.debug(str(self) + ": " + name + " is an empty list.")
continue
try:
if obj._type_ in structs.assimp_structs_as_tuple:
if numpy:
setattr(target, name, numpy.array([make_tuple(obj[i]) for i in range(length)], dtype=numpy.float32))
logger.debug(str(self) + ": Added an array of numpy arrays (type "+ str(type(obj)) + ") as self." + name)
else:
setattr(target, name, [make_tuple(obj[i]) for i in range(length)])
logger.debug(str(self) + ": Added a list of lists (type "+ str(type(obj)) + ") as self." + name)
else:
setattr(target, name, [obj[i] for i in range(length)]) #TODO: maybe not necessary to recreate an array?
logger.debug(str(self) + ": Added list of " + str(obj) + " " + name + " as self." + name + " (type: " + str(type(obj)) + ")")
# initialize array elements
try:
init = assimp_struct_inits[type(obj[0])]
except KeyError:
if _is_init_type(obj[0]):
for e in getattr(target, name):
call_init(e, target)
else:
for e in getattr(target, name):
init(e)
except IndexError:
logger.error("in " + str(self) +" : mismatch between mNum" + name + " and the actual amount of data in m" + name + ". This may be due to version mismatch between libassimp and pyassimp. Quitting now.")
sys.exit(1)
except ValueError as e:
logger.error("In " + str(self) + "->" + name + ": " + str(e) + ". Quitting now.")
if "setting an array element with a sequence" in str(e):
logger.error("Note that pyassimp does not currently "
"support meshes with mixed triangles "
"and quads. Try to load your mesh with"
" a post-processing to triangulate your"
" faces.")
raise e
else: # starts with 'm' but not iterable
setattr(target, name, obj)
logger.debug("Added " + name + " as self." + name + " (type: " + str(type(obj)) + ")")
if _is_init_type(obj):
call_init(obj, target)
if isinstance(self, structs.Mesh):
_finalize_mesh(self, target)
if isinstance(self, structs.Texture):
_finalize_texture(self, target)
if isinstance(self, structs.Metadata):
_finalize_metadata(self, target)
return self
def pythonize_assimp(type, obj, scene):
""" This method modify the Assimp data structures
to make them easier to work with in Python.
Supported operations:
- MESH: replace a list of mesh IDs by reference to these meshes
- ADDTRANSFORMATION: add a reference to an object's transformation taken from their associated node.
:param type: the type of modification to operate (cf above)
:param obj: the input object to modify
:param scene: a reference to the whole scene
"""
if type == "MESH":
meshes = []
for i in obj:
meshes.append(scene.meshes[i])
return meshes
if type == "ADDTRANSFORMATION":
def getnode(node, name):
if node.name == name: return node
for child in node.children:
n = getnode(child, name)
if n: return n
node = getnode(scene.rootnode, obj.name)
if not node:
raise AssimpError("Object " + str(obj) + " has no associated node!")
setattr(obj, "transformation", node.transformation)
def recur_pythonize(node, scene):
    '''
    Recursively calls pythonize_assimp on the node tree, applying several
    post-processing steps to pythonize the assimp data structures.
    '''
node.meshes = pythonize_assimp("MESH", node.meshes, scene)
for mesh in node.meshes:
mesh.material = scene.materials[mesh.materialindex]
for cam in scene.cameras:
pythonize_assimp("ADDTRANSFORMATION", cam, scene)
for c in node.children:
recur_pythonize(c, scene)
def load(filename,
file_type = None,
processing = postprocess.aiProcess_Triangulate):
'''
Load a model into a scene. On failure throws AssimpError.
Arguments
---------
filename: Either a filename or a file object to load model from.
        If a file object is passed, file_type MUST be specified,
        otherwise Assimp has no idea which importer to use.
This is named 'filename' so as to not break legacy code.
processing: assimp postprocessing parameters. Verbose keywords are imported
from postprocessing, and the parameters can be combined bitwise to
generate the final processing value. Note that the default value will
triangulate quad faces. Example of generating other possible values:
processing = (pyassimp.postprocess.aiProcess_Triangulate |
pyassimp.postprocess.aiProcess_OptimizeMeshes)
file_type: string of file extension, such as 'stl'
Returns
---------
Scene object with model data
'''
if hasattr(filename, 'read'):
# This is the case where a file object has been passed to load.
# It is calling the following function:
# const aiScene* aiImportFileFromMemory(const char* pBuffer,
# unsigned int pLength,
# unsigned int pFlags,
# const char* pHint)
if file_type is None:
raise AssimpError('File type must be specified when passing file objects!')
data = filename.read()
model = _assimp_lib.load_mem(data,
len(data),
processing,
file_type)
else:
# a filename string has been passed
model = _assimp_lib.load(filename.encode(sys.getfilesystemencoding()), processing)
if not model:
raise AssimpError('Could not import file!')
scene = _init(model.contents)
recur_pythonize(scene.rootnode, scene)
return scene
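# A minimal usage sketch (illustration only, not part of the original module);
# 'model.obj' is a hypothetical path:
#
#   from pyassimp import load, release
#   scene = load('model.obj')
#   for mesh in scene.meshes:
#       print(len(mesh.vertices), 'vertices,', len(mesh.faces), 'faces')
#   release(scene)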
def export(scene,
filename,
file_type = None,
processing = postprocess.aiProcess_Triangulate):
'''
Export a scene. On failure throws AssimpError.
Arguments
---------
scene: scene to export.
filename: Filename that the scene should be exported to.
file_type: string of file exporter to use. For example "collada".
processing: assimp postprocessing parameters. Verbose keywords are imported
from postprocessing, and the parameters can be combined bitwise to
generate the final processing value. Note that the default value will
triangulate quad faces. Example of generating other possible values:
processing = (pyassimp.postprocess.aiProcess_Triangulate |
pyassimp.postprocess.aiProcess_OptimizeMeshes)
'''
exportStatus = _assimp_lib.export(ctypes.pointer(scene), file_type.encode("ascii"), filename.encode(sys.getfilesystemencoding()), processing)
if exportStatus != 0:
raise AssimpError('Could not export scene!')
def export_blob(scene,
file_type = None,
processing = postprocess.aiProcess_Triangulate):
'''
Export a scene and return a blob in the correct format. On failure throws AssimpError.
Arguments
---------
scene: scene to export.
file_type: string of file exporter to use. For example "collada".
processing: assimp postprocessing parameters. Verbose keywords are imported
from postprocessing, and the parameters can be combined bitwise to
generate the final processing value. Note that the default value will
triangulate quad faces. Example of generating other possible values:
processing = (pyassimp.postprocess.aiProcess_Triangulate |
pyassimp.postprocess.aiProcess_OptimizeMeshes)
Returns
---------
Pointer to structs.ExportDataBlob
'''
exportBlobPtr = _assimp_lib.export_blob(ctypes.pointer(scene), file_type.encode("ascii"), processing)
    if not exportBlobPtr:  # NULL check; ctypes pointers never compare equal to 0
raise AssimpError('Could not export scene to blob!')
return exportBlobPtr
def release(scene):
_assimp_lib.release(ctypes.pointer(scene))
def _finalize_texture(tex, target):
setattr(target, "achformathint", tex.achFormatHint)
if numpy:
data = numpy.array([make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)])
else:
data = [make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)]
setattr(target, "data", data)
def _finalize_mesh(mesh, target):
""" Building of meshes is a bit specific.
We override here the various datasets that can
not be process as regular fields.
For instance, the length of the normals array is
mNumVertices (no mNumNormals is available)
"""
nb_vertices = getattr(mesh, "mNumVertices")
def fill(name):
mAttr = getattr(mesh, name)
if numpy:
if mAttr:
data = numpy.array([make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)], dtype=numpy.float32)
setattr(target, name[1:].lower(), data)
else:
setattr(target, name[1:].lower(), numpy.array([], dtype="float32"))
else:
if mAttr:
data = [make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)]
setattr(target, name[1:].lower(), data)
else:
setattr(target, name[1:].lower(), [])
def fillarray(name):
mAttr = getattr(mesh, name)
data = []
for index, mSubAttr in enumerate(mAttr):
if mSubAttr:
data.append([make_tuple(getattr(mesh, name)[index][i]) for i in range(nb_vertices)])
if numpy:
setattr(target, name[1:].lower(), numpy.array(data, dtype=numpy.float32))
else:
setattr(target, name[1:].lower(), data)
fill("mNormals")
fill("mTangents")
fill("mBitangents")
fillarray("mColors")
fillarray("mTextureCoords")
# prepare faces
if numpy:
faces = numpy.array([f.indices for f in target.faces], dtype=numpy.int32)
else:
faces = [f.indices for f in target.faces]
setattr(target, 'faces', faces)
def _init_metadata_entry(entry):
entry.type = entry.mType
if entry.type == structs.MetadataEntry.AI_BOOL:
entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_bool)).contents.value
elif entry.type == structs.MetadataEntry.AI_INT32:
entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_int32)).contents.value
elif entry.type == structs.MetadataEntry.AI_UINT64:
entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_uint64)).contents.value
elif entry.type == structs.MetadataEntry.AI_FLOAT:
entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_float)).contents.value
elif entry.type == structs.MetadataEntry.AI_DOUBLE:
entry.data = ctypes.cast(entry.mData, ctypes.POINTER(ctypes.c_double)).contents.value
elif entry.type == structs.MetadataEntry.AI_AISTRING:
assimp_string = ctypes.cast(entry.mData, ctypes.POINTER(structs.String)).contents
entry.data = _convert_assimp_string(assimp_string)
elif entry.type == structs.MetadataEntry.AI_AIVECTOR3D:
assimp_vector = ctypes.cast(entry.mData, ctypes.POINTER(structs.Vector3D)).contents
entry.data = make_tuple(assimp_vector)
return entry
def _finalize_metadata(metadata, target):
""" Building the metadata object is a bit specific.
Firstly, there are two separate arrays: one with metadata keys and one
with metadata values, and there are no corresponding mNum* attributes,
so the C arrays are not converted to Python arrays using the generic
code in the _init function.
    Secondly, a metadata entry value has to be cast according to the declared
    metadata entry type.
"""
length = metadata.mNumProperties
setattr(target, 'keys', [str(_convert_assimp_string(metadata.mKeys[i])) for i in range(length)])
setattr(target, 'values', [_init_metadata_entry(metadata.mValues[i]) for i in range(length)])
class PropertyGetter(dict):
def __getitem__(self, key):
semantic = 0
if isinstance(key, tuple):
key, semantic = key
return dict.__getitem__(self, (key, semantic))
def keys(self):
for k in dict.keys(self):
yield k[0]
def __iter__(self):
return self.keys()
def items(self):
for k, v in dict.items(self):
yield k[0], v
def _get_properties(properties, length):
"""
Convenience Function to get the material properties as a dict
and values in a python format.
"""
result = {}
#read all properties
for p in [properties[i] for i in range(length)]:
#the name
p = p.contents
key = str(_convert_assimp_string(p.mKey))
key = (key.split('.')[1], p.mSemantic)
#the data
if p.mType == 1:
arr = ctypes.cast(p.mData,
ctypes.POINTER(ctypes.c_float * int(p.mDataLength/ctypes.sizeof(ctypes.c_float)))
).contents
value = [x for x in arr]
elif p.mType == 3: #string can't be an array
value = _convert_assimp_string(ctypes.cast(p.mData, ctypes.POINTER(structs.MaterialPropertyString)).contents)
elif p.mType == 4:
arr = ctypes.cast(p.mData,
ctypes.POINTER(ctypes.c_int * int(p.mDataLength/ctypes.sizeof(ctypes.c_int)))
).contents
value = [x for x in arr]
else:
value = p.mData[:p.mDataLength]
if len(value) == 1:
[value] = value
result[key] = value
return PropertyGetter(result)
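# Sketch of how PropertyGetter lookups behave (hypothetical values): a bare
# key defaults to texture semantic 0, while a (key, semantic) tuple addresses
# a specific texture slot explicitly.
#
#   mat = PropertyGetter({('diffuse', 0): [0.8, 0.8, 0.8, 1.0]})
#   mat['diffuse']       # -> [0.8, 0.8, 0.8, 1.0]
#   mat[('diffuse', 0)]  # -> the same entry, explicit semantic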
def decompose_matrix(matrix):
if not isinstance(matrix, structs.Matrix4x4):
raise AssimpError("pyassimp.decompose_matrix failed: Not a Matrix4x4!")
scaling = structs.Vector3D()
rotation = structs.Quaternion()
position = structs.Vector3D()
_assimp_lib.dll.aiDecomposeMatrix(ctypes.pointer(matrix),
ctypes.byref(scaling),
ctypes.byref(rotation),
ctypes.byref(position))
return scaling._init(), rotation._init(), position._init()
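# A minimal command-line sketch (illustration, not part of the original
# module): load the model whose path is given as the first argument, print a
# short summary, then free the C-side data again.
if __name__ == '__main__':
    if len(sys.argv) != 2:
        sys.exit('usage: python -m pyassimp.core <model-file>')
    _scene = load(sys.argv[1])
    print('Loaded %s: %d mesh(es), %d material(s)'
          % (sys.argv[1], len(_scene.meshes), len(_scene.materials)))
    for _mesh in _scene.meshes:
        print(' - %d vertices, %d faces' % (len(_mesh.vertices), len(_mesh.faces)))
    release(_scene)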

View File

@@ -1,11 +0,0 @@
#-*- coding: UTF-8 -*-
"""
All possible errors.
"""
class AssimpError(BaseException):
"""
    Raised when an internal Assimp error occurs.
"""
pass

View File

@@ -1,41 +0,0 @@
FORMATS = ["CSM",
"LWS",
"B3D",
"COB",
"PLY",
"IFC",
"OFF",
"SMD",
"IRRMESH",
"3D",
"DAE",
"MDL",
"HMP",
"TER",
"WRL",
"XML",
"NFF",
"AC",
"OBJ",
"3DS",
"STL",
"IRR",
"Q3O",
"Q3D",
"MS3D",
"Q3S",
"ZGL",
"MD2",
"X",
"BLEND",
"XGL",
"MD5MESH",
"MAX",
"LXO",
"DXF",
"BVH",
"LWO",
"NDO"]
def available_formats():
return FORMATS

View File

@@ -1,281 +0,0 @@
#-*- coding: UTF-8 -*-
"""
Some fancy helper functions.
"""
import os
import ctypes
import operator
from distutils.sysconfig import get_python_lib
import re
import sys
try: import numpy
except ImportError: numpy = None
import logging
logger = logging.getLogger("pyassimp")
from .errors import AssimpError
additional_dirs, ext_whitelist = [],[]
# populate search directories and lists of allowed file extensions
# depending on the platform we're running on.
if os.name=='posix':
additional_dirs.append('./')
additional_dirs.append('/usr/lib/')
additional_dirs.append('/usr/lib/x86_64-linux-gnu/')
additional_dirs.append('/usr/local/lib/')
if 'LD_LIBRARY_PATH' in os.environ:
additional_dirs.extend([item for item in os.environ['LD_LIBRARY_PATH'].split(':') if item])
# check if running from anaconda.
if "conda" or "continuum" in sys.version.lower():
cur_path = get_python_lib()
pattern = re.compile('.*\/lib\/')
conda_lib = pattern.match(cur_path).group()
logger.info("Adding Anaconda lib path:"+ conda_lib)
additional_dirs.append(conda_lib)
# note - this won't catch libassimp.so.N.n, but
# currently there's always a symlink called
# libassimp.so in /usr/local/lib.
ext_whitelist.append('.so')
# libassimp.dylib in /usr/local/lib
ext_whitelist.append('.dylib')
elif os.name=='nt':
ext_whitelist.append('.dll')
path_dirs = os.environ['PATH'].split(';')
additional_dirs.extend(path_dirs)
def vec2tuple(x):
""" Converts a VECTOR3D to a Tuple """
return (x.x, x.y, x.z)
def transform(vector3, matrix4x4):
""" Apply a transformation matrix on a 3D vector.
:param vector3: array with 3 elements
:param matrix4x4: 4x4 matrix
"""
if numpy:
return numpy.dot(matrix4x4, numpy.append(vector3, 1.))
else:
m0,m1,m2,m3 = matrix4x4; x,y,z = vector3
return [
m0[0]*x + m0[1]*y + m0[2]*z + m0[3],
m1[0]*x + m1[1]*y + m1[2]*z + m1[3],
m2[0]*x + m2[1]*y + m2[2]*z + m2[3],
m3[0]*x + m3[1]*y + m3[2]*z + m3[3]
]
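# A tiny self-check with synthetic data (runs only when this module is
# executed directly, e.g. via 'python -m pyassimp.helper'): transforming a
# point by the identity matrix must return the same coordinates.
if __name__ == '__main__':
    _identity = [[1., 0., 0., 0.],
                 [0., 1., 0., 0.],
                 [0., 0., 1., 0.],
                 [0., 0., 0., 1.]]
    assert list(transform([1., 2., 3.], _identity))[:3] == [1., 2., 3.]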
def _inv(matrix4x4):
m0,m1,m2,m3 = matrix4x4
det = m0[3]*m1[2]*m2[1]*m3[0] - m0[2]*m1[3]*m2[1]*m3[0] - \
m0[3]*m1[1]*m2[2]*m3[0] + m0[1]*m1[3]*m2[2]*m3[0] + \
m0[2]*m1[1]*m2[3]*m3[0] - m0[1]*m1[2]*m2[3]*m3[0] - \
m0[3]*m1[2]*m2[0]*m3[1] + m0[2]*m1[3]*m2[0]*m3[1] + \
m0[3]*m1[0]*m2[2]*m3[1] - m0[0]*m1[3]*m2[2]*m3[1] - \
m0[2]*m1[0]*m2[3]*m3[1] + m0[0]*m1[2]*m2[3]*m3[1] + \
m0[3]*m1[1]*m2[0]*m3[2] - m0[1]*m1[3]*m2[0]*m3[2] - \
m0[3]*m1[0]*m2[1]*m3[2] + m0[0]*m1[3]*m2[1]*m3[2] + \
m0[1]*m1[0]*m2[3]*m3[2] - m0[0]*m1[1]*m2[3]*m3[2] - \
m0[2]*m1[1]*m2[0]*m3[3] + m0[1]*m1[2]*m2[0]*m3[3] + \
m0[2]*m1[0]*m2[1]*m3[3] - m0[0]*m1[2]*m2[1]*m3[3] - \
m0[1]*m1[0]*m2[2]*m3[3] + m0[0]*m1[1]*m2[2]*m3[3]
return[[( m1[2]*m2[3]*m3[1] - m1[3]*m2[2]*m3[1] + m1[3]*m2[1]*m3[2] - m1[1]*m2[3]*m3[2] - m1[2]*m2[1]*m3[3] + m1[1]*m2[2]*m3[3]) /det,
( m0[3]*m2[2]*m3[1] - m0[2]*m2[3]*m3[1] - m0[3]*m2[1]*m3[2] + m0[1]*m2[3]*m3[2] + m0[2]*m2[1]*m3[3] - m0[1]*m2[2]*m3[3]) /det,
( m0[2]*m1[3]*m3[1] - m0[3]*m1[2]*m3[1] + m0[3]*m1[1]*m3[2] - m0[1]*m1[3]*m3[2] - m0[2]*m1[1]*m3[3] + m0[1]*m1[2]*m3[3]) /det,
( m0[3]*m1[2]*m2[1] - m0[2]*m1[3]*m2[1] - m0[3]*m1[1]*m2[2] + m0[1]*m1[3]*m2[2] + m0[2]*m1[1]*m2[3] - m0[1]*m1[2]*m2[3]) /det],
[( m1[3]*m2[2]*m3[0] - m1[2]*m2[3]*m3[0] - m1[3]*m2[0]*m3[2] + m1[0]*m2[3]*m3[2] + m1[2]*m2[0]*m3[3] - m1[0]*m2[2]*m3[3]) /det,
( m0[2]*m2[3]*m3[0] - m0[3]*m2[2]*m3[0] + m0[3]*m2[0]*m3[2] - m0[0]*m2[3]*m3[2] - m0[2]*m2[0]*m3[3] + m0[0]*m2[2]*m3[3]) /det,
( m0[3]*m1[2]*m3[0] - m0[2]*m1[3]*m3[0] - m0[3]*m1[0]*m3[2] + m0[0]*m1[3]*m3[2] + m0[2]*m1[0]*m3[3] - m0[0]*m1[2]*m3[3]) /det,
( m0[2]*m1[3]*m2[0] - m0[3]*m1[2]*m2[0] + m0[3]*m1[0]*m2[2] - m0[0]*m1[3]*m2[2] - m0[2]*m1[0]*m2[3] + m0[0]*m1[2]*m2[3]) /det],
[( m1[1]*m2[3]*m3[0] - m1[3]*m2[1]*m3[0] + m1[3]*m2[0]*m3[1] - m1[0]*m2[3]*m3[1] - m1[1]*m2[0]*m3[3] + m1[0]*m2[1]*m3[3]) /det,
( m0[3]*m2[1]*m3[0] - m0[1]*m2[3]*m3[0] - m0[3]*m2[0]*m3[1] + m0[0]*m2[3]*m3[1] + m0[1]*m2[0]*m3[3] - m0[0]*m2[1]*m3[3]) /det,
( m0[1]*m1[3]*m3[0] - m0[3]*m1[1]*m3[0] + m0[3]*m1[0]*m3[1] - m0[0]*m1[3]*m3[1] - m0[1]*m1[0]*m3[3] + m0[0]*m1[1]*m3[3]) /det,
( m0[3]*m1[1]*m2[0] - m0[1]*m1[3]*m2[0] - m0[3]*m1[0]*m2[1] + m0[0]*m1[3]*m2[1] + m0[1]*m1[0]*m2[3] - m0[0]*m1[1]*m2[3]) /det],
[( m1[2]*m2[1]*m3[0] - m1[1]*m2[2]*m3[0] - m1[2]*m2[0]*m3[1] + m1[0]*m2[2]*m3[1] + m1[1]*m2[0]*m3[2] - m1[0]*m2[1]*m3[2]) /det,
( m0[1]*m2[2]*m3[0] - m0[2]*m2[1]*m3[0] + m0[2]*m2[0]*m3[1] - m0[0]*m2[2]*m3[1] - m0[1]*m2[0]*m3[2] + m0[0]*m2[1]*m3[2]) /det,
( m0[2]*m1[1]*m3[0] - m0[1]*m1[2]*m3[0] - m0[2]*m1[0]*m3[1] + m0[0]*m1[2]*m3[1] + m0[1]*m1[0]*m3[2] - m0[0]*m1[1]*m3[2]) /det,
( m0[1]*m1[2]*m2[0] - m0[2]*m1[1]*m2[0] + m0[2]*m1[0]*m2[1] - m0[0]*m1[2]*m2[1] - m0[1]*m1[0]*m2[2] + m0[0]*m1[1]*m2[2]) /det]]
def get_bounding_box(scene):
bb_min = [1e10, 1e10, 1e10] # x,y,z
bb_max = [-1e10, -1e10, -1e10] # x,y,z
inv = numpy.linalg.inv if numpy else _inv
return get_bounding_box_for_node(scene.rootnode, bb_min, bb_max, inv(scene.rootnode.transformation))
def get_bounding_box_for_node(node, bb_min, bb_max, transformation):
if numpy:
transformation = numpy.dot(transformation, node.transformation)
else:
t0,t1,t2,t3 = transformation
T0,T1,T2,T3 = node.transformation
transformation = [ [
t0[0]*T0[0] + t0[1]*T1[0] + t0[2]*T2[0] + t0[3]*T3[0],
t0[0]*T0[1] + t0[1]*T1[1] + t0[2]*T2[1] + t0[3]*T3[1],
t0[0]*T0[2] + t0[1]*T1[2] + t0[2]*T2[2] + t0[3]*T3[2],
t0[0]*T0[3] + t0[1]*T1[3] + t0[2]*T2[3] + t0[3]*T3[3]
],[
t1[0]*T0[0] + t1[1]*T1[0] + t1[2]*T2[0] + t1[3]*T3[0],
t1[0]*T0[1] + t1[1]*T1[1] + t1[2]*T2[1] + t1[3]*T3[1],
t1[0]*T0[2] + t1[1]*T1[2] + t1[2]*T2[2] + t1[3]*T3[2],
t1[0]*T0[3] + t1[1]*T1[3] + t1[2]*T2[3] + t1[3]*T3[3]
],[
t2[0]*T0[0] + t2[1]*T1[0] + t2[2]*T2[0] + t2[3]*T3[0],
t2[0]*T0[1] + t2[1]*T1[1] + t2[2]*T2[1] + t2[3]*T3[1],
t2[0]*T0[2] + t2[1]*T1[2] + t2[2]*T2[2] + t2[3]*T3[2],
t2[0]*T0[3] + t2[1]*T1[3] + t2[2]*T2[3] + t2[3]*T3[3]
],[
t3[0]*T0[0] + t3[1]*T1[0] + t3[2]*T2[0] + t3[3]*T3[0],
t3[0]*T0[1] + t3[1]*T1[1] + t3[2]*T2[1] + t3[3]*T3[1],
t3[0]*T0[2] + t3[1]*T1[2] + t3[2]*T2[2] + t3[3]*T3[2],
t3[0]*T0[3] + t3[1]*T1[3] + t3[2]*T2[3] + t3[3]*T3[3]
] ]
for mesh in node.meshes:
for v in mesh.vertices:
v = transform(v, transformation)
bb_min[0] = min(bb_min[0], v[0])
bb_min[1] = min(bb_min[1], v[1])
bb_min[2] = min(bb_min[2], v[2])
bb_max[0] = max(bb_max[0], v[0])
bb_max[1] = max(bb_max[1], v[1])
bb_max[2] = max(bb_max[2], v[2])
for child in node.children:
bb_min, bb_max = get_bounding_box_for_node(child, bb_min, bb_max, transformation)
return bb_min, bb_max
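# Typical use (sketch, assuming a scene loaded with pyassimp and a
# hypothetical 'model.obj'):
#
#   from pyassimp import load
#   from pyassimp.helper import get_bounding_box
#   scene = load('model.obj')
#   bb_min, bb_max = get_bounding_box(scene)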
def try_load_functions(library_path, dll):
'''
    Try to bind to aiImportFile, aiReleaseImport and the related export functions
Arguments
---------
library_path: path to current lib
dll: ctypes handle to library
Returns
---------
If unsuccessful:
None
If successful:
Tuple containing (library_path,
load from filename function,
load from memory function,
export to filename function,
export to blob function,
release function,
ctypes handle to assimp library)
'''
try:
load = dll.aiImportFile
release = dll.aiReleaseImport
load_mem = dll.aiImportFileFromMemory
export = dll.aiExportScene
export2blob = dll.aiExportSceneToBlob
except AttributeError:
#OK, this is a library, but it doesn't have the functions we need
return None
# library found!
from .structs import Scene, ExportDataBlob
load.restype = ctypes.POINTER(Scene)
load_mem.restype = ctypes.POINTER(Scene)
export2blob.restype = ctypes.POINTER(ExportDataBlob)
return (library_path, load, load_mem, export, export2blob, release, dll)
def search_library():
'''
Loads the assimp library.
Throws exception AssimpError if no library_path is found
Returns: tuple, (load from filename function,
load from memory function,
export to filename function,
export to blob function,
release function,
dll)
'''
#this path
folder = os.path.dirname(__file__)
# silence 'DLL not found' message boxes on win
try:
ctypes.windll.kernel32.SetErrorMode(0x8007)
except AttributeError:
pass
candidates = []
# test every file
for curfolder in [folder]+additional_dirs:
if os.path.isdir(curfolder):
for filename in os.listdir(curfolder):
# our minimum requirement for candidates is that
# they should contain 'assimp' somewhere in
# their name
                if 'assimp' not in filename.lower():
                    continue
                is_out = True
                for et in ext_whitelist:
                    if et in filename.lower():
                        is_out = False
                        break
                if is_out:
                    continue
library_path = os.path.join(curfolder, filename)
logger.debug('Try ' + library_path)
try:
dll = ctypes.cdll.LoadLibrary(library_path)
except Exception as e:
logger.warning(str(e))
# OK, this except is evil. But different OSs will throw different
# errors. So just ignore any errors.
continue
# see if the functions we need are in the dll
loaded = try_load_functions(library_path, dll)
if loaded: candidates.append(loaded)
if not candidates:
# no library found
raise AssimpError("assimp library not found")
else:
# get the newest library_path
candidates = map(lambda x: (os.lstat(x[0])[-2], x), candidates)
res = max(candidates, key=operator.itemgetter(0))[1]
logger.debug('Using assimp library located at ' + res[0])
# XXX: if there are 1000 dll/so files containing 'assimp'
# in their name, do we have all of them in our address
# space now until gc kicks in?
# XXX: take version postfix of the .so on linux?
return res[1:]
def hasattr_silent(object, name):
"""
    Calls hasattr() with the given parameters and preserves the legacy (pre-Python 3.2)
    functionality of silently catching exceptions.
    Returns the result of hasattr() or False if an exception was raised.
"""
try:
if not object:
return False
return hasattr(object, name)
except AttributeError:
return False

View File

@@ -1,89 +0,0 @@
# Dummy value.
#
# No texture, but the value to be used as 'texture semantic'
# (#aiMaterialProperty::mSemantic) for all material properties
# *not* related to textures.
#
aiTextureType_NONE = 0x0
# The texture is combined with the result of the diffuse
# lighting equation.
#
aiTextureType_DIFFUSE = 0x1
# The texture is combined with the result of the specular
# lighting equation.
#
aiTextureType_SPECULAR = 0x2
# The texture is combined with the result of the ambient
# lighting equation.
#
aiTextureType_AMBIENT = 0x3
# The texture is added to the result of the lighting
# calculation. It isn't influenced by incoming light.
#
aiTextureType_EMISSIVE = 0x4
# The texture is a height map.
#
# By convention, higher gray-scale values stand for
# higher elevations from the base height.
#
aiTextureType_HEIGHT = 0x5
# The texture is a (tangent space) normal-map.
#
# Again, there are several conventions for tangent-space
# normal maps. Assimp does (intentionally) not
# distinguish here.
#
aiTextureType_NORMALS = 0x6
# The texture defines the glossiness of the material.
#
# The glossiness is in fact the exponent of the specular
# (phong) lighting equation. Usually there is a conversion
# function defined to map the linear color values in the
# texture to a suitable exponent. Have fun.
#
aiTextureType_SHININESS = 0x7
# The texture defines per-pixel opacity.
#
# Usually 'white' means opaque and 'black' means
# 'transparency'. Or quite the opposite. Have fun.
#
aiTextureType_OPACITY = 0x8
# Displacement texture
#
# The exact purpose and format is application-dependent.
# Higher color values stand for higher vertex displacements.
#
aiTextureType_DISPLACEMENT = 0x9
# Lightmap texture (aka Ambient Occlusion)
#
# Both 'Lightmaps' and dedicated 'ambient occlusion maps' are
# covered by this material property. The texture contains a
# scaling value for the final color value of a pixel. Its
# intensity is not affected by incoming light.
#
aiTextureType_LIGHTMAP = 0xA
# Reflection texture
#
# Contains the color of a perfect mirror reflection.
# Rarely used, almost never for real-time applications.
#
aiTextureType_REFLECTION = 0xB
# Unknown texture
#
# A texture reference that does not match any of the definitions
# above is considered to be 'unknown'. It is still imported
# but is excluded from any further postprocessing.
#
aiTextureType_UNKNOWN = 0xC
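# Usage sketch (assumes a scene already loaded with pyassimp): texture-related
# material properties are keyed by (name, semantic), so the diffuse texture
# path of the first material can be looked up roughly as
#
#   from pyassimp.material import aiTextureType_DIFFUSE
#   texture_file = scene.materials[0].properties[('file', aiTextureType_DIFFUSE)]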

View File

@@ -1,530 +0,0 @@
## <hr>Calculates the tangents and bitangents for the imported meshes.
#
# Does nothing if a mesh does not have normals. You might want this post
# processing step to be executed if you plan to use tangent space calculations
# such as normal mapping applied to the meshes. There's a config setting,
# <tt>#AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE</tt>, which allows you to specify
# a maximum smoothing angle for the algorithm. However, usually you'll
# want to leave it at the default value.
#
aiProcess_CalcTangentSpace = 0x1
## <hr>Identifies and joins identical vertex data sets within all
# imported meshes.
#
# After this step is run, each mesh contains unique vertices,
# so a vertex may be used by multiple faces. You usually want
# to use this post processing step. If your application deals with
# indexed geometry, this step is compulsory or you'll just waste rendering
# time. <b>If this flag is not specified</b>, no vertices are referenced by
# more than one face and <b>no index buffer is required</b> for rendering.
#
aiProcess_JoinIdenticalVertices = 0x2
## <hr>Converts all the imported data to a left-handed coordinate space.
#
# By default the data is returned in a right-handed coordinate space (which
# OpenGL prefers). In this space, +X points to the right,
# +Z points towards the viewer, and +Y points upwards. In the DirectX
# coordinate space +X points to the right, +Y points upwards, and +Z points
# away from the viewer.
#
# You'll probably want to consider this flag if you use Direct3D for
# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
# setting and bundles all conversions typically required for D3D-based
# applications.
#
aiProcess_MakeLeftHanded = 0x4
## <hr>Triangulates all faces of all meshes.
#
# By default the imported mesh data might contain faces with more than 3
# indices. For rendering you'll usually want all faces to be triangles.
# This post processing step splits up faces with more than 3 indices into
# triangles. Line and point primitives are #not# modified! If you want
# 'triangles only' with no other kinds of primitives, try the following
# solution:
# <ul>
# <li>Specify both #aiProcess_Triangulate and #aiProcess_SortByPType <li>
# <li>Ignore all point and line meshes when you process assimp's output<li>
# <ul>
#
aiProcess_Triangulate = 0x8
## <hr>Removes some parts of the data structure (animations, materials,
# light sources, cameras, textures, vertex components).
#
# The components to be removed are specified in a separate
# configuration option, <tt>#AI_CONFIG_PP_RVC_FLAGS</tt>. This is quite useful
# if you don't need all parts of the output structure. Vertex colors
# are rarely used today for example... Calling this step to remove unneeded
# data from the pipeline as early as possible results in increased
# performance and a more optimized output data structure.
# This step is also useful if you want to force Assimp to recompute
# normals or tangents. The corresponding steps don't recompute them if
# they're already there (loaded from the source asset). By using this
# step you can make sure they are NOT there.
#
# This flag is a poor one, mainly because its purpose is usually
# misunderstood. Consider the following case: a 3D model has been exported
# from a CAD app, and it has per-face vertex colors. Vertex positions can't be
# shared, thus the #aiProcess_JoinIdenticalVertices step fails to
# optimize the data because of these nasty little vertex colors.
# Most apps don't even process them, so it's all for nothing. By using
# this step, unneeded components are excluded as early as possible
# thus opening more room for internal optimizations.
#
aiProcess_RemoveComponent = 0x10
## <hr>Generates normals for all faces of all meshes.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there. Face normals are shared between all points
# of a single face, so a single point can have multiple normals, which
# forces the library to duplicate vertices in some cases.
# #aiProcess_JoinIdenticalVertices is #senseless# then.
#
# This flag may not be specified together with #aiProcess_GenSmoothNormals.
#
aiProcess_GenNormals = 0x20
## <hr>Generates smooth normals for all vertices in the mesh.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there.
#
# This flag may not be specified together with
# #aiProcess_GenNormals. There's a configuration option,
# <tt>#AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE</tt> which allows you to specify
# an angle maximum for the normal smoothing algorithm. Normals exceeding
# this limit are not smoothed, resulting in a 'hard' seam between two faces.
# Using a decent angle here (e.g. 80 degrees) results in very good visual
# appearance.
#
aiProcess_GenSmoothNormals = 0x40
## <hr>Splits large meshes into smaller sub-meshes.
#
# This is quite useful for real-time rendering, where the number of triangles
# which can be maximally processed in a single draw-call is limited
# by the video driver/hardware. The maximum vertex buffer is usually limited
# too. Both requirements can be met with this step: you may specify both a
# triangle and vertex limit for a single mesh.
#
# The split limits can (and should!) be set through the
# <tt>#AI_CONFIG_PP_SLM_VERTEX_LIMIT</tt> and <tt>#AI_CONFIG_PP_SLM_TRIANGLE_LIMIT</tt>
# settings. The default values are <tt>#AI_SLM_DEFAULT_MAX_VERTICES</tt> and
# <tt>#AI_SLM_DEFAULT_MAX_TRIANGLES</tt>.
#
# Note that splitting is generally a time-consuming task, but only if there's
# something to split. The use of this step is recommended for most users.
#
aiProcess_SplitLargeMeshes = 0x80
## <hr>Removes the node graph and pre-transforms all vertices with
# the local transformation matrices of their nodes.
#
# The output scene still contains nodes, however there is only a
# root node with children, each one referencing only one mesh,
# and each mesh referencing one material. For rendering, you can
# simply render all meshes in order - you don't need to pay
# attention to local transformations and the node hierarchy.
# Animations are removed during this step.
# This step is intended for applications without a scenegraph.
# The step CAN cause some problems: if e.g. a mesh of the asset
# contains normals and another, using the same material index, does not,
# they will be brought together, but the first mesh's part of
# the normal list is zeroed. However, these artifacts are rare.
# @note The <tt>#AI_CONFIG_PP_PTV_NORMALIZE</tt> configuration property
# can be set to normalize the scene's spatial dimension to the -1...1
# range.
#
aiProcess_PreTransformVertices = 0x100
## <hr>Limits the number of bones simultaneously affecting a single vertex
# to a maximum value.
#
# If any vertex is affected by more than the maximum number of bones, the least
# important vertex weights are removed and the remaining vertex weights are
# renormalized so that the weights still sum up to 1.
# The default bone weight limit is 4 (defined as <tt>#AI_LMW_MAX_WEIGHTS</tt> in
# config.h), but you can use the <tt>#AI_CONFIG_PP_LBW_MAX_WEIGHTS</tt> setting to
# supply your own limit to the post processing step.
#
# If you intend to perform the skinning in hardware, this post processing
# step might be of interest to you.
#
aiProcess_LimitBoneWeights = 0x200
## <hr>Validates the imported scene data structure.
# This makes sure that all indices are valid, all animations and
# bones are linked correctly, all material references are correct .. etc.
#
# It is recommended that you capture Assimp's log output if you use this flag,
# so you can easily find out what's wrong if a file fails the
# validation. The validator is quite strict and will find #all#
# inconsistencies in the data structure... It is recommended that plugin
# developers use it to debug their loaders. There are two types of
# validation failures:
# <ul>
# <li>Error: There's something wrong with the imported data. Further
# postprocessing is not possible and the data is not usable at all.
# The import fails. #Importer::GetErrorString() or #aiGetErrorString()
# carry the error message around.<li>
# <li>Warning: There are some minor issues (e.g. 1000000 animation
# keyframes with the same time), but further postprocessing and use
# of the data structure is still safe. Warning details are written
# to the log file, <tt>#AI_SCENE_FLAGS_VALIDATION_WARNING</tt> is set
# in #aiScene::mFlags<li>
# <ul>
#
# This post-processing step is not time-consuming. Its use is not
# compulsory, but recommended.
#
aiProcess_ValidateDataStructure = 0x400
## <hr>Reorders triangles for better vertex cache locality.
#
# The step tries to improve the ACMR (average post-transform vertex cache
# miss ratio) for all meshes. The implementation runs in O(n) and is
# roughly based on the 'tipsify' algorithm (see <a href="
# http://www.cs.princeton.edu/gfx/pubs/Sander_2007_%3ETR/tipsy.pdf">this
# paper</a>).
#
# If you intend to render huge models in hardware, this step might
# be of interest to you. The <tt>#AI_CONFIG_PP_ICL_PTCACHE_SIZE</tt> config
# setting can be used to fine-tune the cache optimization.
#
aiProcess_ImproveCacheLocality = 0x800
## <hr>Searches for redundant/unreferenced materials and removes them.
#
# This is especially useful in combination with the
# #aiProcess_PretransformVertices and #aiProcess_OptimizeMeshes flags.
# Both join small meshes with equal characteristics, but they can't do
# their work if two meshes have different materials. Because several
# material settings are lost during Assimp's import filters,
# (and because many exporters don't check for redundant materials), huge
# models often have materials which are defined several times with
# exactly the same settings.
#
# Several material settings not contributing to the final appearance of
# a surface are ignored in all comparisons (e.g. the material name).
# So, if you're passing additional information through the
# content pipeline (probably using #magic# material names), don't
# specify this flag. Alternatively take a look at the
# <tt>#AI_CONFIG_PP_RRM_EXCLUDE_LIST</tt> setting.
#
aiProcess_RemoveRedundantMaterials = 0x1000
## <hr>This step tries to determine which meshes have normal vectors
# that are facing inwards and inverts them.
#
# The algorithm is simple but effective:
# the bounding box of all vertices + their normals is compared against
# the volume of the bounding box of all vertices without their normals.
# This works well for most objects, problems might occur with planar
# surfaces. However, the step tries to filter such cases.
# The step inverts all in-facing normals. Generally it is recommended
# to enable this step, although the result is not always correct.
#
aiProcess_FixInfacingNormals = 0x2000
## <hr>This step splits meshes with more than one primitive type in
# homogeneous sub-meshes.
#
# The step is executed after the triangulation step. After the step
# returns, just one bit is set in aiMesh::mPrimitiveTypes. This is
# especially useful for real-time rendering where point and line
# primitives are often ignored or rendered separately.
# You can use the <tt>#AI_CONFIG_PP_SBP_REMOVE</tt> option to specify which
# primitive types you need. This can be used to easily exclude
# lines and points, which are rarely used, from the import.
#
aiProcess_SortByPType = 0x8000
## <hr>This step searches all meshes for degenerate primitives and
# converts them to proper lines or points.
#
# A face is 'degenerate' if one or more of its points are identical.
# To have the degenerate stuff not only detected and collapsed but
# removed, try one of the following procedures:
# <br><b>1.</b> (if you support lines and points for rendering but don't
# want the degenerates)<br>
# <ul>
# <li>Specify the #aiProcess_FindDegenerates flag.
# <li>
# <li>Set the <tt>AI_CONFIG_PP_FD_REMOVE</tt> option to 1. This will
# cause the step to remove degenerate triangles from the import
# as soon as they're detected. They won't pass any further
# pipeline steps.
# <li>
# <ul>
# <br><b>2.</b> (if you don't support lines and points at all)<br>
# <ul>
# <li>Specify the #aiProcess_FindDegenerates flag.
# <li>
# <li>Specify the #aiProcess_SortByPType flag. This moves line and
# point primitives to separate meshes.
# <li>
# <li>Set the <tt>AI_CONFIG_PP_SBP_REMOVE</tt> option to
# @code aiPrimitiveType_POINTS | aiPrimitiveType_LINES
# @endcode to cause SortByPType to reject point
# and line meshes from the scene.
# <li>
# <ul>
# @note Degenerate polygons are not necessarily evil and that's why
# they're not removed by default. There are several file formats which
# don't support lines or points, and some exporters bypass the
# format specification and write them as degenerate triangles instead.
#
aiProcess_FindDegenerates = 0x10000
## <hr>This step searches all meshes for invalid data, such as zeroed
# normal vectors or invalid UV coords, and removes/fixes them. This is
# intended to get rid of some common exporter errors.
#
# This is especially useful for normals. If they are invalid, and
# the step recognizes this, they will be removed and can later
# be recomputed, i.e. by the #aiProcess_GenSmoothNormals flag.<br>
# The step will also remove meshes that are infinitely small and reduce
# animation tracks consisting of hundreds of redundant keys to a single
# key. The <tt>AI_CONFIG_PP_FID_ANIM_ACCURACY</tt> config property decides
# the accuracy of the check for duplicate animation tracks.
#
aiProcess_FindInvalidData = 0x20000
## <hr>This step converts non-UV mappings (such as spherical or
# cylindrical mapping) to proper texture coordinate channels.
#
# Most applications will support UV mapping only, so you will
# probably want to specify this step in every case. Note that Assimp is not
# always able to match the original mapping implementation of the
# 3D app which produced a model perfectly. It's always better to let the
# modelling app compute the UV channels - 3ds max, Maya, Blender,
# LightWave, and Modo do this for example.
#
# @note If this step is not requested, you'll need to process the
# <tt>#AI_MATKEY_MAPPING</tt> material property in order to display all assets
# properly.
#
aiProcess_GenUVCoords = 0x40000
## <hr>This step applies per-texture UV transformations and bakes
# them into stand-alone texture coordinate channels.
#
# UV transformations are specified per-texture - see the
# <tt>#AI_MATKEY_UVTRANSFORM</tt> material key for more information.
# This step processes all textures with
# transformed input UV coordinates and generates a new (pre-transformed) UV channel
# which replaces the old channel. Most applications won't support UV
# transformations, so you will probably want to specify this step.
#
# @note UV transformations are usually implemented in real-time apps by
# transforming texture coordinates at vertex shader stage with a 3x3
# (homogeneous) transformation matrix.
#
aiProcess_TransformUVCoords = 0x80000
## <hr>This step searches for duplicate meshes and replaces them
# with references to the first mesh.
#
# This step takes a while, so don't use it if speed is a concern.
# Its main purpose is to work around the fact that many export
# file formats don't support instanced meshes, so exporters need to
# duplicate meshes. This step removes the duplicates again. Please
# note that Assimp does not currently support per-node material
# assignment to meshes, which means that identical meshes with
# different materials are currently #not# joined, although this is
# planned for future versions.
#
aiProcess_FindInstances = 0x100000
## <hr>A postprocessing step to reduce the number of meshes.
#
# This will, in fact, reduce the number of draw calls.
#
# This is a very effective optimization and is recommended to be used
# together with #aiProcess_OptimizeGraph, if possible. The flag is fully
# compatible with both #aiProcess_SplitLargeMeshes and #aiProcess_SortByPType.
#
aiProcess_OptimizeMeshes = 0x200000
## <hr>A postprocessing step to optimize the scene hierarchy.
#
# Nodes without animations, bones, lights or cameras assigned are
# collapsed and joined.
#
# Node names can be lost during this step. If you use special 'tag nodes'
# to pass additional information through your content pipeline, use the
# <tt>#AI_CONFIG_PP_OG_EXCLUDE_LIST</tt> setting to specify a list of node
# names you want to be kept. Nodes matching one of the names in this list won't
# be touched or modified.
#
# Use this flag with caution. Most simple files will be collapsed to a
# single node, so complex hierarchies are usually completely lost. This is not
# useful for editor environments, but probably a very effective
# optimization if you just want to get the model data, convert it to your
# own format, and render it as fast as possible.
#
# This flag is designed to be used with #aiProcess_OptimizeMeshes for best
# results.
#
# @note 'Crappy' scenes with thousands of extremely small meshes packed
# in deeply nested nodes exist for almost all file formats.
# #aiProcess_OptimizeMeshes in combination with #aiProcess_OptimizeGraph
# usually fixes them all and makes them renderable.
#
aiProcess_OptimizeGraph = 0x400000
## <hr>This step flips all UV coordinates along the y-axis and adjusts
# material settings and bitangents accordingly.
#
# <b>Output UV coordinate system:</b>
# @code
# 0y|0y ---------- 1x|0y
# | |
# | |
# | |
# 0x|1y ---------- 1x|1y
# @endcode
#
# You'll probably want to consider this flag if you use Direct3D for
# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
# setting and bundles all conversions typically required for D3D-based
# applications.
#
aiProcess_FlipUVs = 0x800000
## <hr>This step adjusts the output face winding order to be CW.
#
# The default face winding order is counter clockwise (CCW).
#
# <b>Output face order:</b>
# @code
# x2
#
# x0
# x1
# @endcode
#
aiProcess_FlipWindingOrder = 0x1000000
## <hr>This step splits meshes with many bones into sub-meshes so that each
# sub-mesh has fewer than or as many bones as a given limit.
#
aiProcess_SplitByBoneCount = 0x2000000
## <hr>This step removes bones losslessly or according to some threshold.
#
# In some cases (i.e. formats that require it) exporters are forced to
# assign dummy bone weights to otherwise static meshes assigned to
# animated meshes. Full, weight-based skinning is expensive while
# animating nodes is extremely cheap, so this step is offered to clean up
# the data in that regard.
#
# Use <tt>#AI_CONFIG_PP_DB_THRESHOLD</tt> to control this.
# Use <tt>#AI_CONFIG_PP_DB_ALL_OR_NONE</tt> if you want bones removed if and
# only if all bones within the scene qualify for removal.
#
aiProcess_Debone = 0x4000000
aiProcess_GenEntityMeshes = 0x100000
aiProcess_OptimizeAnimations = 0x200000
aiProcess_FixTexturePaths = 0x200000
aiProcess_EmbedTextures = 0x10000000
## @def aiProcess_ConvertToLeftHanded
# @brief Shortcut flag for Direct3D-based applications.
#
# Supersedes the #aiProcess_MakeLeftHanded and #aiProcess_FlipUVs and
# #aiProcess_FlipWindingOrder flags.
# The output data matches Direct3D's conventions: left-handed geometry, upper-left
# origin for UV coordinates and finally clockwise face order, suitable for CCW culling.
#
# @deprecated
#
aiProcess_ConvertToLeftHanded = ( \
aiProcess_MakeLeftHanded | \
aiProcess_FlipUVs | \
aiProcess_FlipWindingOrder | \
0 )
## @def aiProcessPreset_TargetRealtimeUse_Fast
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# Applications would want to use this preset to load models on end-user PCs,
# maybe for direct use in game.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be of
# use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_Fast = ( \
aiProcess_CalcTangentSpace | \
aiProcess_GenNormals | \
aiProcess_JoinIdenticalVertices | \
aiProcess_Triangulate | \
aiProcess_GenUVCoords | \
aiProcess_SortByPType | \
0 )
## @def aiProcessPreset_TargetRealtime_Quality
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# Unlike #aiProcessPreset_TargetRealtime_Fast, this configuration
# performs some extra optimizations to improve rendering speed and
# to minimize memory usage. It could be a good choice for a level editor
# environment where import speed is not so important.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be
# of use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_Quality = ( \
aiProcess_CalcTangentSpace | \
aiProcess_GenSmoothNormals | \
aiProcess_JoinIdenticalVertices | \
aiProcess_ImproveCacheLocality | \
aiProcess_LimitBoneWeights | \
aiProcess_RemoveRedundantMaterials | \
aiProcess_SplitLargeMeshes | \
aiProcess_Triangulate | \
aiProcess_GenUVCoords | \
aiProcess_SortByPType | \
aiProcess_FindDegenerates | \
aiProcess_FindInvalidData | \
0 )
## @def aiProcessPreset_TargetRealtime_MaxQuality
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# This preset enables almost every optimization step to achieve perfectly
# optimized data. It's your choice for level editor environments where import speed
# is not important.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application, apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be
# of use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_MaxQuality = ( \
aiProcessPreset_TargetRealtime_Quality | \
aiProcess_FindInstances | \
aiProcess_ValidateDataStructure | \
aiProcess_OptimizeMeshes | \
0 )
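# A small sketch (illustration only) of composing a custom pipeline from the
# individual flags above; any bitwise OR of flags is a valid 'processing'
# argument for pyassimp.load, and 'model.obj' is a hypothetical path:
#
#   custom = (aiProcess_Triangulate |
#             aiProcess_GenSmoothNormals |
#             aiProcess_JoinIdenticalVertices)
#   scene = pyassimp.load('model.obj', processing=custom)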

File diff suppressed because it is too large