Build Assimp from source

109 thirdparty/assimp/code/Assjson/cencode.c vendored Normal file
@@ -0,0 +1,109 @@
/*
cencoder.c - c source to a base64 encoding algorithm implementation

This is part of the libb64 project, and has been placed in the public domain.
For details, see http://sourceforge.net/projects/libb64
*/

#include "cencode.h" // changed from <B64/cencode.h>

const int CHARS_PER_LINE = 72;

void base64_init_encodestate(base64_encodestate* state_in)
{
    state_in->step = step_A;
    state_in->result = 0;
    state_in->stepcount = 0;
}

char base64_encode_value(char value_in)
{
    static const char* encoding = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    if (value_in > 63) return '=';
    return encoding[(int)value_in];
}

int base64_encode_block(const char* plaintext_in, int length_in, char* code_out, base64_encodestate* state_in)
{
    const char* plainchar = plaintext_in;
    const char* const plaintextend = plaintext_in + length_in;
    char* codechar = code_out;
    char result;
    char fragment;

    result = state_in->result;

    switch (state_in->step)
    {
        while (1)
        {
    case step_A:
            if (plainchar == plaintextend)
            {
                state_in->result = result;
                state_in->step = step_A;
                return codechar - code_out;
            }
            fragment = *plainchar++;
            result = (fragment & 0x0fc) >> 2;
            *codechar++ = base64_encode_value(result);
            result = (fragment & 0x003) << 4;
    case step_B:
            if (plainchar == plaintextend)
            {
                state_in->result = result;
                state_in->step = step_B;
                return codechar - code_out;
            }
            fragment = *plainchar++;
            result |= (fragment & 0x0f0) >> 4;
            *codechar++ = base64_encode_value(result);
            result = (fragment & 0x00f) << 2;
    case step_C:
            if (plainchar == plaintextend)
            {
                state_in->result = result;
                state_in->step = step_C;
                return codechar - code_out;
            }
            fragment = *plainchar++;
            result |= (fragment & 0x0c0) >> 6;
            *codechar++ = base64_encode_value(result);
            result = (fragment & 0x03f) >> 0;
            *codechar++ = base64_encode_value(result);

            ++(state_in->stepcount);
            if (state_in->stepcount == CHARS_PER_LINE/4)
            {
                *codechar++ = '\n';
                state_in->stepcount = 0;
            }
        }
    }
    /* control should not reach here */
    return codechar - code_out;
}

int base64_encode_blockend(char* code_out, base64_encodestate* state_in)
{
    char* codechar = code_out;

    switch (state_in->step)
    {
    case step_B:
        *codechar++ = base64_encode_value(state_in->result);
        *codechar++ = '=';
        *codechar++ = '=';
        break;
    case step_C:
        *codechar++ = base64_encode_value(state_in->result);
        *codechar++ = '=';
        break;
    case step_A:
        break;
    }
    *codechar++ = '\n';

    return codechar - code_out;
}

31 thirdparty/assimp/code/Assjson/cencode.h vendored Normal file
@@ -0,0 +1,31 @@
/*
cencode.h - c header for a base64 encoding algorithm

This is part of the libb64 project, and has been placed in the public domain.
For details, see http://sourceforge.net/projects/libb64
*/

#ifndef BASE64_CENCODE_H
#define BASE64_CENCODE_H

typedef enum
{
    step_A, step_B, step_C
} base64_encodestep;

typedef struct
{
    base64_encodestep step;
    char result;
    int stepcount;
} base64_encodestate;

void base64_init_encodestate(base64_encodestate* state_in);

char base64_encode_value(char value_in);

int base64_encode_block(const char* plaintext_in, int length_in, char* code_out, base64_encodestate* state_in);

int base64_encode_blockend(char* code_out, base64_encodestate* state_in);

#endif /* BASE64_CENCODE_H */
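
Together these two files form libb64's streaming encoder: the case labels inside the while (1) of base64_encode_block let the state machine suspend and resume in the middle of a 3-byte group, so input can be fed in arbitrary chunks before base64_encode_blockend writes the final padding. A minimal single-shot driver looks like this (a sketch only; the fixed 64-byte output buffer is an assumption of the example, sized at roughly 4/3 of the input plus padding, newline and terminator):

    #include <stdio.h>
    #include <string.h>
    #include "cencode.h"

    int main(void) {
        const char* input = "hello world";
        char encoded[64]; /* assumption: large enough for this short input */
        base64_encodestate state;

        base64_init_encodestate(&state);
        int n = base64_encode_block(input, (int)strlen(input), encoded, &state);
        n += base64_encode_blockend(encoded + n, &state); /* '=' padding plus a trailing '\n' */
        encoded[n] = '\0';

        printf("%s", encoded); /* prints aGVsbG8gd29ybGQ= */
        return 0;
    }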

809 thirdparty/assimp/code/Assjson/json_exporter.cpp vendored Normal file
@@ -0,0 +1,809 @@
/*
Assimp2Json
Copyright (c) 2011, Alexander C. Gessler

Licensed under a 3-clause BSD license. See the LICENSE file for more information.

*/

#ifndef ASSIMP_BUILD_NO_EXPORT
#ifndef ASSIMP_BUILD_NO_ASSJSON_EXPORTER

#include <assimp/Importer.hpp>
#include <assimp/Exporter.hpp>
#include <assimp/IOStream.hpp>
#include <assimp/IOSystem.hpp>
#include <assimp/scene.h>

#include <sstream>
#include <limits>
#include <cassert>
#include <memory>
#include <cmath> // std::fabs

#define CURRENT_FORMAT_VERSION 100

// grab scoped_ptr from assimp to avoid a dependency on boost.
//#include <assimp/../../code/BoostWorkaround/boost/scoped_ptr.hpp>

#include "mesh_splitter.h"

extern "C" {
#include "cencode.h"
}
namespace Assimp {

void ExportAssimp2Json(const char*, Assimp::IOSystem*, const aiScene*, const Assimp::ExportProperties*);

// small utility class to simplify serializing the aiScene to Json
class JSONWriter {
public:
    enum {
        Flag_DoNotIndent = 0x1,
        Flag_WriteSpecialFloats = 0x2,
    };

    JSONWriter(Assimp::IOStream& out, unsigned int flags = 0u)
    : out(out)
    , first()
    , flags(flags) {
        // make sure that all formatting happens using the standard, C locale and not the user's current locale
        buff.imbue(std::locale("C"));
    }

    ~JSONWriter() {
        Flush();
    }

    void Flush() {
        const std::string s = buff.str();
        out.Write(s.c_str(), s.length(), 1);
        buff.str(""); // clear() would only reset the error flags, not the buffered contents
    }

    void PushIndent() {
        indent += '\t';
    }

    void PopIndent() {
        indent.erase(indent.end() - 1);
    }

    void Key(const std::string& name) {
        AddIndentation();
        Delimit();
        buff << '\"' + name + "\": ";
    }

    template<typename Literal>
    void Element(const Literal& name) {
        AddIndentation();
        Delimit();

        LiteralToString(buff, name) << '\n';
    }

    template<typename Literal>
    void SimpleValue(const Literal& s) {
        LiteralToString(buff, s) << '\n';
    }

    void SimpleValue(const void* buffer, size_t len) {
        base64_encodestate s;
        base64_init_encodestate(&s);

        char* const out = new char[std::max(len * 2, static_cast<size_t>(16u))];
        const int n = base64_encode_block(reinterpret_cast<const char*>(buffer), static_cast<int>(len), out, &s);
        out[n + base64_encode_blockend(out + n, &s)] = '\0';

        // base64 encoding may add newlines, but JSON strings may not contain 'real' newlines
        // (only escaped ones). Remove any newlines in out.
        for (char* cur = out; *cur; ++cur) {
            if (*cur == '\n') {
                *cur = ' ';
            }
        }

        buff << '\"' << out << "\"\n";
        delete[] out;
    }

    void StartObj(bool is_element = false) {
        // if this appears as a plain array element, we need to insert a delimiter and we should also indent it
        if (is_element) {
            AddIndentation();
            if (!first) {
                buff << ',';
            }
        }
        first = true;
        buff << "{\n";
        PushIndent();
    }

    void EndObj() {
        PopIndent();
        AddIndentation();
        first = false;
        buff << "}\n";
    }

    void StartArray(bool is_element = false) {
        // if this appears as a plain array element, we need to insert a delimiter and we should also indent it
        if (is_element) {
            AddIndentation();
            if (!first) {
                buff << ',';
            }
        }
        first = true;
        buff << "[\n";
        PushIndent();
    }

    void EndArray() {
        PopIndent();
        AddIndentation();
        buff << "]\n";
        first = false;
    }

    void AddIndentation() {
        if (!(flags & Flag_DoNotIndent)) {
            buff << indent;
        }
    }

    void Delimit() {
        if (!first) {
            buff << ',';
        }
        else {
            buff << ' ';
            first = false;
        }
    }

private:
    template<typename Literal>
    std::stringstream& LiteralToString(std::stringstream& stream, const Literal& s) {
        stream << s;
        return stream;
    }

    std::stringstream& LiteralToString(std::stringstream& stream, const aiString& s) {
        std::string t;

        // escape backslashes and quotes; either would render the JSON invalid if left as-is
        t.reserve(s.length);
        for (size_t i = 0; i < s.length; ++i) {

            if (s.data[i] == '\\' || s.data[i] == '\'' || s.data[i] == '\"') {
                t.push_back('\\');
            }

            t.push_back(s.data[i]);
        }
        stream << "\"";
        stream << t;
        stream << "\"";
        return stream;
    }

    std::stringstream& LiteralToString(std::stringstream& stream, float f) {
        if (!std::numeric_limits<float>::is_iec559) {
            // on a non IEEE-754 platform, we make no assumptions about the representation or existence
            // of special floating-point numbers.
            stream << f;
            return stream;
        }

        // JSON does not support writing Inf/Nan
        // [RFC 4627: "Numeric values that cannot be represented as sequences of digits
        // (such as Infinity and NaN) are not permitted."]
        // Nevertheless, many parsers will accept the special keywords Infinity, -Infinity and NaN
        if (std::numeric_limits<float>::infinity() == std::fabs(f)) {
            if (flags & Flag_WriteSpecialFloats) {
                stream << (f < 0 ? "\"-" : "\"") + std::string("Infinity\"");
                return stream;
            }
            // we should print this warning, but we can't - this is called from within a generic assimp exporter, we cannot use cerr
            // std::cerr << "warning: cannot represent infinite number literal, substituting 0 instead (use -i flag to enforce Infinity/NaN)" << std::endl;
            stream << "0.0";
            return stream;
        }
        // f!=f is the most reliable test for NaNs that I know of
        else if (f != f) {
            if (flags & Flag_WriteSpecialFloats) {
                stream << "\"NaN\"";
                return stream;
            }
            // we should print this warning, but we can't - this is called from within a generic assimp exporter, we cannot use cerr
            // std::cerr << "warning: cannot represent NaN number literal, substituting 0 instead (use -i flag to enforce Infinity/NaN)" << std::endl;
            stream << "0.0";
            return stream;
        }

        stream << f;
        return stream;
    }

private:
    Assimp::IOStream& out;
    std::string indent, newline;
    std::stringstream buff;
    bool first;

    unsigned int flags;
};
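
// Usage sketch (illustrative only; the exporter drives this via the Write()
// overloads below): `first` tracks whether a delimiter is needed, so keys and
// elements can be emitted linearly without worrying about trailing commas:
//
//   JSONWriter json(stream);
//   json.StartObj();
//   json.Key("version");
//   json.SimpleValue(100);
//   json.EndObj();        // yields { "version": 100 } spread over three lines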

void Write(JSONWriter& out, const aiVector3D& ai, bool is_elem = true) {
    out.StartArray(is_elem);
    out.Element(ai.x);
    out.Element(ai.y);
    out.Element(ai.z);
    out.EndArray();
}

void Write(JSONWriter& out, const aiQuaternion& ai, bool is_elem = true) {
    out.StartArray(is_elem);
    out.Element(ai.w);
    out.Element(ai.x);
    out.Element(ai.y);
    out.Element(ai.z);
    out.EndArray();
}

void Write(JSONWriter& out, const aiColor3D& ai, bool is_elem = true) {
    out.StartArray(is_elem);
    out.Element(ai.r);
    out.Element(ai.g);
    out.Element(ai.b);
    out.EndArray();
}

void Write(JSONWriter& out, const aiMatrix4x4& ai, bool is_elem = true) {
    out.StartArray(is_elem);
    for (unsigned int x = 0; x < 4; ++x) {
        for (unsigned int y = 0; y < 4; ++y) {
            out.Element(ai[x][y]);
        }
    }
    out.EndArray();
}

void Write(JSONWriter& out, const aiBone& ai, bool is_elem = true) {
    out.StartObj(is_elem);

    out.Key("name");
    out.SimpleValue(ai.mName);

    out.Key("offsetmatrix");
    Write(out, ai.mOffsetMatrix, false);

    out.Key("weights");
    out.StartArray();
    for (unsigned int i = 0; i < ai.mNumWeights; ++i) {
        out.StartArray(true);
        out.Element(ai.mWeights[i].mVertexId);
        out.Element(ai.mWeights[i].mWeight);
        out.EndArray();
    }
    out.EndArray();
    out.EndObj();
}

void Write(JSONWriter& out, const aiFace& ai, bool is_elem = true) {
    out.StartArray(is_elem);
    for (unsigned int i = 0; i < ai.mNumIndices; ++i) {
        out.Element(ai.mIndices[i]);
    }
    out.EndArray();
}

void Write(JSONWriter& out, const aiMesh& ai, bool is_elem = true) {
    out.StartObj(is_elem);

    out.Key("name");
    out.SimpleValue(ai.mName);

    out.Key("materialindex");
    out.SimpleValue(ai.mMaterialIndex);

    out.Key("primitivetypes");
    out.SimpleValue(ai.mPrimitiveTypes);

    out.Key("vertices");
    out.StartArray();
    for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
        out.Element(ai.mVertices[i].x);
        out.Element(ai.mVertices[i].y);
        out.Element(ai.mVertices[i].z);
    }
    out.EndArray();

    if (ai.HasNormals()) {
        out.Key("normals");
        out.StartArray();
        for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
            out.Element(ai.mNormals[i].x);
            out.Element(ai.mNormals[i].y);
            out.Element(ai.mNormals[i].z);
        }
        out.EndArray();
    }

    if (ai.HasTangentsAndBitangents()) {
        out.Key("tangents");
        out.StartArray();
        for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
            out.Element(ai.mTangents[i].x);
            out.Element(ai.mTangents[i].y);
            out.Element(ai.mTangents[i].z);
        }
        out.EndArray();

        out.Key("bitangents");
        out.StartArray();
        for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
            out.Element(ai.mBitangents[i].x);
            out.Element(ai.mBitangents[i].y);
            out.Element(ai.mBitangents[i].z);
        }
        out.EndArray();
    }

    if (ai.GetNumUVChannels()) {
        out.Key("numuvcomponents");
        out.StartArray();
        for (unsigned int n = 0; n < ai.GetNumUVChannels(); ++n) {
            out.Element(ai.mNumUVComponents[n]);
        }
        out.EndArray();

        out.Key("texturecoords");
        out.StartArray();
        for (unsigned int n = 0; n < ai.GetNumUVChannels(); ++n) {
            const unsigned int numc = ai.mNumUVComponents[n] ? ai.mNumUVComponents[n] : 2;

            out.StartArray(true);
            for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
                for (unsigned int c = 0; c < numc; ++c) {
                    out.Element(ai.mTextureCoords[n][i][c]);
                }
            }
            out.EndArray();
        }
        out.EndArray();
    }

    if (ai.GetNumColorChannels()) {
        out.Key("colors");
        out.StartArray();
        for (unsigned int n = 0; n < ai.GetNumColorChannels(); ++n) {
            out.StartArray(true);
            for (unsigned int i = 0; i < ai.mNumVertices; ++i) {
                out.Element(ai.mColors[n][i].r);
                out.Element(ai.mColors[n][i].g);
                out.Element(ai.mColors[n][i].b);
                out.Element(ai.mColors[n][i].a);
            }
            out.EndArray();
        }
        out.EndArray();
    }

    if (ai.mNumBones) {
        out.Key("bones");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumBones; ++n) {
            Write(out, *ai.mBones[n]);
        }
        out.EndArray();
    }

    out.Key("faces");
    out.StartArray();
    for (unsigned int n = 0; n < ai.mNumFaces; ++n) {
        Write(out, ai.mFaces[n]);
    }
    out.EndArray();

    out.EndObj();
}
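
// A mesh serialized by the function above therefore comes out shaped like
// this (abridged, illustrative values):
//
//   { "name": "...", "materialindex": 0, "primitivetypes": 4,
//     "vertices": [x0, y0, z0, x1, y1, z1, ...],
//     "faces": [[0, 1, 2], [2, 3, 0], ...] }
//
// i.e. vertex attributes are flattened component-wise while faces stay nested.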

void Write(JSONWriter& out, const aiNode& ai, bool is_elem = true) {
    out.StartObj(is_elem);

    out.Key("name");
    out.SimpleValue(ai.mName);

    out.Key("transformation");
    Write(out, ai.mTransformation, false);

    if (ai.mNumMeshes) {
        out.Key("meshes");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumMeshes; ++n) {
            out.Element(ai.mMeshes[n]);
        }
        out.EndArray();
    }

    if (ai.mNumChildren) {
        out.Key("children");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumChildren; ++n) {
            Write(out, *ai.mChildren[n]);
        }
        out.EndArray();
    }

    out.EndObj();
}

void Write(JSONWriter& out, const aiMaterial& ai, bool is_elem = true) {
    out.StartObj(is_elem);

    out.Key("properties");
    out.StartArray();
    for (unsigned int i = 0; i < ai.mNumProperties; ++i) {
        const aiMaterialProperty* const prop = ai.mProperties[i];
        out.StartObj(true);
        out.Key("key");
        out.SimpleValue(prop->mKey);
        out.Key("semantic");
        out.SimpleValue(prop->mSemantic);
        out.Key("index");
        out.SimpleValue(prop->mIndex);

        out.Key("type");
        out.SimpleValue(prop->mType);

        out.Key("value");
        switch (prop->mType) {
        case aiPTI_Float:
            if (prop->mDataLength / sizeof(float) > 1) {
                out.StartArray();
                for (unsigned int ii = 0; ii < prop->mDataLength / sizeof(float); ++ii) {
                    out.Element(reinterpret_cast<float*>(prop->mData)[ii]);
                }
                out.EndArray();
            }
            else {
                out.SimpleValue(*reinterpret_cast<float*>(prop->mData));
            }
            break;

        case aiPTI_Integer:
            if (prop->mDataLength / sizeof(int) > 1) {
                out.StartArray();
                for (unsigned int ii = 0; ii < prop->mDataLength / sizeof(int); ++ii) {
                    out.Element(reinterpret_cast<int*>(prop->mData)[ii]);
                }
                out.EndArray();
            } else {
                out.SimpleValue(*reinterpret_cast<int*>(prop->mData));
            }
            break;

        case aiPTI_String:
            {
                aiString s;
                aiGetMaterialString(&ai, prop->mKey.data, prop->mSemantic, prop->mIndex, &s);
                out.SimpleValue(s);
            }
            break;
        case aiPTI_Buffer:
            {
                // binary data is written as a base64-encoded string
                out.SimpleValue(prop->mData, prop->mDataLength);
            }
            break;
        default:
            assert(false);
        }

        out.EndObj();
    }

    out.EndArray();
    out.EndObj();
}

void Write(JSONWriter& out, const aiTexture& ai, bool is_elem = true) {
    out.StartObj(is_elem);

    out.Key("width");
    out.SimpleValue(ai.mWidth);

    out.Key("height");
    out.SimpleValue(ai.mHeight);

    out.Key("formathint");
    out.SimpleValue(aiString(ai.achFormatHint));

    out.Key("data");
    if (!ai.mHeight) {
        out.SimpleValue(ai.pcData, ai.mWidth);
    }
    else {
        out.StartArray();
        for (unsigned int y = 0; y < ai.mHeight; ++y) {
            out.StartArray(true);
            for (unsigned int x = 0; x < ai.mWidth; ++x) {
                const aiTexel& tx = ai.pcData[y*ai.mWidth + x];
                out.StartArray(true);
                out.Element(static_cast<unsigned int>(tx.r));
                out.Element(static_cast<unsigned int>(tx.g));
                out.Element(static_cast<unsigned int>(tx.b));
                out.Element(static_cast<unsigned int>(tx.a));
                out.EndArray();
            }
            out.EndArray();
        }
        out.EndArray();
    }

    out.EndObj();
}
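
// Note the two layouts of "data" above: a compressed embedded texture
// (mHeight == 0) carries mWidth bytes of raw file data and is serialized as a
// single base64 string, while an uncompressed one becomes a height x width
// array of [r, g, b, a] texel arrays.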

void Write(JSONWriter& out, const aiLight& ai, bool is_elem = true) {
    out.StartObj(is_elem);

    out.Key("name");
    out.SimpleValue(ai.mName);

    out.Key("type");
    out.SimpleValue(ai.mType);

    if (ai.mType == aiLightSource_SPOT || ai.mType == aiLightSource_UNDEFINED) {
        out.Key("angleinnercone");
        out.SimpleValue(ai.mAngleInnerCone);

        out.Key("angleoutercone");
        out.SimpleValue(ai.mAngleOuterCone);
    }

    out.Key("attenuationconstant");
    out.SimpleValue(ai.mAttenuationConstant);

    out.Key("attenuationlinear");
    out.SimpleValue(ai.mAttenuationLinear);

    out.Key("attenuationquadratic");
    out.SimpleValue(ai.mAttenuationQuadratic);

    out.Key("diffusecolor");
    Write(out, ai.mColorDiffuse, false);

    out.Key("specularcolor");
    Write(out, ai.mColorSpecular, false);

    out.Key("ambientcolor");
    Write(out, ai.mColorAmbient, false);

    if (ai.mType != aiLightSource_POINT) {
        out.Key("direction");
        Write(out, ai.mDirection, false);
    }

    if (ai.mType != aiLightSource_DIRECTIONAL) {
        out.Key("position");
        Write(out, ai.mPosition, false);
    }

    out.EndObj();
}

void Write(JSONWriter& out, const aiNodeAnim& ai, bool is_elem = true) {
    out.StartObj(is_elem);

    out.Key("name");
    out.SimpleValue(ai.mNodeName);

    out.Key("prestate");
    out.SimpleValue(ai.mPreState);

    out.Key("poststate");
    out.SimpleValue(ai.mPostState);

    if (ai.mNumPositionKeys) {
        out.Key("positionkeys");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumPositionKeys; ++n) {
            const aiVectorKey& pos = ai.mPositionKeys[n];
            out.StartArray(true);
            out.Element(pos.mTime);
            Write(out, pos.mValue);
            out.EndArray();
        }
        out.EndArray();
    }

    if (ai.mNumRotationKeys) {
        out.Key("rotationkeys");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumRotationKeys; ++n) {
            const aiQuatKey& rot = ai.mRotationKeys[n];
            out.StartArray(true);
            out.Element(rot.mTime);
            Write(out, rot.mValue);
            out.EndArray();
        }
        out.EndArray();
    }

    if (ai.mNumScalingKeys) {
        out.Key("scalingkeys");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumScalingKeys; ++n) {
            const aiVectorKey& scl = ai.mScalingKeys[n];
            out.StartArray(true);
            out.Element(scl.mTime);
            Write(out, scl.mValue);
            out.EndArray();
        }
        out.EndArray();
    }
    out.EndObj();
}

void Write(JSONWriter& out, const aiAnimation& ai, bool is_elem = true) {
    out.StartObj(is_elem);

    out.Key("name");
    out.SimpleValue(ai.mName);

    out.Key("tickspersecond");
    out.SimpleValue(ai.mTicksPerSecond);

    out.Key("duration");
    out.SimpleValue(ai.mDuration);

    out.Key("channels");
    out.StartArray();
    for (unsigned int n = 0; n < ai.mNumChannels; ++n) {
        Write(out, *ai.mChannels[n]);
    }
    out.EndArray();
    out.EndObj();
}

void Write(JSONWriter& out, const aiCamera& ai, bool is_elem = true) {
    out.StartObj(is_elem);

    out.Key("name");
    out.SimpleValue(ai.mName);

    out.Key("aspect");
    out.SimpleValue(ai.mAspect);

    out.Key("clipplanefar");
    out.SimpleValue(ai.mClipPlaneFar);

    out.Key("clipplanenear");
    out.SimpleValue(ai.mClipPlaneNear);

    out.Key("horizontalfov");
    out.SimpleValue(ai.mHorizontalFOV);

    out.Key("up");
    Write(out, ai.mUp, false);

    out.Key("lookat");
    Write(out, ai.mLookAt, false);

    out.EndObj();
}

void WriteFormatInfo(JSONWriter& out) {
    out.StartObj();
    out.Key("format");
    out.SimpleValue("\"assimp2json\"");
    out.Key("version");
    out.SimpleValue(CURRENT_FORMAT_VERSION);
    out.EndObj();
}

void Write(JSONWriter& out, const aiScene& ai) {
    out.StartObj();

    out.Key("__metadata__");
    WriteFormatInfo(out);

    out.Key("rootnode");
    Write(out, *ai.mRootNode, false);

    out.Key("flags");
    out.SimpleValue(ai.mFlags);

    if (ai.HasMeshes()) {
        out.Key("meshes");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumMeshes; ++n) {
            Write(out, *ai.mMeshes[n]);
        }
        out.EndArray();
    }

    if (ai.HasMaterials()) {
        out.Key("materials");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumMaterials; ++n) {
            Write(out, *ai.mMaterials[n]);
        }
        out.EndArray();
    }

    if (ai.HasAnimations()) {
        out.Key("animations");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumAnimations; ++n) {
            Write(out, *ai.mAnimations[n]);
        }
        out.EndArray();
    }

    if (ai.HasLights()) {
        out.Key("lights");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumLights; ++n) {
            Write(out, *ai.mLights[n]);
        }
        out.EndArray();
    }

    if (ai.HasCameras()) {
        out.Key("cameras");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumCameras; ++n) {
            Write(out, *ai.mCameras[n]);
        }
        out.EndArray();
    }

    if (ai.HasTextures()) {
        out.Key("textures");
        out.StartArray();
        for (unsigned int n = 0; n < ai.mNumTextures; ++n) {
            Write(out, *ai.mTextures[n]);
        }
        out.EndArray();
    }
    out.EndObj();
}


void ExportAssimp2Json(const char* file, Assimp::IOSystem* io, const aiScene* scene, const Assimp::ExportProperties*) {
    std::unique_ptr<Assimp::IOStream> str(io->Open(file, "wt"));
    if (!str) {
        //throw Assimp::DeadlyExportError("could not open output file");
    }

    // get a copy of the scene so we can modify it
    aiScene* scenecopy_tmp;
    aiCopyScene(scene, &scenecopy_tmp);

    try {
        // split meshes so they fit into a 16 bit index buffer
        MeshSplitter splitter;
        splitter.SetLimit(1 << 16);
        splitter.Execute(scenecopy_tmp);

        // XXX Flag_WriteSpecialFloats is turned on by default, right now we don't have a configuration interface for exporters
        JSONWriter s(*str, JSONWriter::Flag_WriteSpecialFloats);
        Write(s, *scenecopy_tmp);

    }
    catch (...) {
        aiFreeScene(scenecopy_tmp);
        throw;
    }
    aiFreeScene(scenecopy_tmp);
}

}

#endif // ASSIMP_BUILD_NO_ASSJSON_EXPORTER
#endif // ASSIMP_BUILD_NO_EXPORT

320 thirdparty/assimp/code/Assjson/mesh_splitter.cpp vendored Normal file
@@ -0,0 +1,320 @@
/*
Assimp2Json
Copyright (c) 2011, Alexander C. Gessler

Licensed under a 3-clause BSD license. See the LICENSE file for more information.

*/

#include "mesh_splitter.h"

#include <assimp/scene.h>

#include <algorithm> // std::fill
#include <cstring>   // ::memcpy

// ----------------------------------------------------------------------------
// Note: this is largely based on assimp's SplitLargeMeshes_Vertex process.
// it is refactored and the coding style is slightly improved, though.
// ----------------------------------------------------------------------------

// ------------------------------------------------------------------------------------------------
// Executes the post processing step on the given imported data.
void MeshSplitter::Execute( aiScene* pScene) {
    std::vector<std::pair<aiMesh*, unsigned int> > source_mesh_map;

    for( unsigned int a = 0; a < pScene->mNumMeshes; a++) {
        SplitMesh(a, pScene->mMeshes[a], source_mesh_map);
    }

    const unsigned int size = static_cast<unsigned int>(source_mesh_map.size());
    if (size != pScene->mNumMeshes) {
        // it seems something has been split. rebuild the mesh list
        delete[] pScene->mMeshes;
        pScene->mNumMeshes = size;
        pScene->mMeshes = new aiMesh*[size]();

        for (unsigned int i = 0; i < size; ++i) {
            pScene->mMeshes[i] = source_mesh_map[i].first;
        }

        // now we need to update all nodes
        UpdateNode(pScene->mRootNode, source_mesh_map);
    }
}


// ------------------------------------------------------------------------------------------------
void MeshSplitter::UpdateNode(aiNode* pcNode, const std::vector<std::pair<aiMesh*, unsigned int> >& source_mesh_map) {
    // TODO: should better use std::(multi)set for source_mesh_map.

    // for every index in our list, build a new entry
    std::vector<unsigned int> aiEntries;
    aiEntries.reserve(pcNode->mNumMeshes + 1);
    for (unsigned int i = 0; i < pcNode->mNumMeshes; ++i) {
        for (unsigned int a = 0, end = static_cast<unsigned int>(source_mesh_map.size()); a < end; ++a) {
            if (source_mesh_map[a].second == pcNode->mMeshes[i]) {
                aiEntries.push_back(a);
            }
        }
    }

    // now build the new list
    delete[] pcNode->mMeshes;
    pcNode->mNumMeshes = static_cast<unsigned int>(aiEntries.size());
    pcNode->mMeshes = new unsigned int[pcNode->mNumMeshes];

    for (unsigned int b = 0; b < pcNode->mNumMeshes; ++b) {
        pcNode->mMeshes[b] = aiEntries[b];
    }

    // recursively update children
    for (unsigned int i = 0, end = pcNode->mNumChildren; i < end; ++i) {
        UpdateNode(pcNode->mChildren[i], source_mesh_map);
    }
}

#define WAS_NOT_COPIED 0xffffffff

typedef std::pair<unsigned int, float> PerVertexWeight;
typedef std::vector<PerVertexWeight> VertexWeightTable;

// ------------------------------------------------------------------------------------------------
VertexWeightTable* ComputeVertexBoneWeightTable(const aiMesh* pMesh) {
    if (!pMesh || !pMesh->mNumVertices || !pMesh->mNumBones) {
        return nullptr;
    }

    VertexWeightTable* const avPerVertexWeights = new VertexWeightTable[pMesh->mNumVertices];
    for (unsigned int i = 0; i < pMesh->mNumBones; ++i) {

        aiBone* bone = pMesh->mBones[i];
        for (unsigned int a = 0; a < bone->mNumWeights; ++a) {
            const aiVertexWeight& weight = bone->mWeights[a];
            avPerVertexWeights[weight.mVertexId].push_back(std::make_pair(i, weight.mWeight));
        }
    }
    return avPerVertexWeights;
}
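
// In other words, the bone-major weights stored in the mesh ("bone i affects
// vertex v with weight w") are inverted into a vertex-major table, e.g.
// avPerVertexWeights[7] == [(0, 0.6f), (3, 0.4f)], so that SplitMesh below can
// look up all influences of a vertex directly while copying it to a submesh.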

// ------------------------------------------------------------------------------------------------
void MeshSplitter::SplitMesh(unsigned int a, aiMesh* in_mesh, std::vector<std::pair<aiMesh*, unsigned int> >& source_mesh_map) {
    // TODO: should better use std::(multi)set for source_mesh_map.

    if (in_mesh->mNumVertices <= LIMIT) {
        source_mesh_map.push_back(std::make_pair(in_mesh, a));
        return;
    }

    // build a per-vertex weight list if necessary
    VertexWeightTable* avPerVertexWeights = ComputeVertexBoneWeightTable(in_mesh);

    // we need to split this mesh into sub meshes. Estimate submesh size
    const unsigned int sub_meshes = (in_mesh->mNumVertices / LIMIT) + 1;

    // create a std::vector<unsigned int> to remember which vertices have already
    // been copied and to which position (i.e. output index)
    std::vector<unsigned int> was_copied_to;
    was_copied_to.resize(in_mesh->mNumVertices, WAS_NOT_COPIED);

    // Try to find a good estimate for the number of output faces
    // per mesh. Add 12.5% as buffer
    unsigned int size_estimated = in_mesh->mNumFaces / sub_meshes;
    size_estimated += size_estimated / 8;

    // now generate all submeshes
    unsigned int base = 0;
    while (true) {
        const unsigned int out_vertex_index = LIMIT;

        aiMesh* out_mesh = new aiMesh();
        out_mesh->mNumVertices = 0;
        out_mesh->mMaterialIndex = in_mesh->mMaterialIndex;

        // the name carries the adjacency information between the meshes
        out_mesh->mName = in_mesh->mName;

        typedef std::vector<aiVertexWeight> BoneWeightList;
        if (in_mesh->HasBones()) {
            out_mesh->mBones = new aiBone*[in_mesh->mNumBones]();
        }

        // clear the temporary helper array
        if (base) {
            std::fill(was_copied_to.begin(), was_copied_to.end(), WAS_NOT_COPIED);
        }

        std::vector<aiFace> vFaces;

        // reserve enough storage for most cases
        if (in_mesh->HasPositions()) {
            out_mesh->mVertices = new aiVector3D[out_vertex_index];
        }

        if (in_mesh->HasNormals()) {
            out_mesh->mNormals = new aiVector3D[out_vertex_index];
        }

        if (in_mesh->HasTangentsAndBitangents()) {
            out_mesh->mTangents = new aiVector3D[out_vertex_index];
            out_mesh->mBitangents = new aiVector3D[out_vertex_index];
        }

        for (unsigned int c = 0; in_mesh->HasVertexColors(c); ++c) {
            out_mesh->mColors[c] = new aiColor4D[out_vertex_index];
        }

        for (unsigned int c = 0; in_mesh->HasTextureCoords(c); ++c) {
            out_mesh->mNumUVComponents[c] = in_mesh->mNumUVComponents[c];
            out_mesh->mTextureCoords[c] = new aiVector3D[out_vertex_index];
        }
        vFaces.reserve(size_estimated);

        // (we will also need to copy the array of indices)
        while (base < in_mesh->mNumFaces) {
            const unsigned int iNumIndices = in_mesh->mFaces[base].mNumIndices;

            // doesn't catch degenerates but is quite fast
            unsigned int iNeed = 0;
            for (unsigned int v = 0; v < iNumIndices; ++v) {
                unsigned int index = in_mesh->mFaces[base].mIndices[v];

                // check whether we do already have this vertex
                if (WAS_NOT_COPIED == was_copied_to[index]) {
                    iNeed++;
                }
            }
            if (out_mesh->mNumVertices + iNeed > out_vertex_index) {
                // don't use this face
                break;
            }

            vFaces.push_back(aiFace());
            aiFace& rFace = vFaces.back();

            // setup face type and number of indices
            rFace.mNumIndices = iNumIndices;
            rFace.mIndices = new unsigned int[iNumIndices];

            // need to update the output primitive types
            switch (rFace.mNumIndices)
            {
            case 1:
                out_mesh->mPrimitiveTypes |= aiPrimitiveType_POINT;
                break;
            case 2:
                out_mesh->mPrimitiveTypes |= aiPrimitiveType_LINE;
                break;
            case 3:
                out_mesh->mPrimitiveTypes |= aiPrimitiveType_TRIANGLE;
                break;
            default:
                out_mesh->mPrimitiveTypes |= aiPrimitiveType_POLYGON;
            }

            // and copy the contents of the old array, offset them by current base
            for (unsigned int v = 0; v < iNumIndices; ++v) {
                const unsigned int index = in_mesh->mFaces[base].mIndices[v];

                // check whether we do already have this vertex
                if (WAS_NOT_COPIED != was_copied_to[index]) {
                    rFace.mIndices[v] = was_copied_to[index];
                    continue;
                }

                // copy positions
                out_mesh->mVertices[out_mesh->mNumVertices] = (in_mesh->mVertices[index]);

                // copy normals
                if (in_mesh->HasNormals()) {
                    out_mesh->mNormals[out_mesh->mNumVertices] = (in_mesh->mNormals[index]);
                }

                // copy tangents/bi-tangents
                if (in_mesh->HasTangentsAndBitangents()) {
                    out_mesh->mTangents[out_mesh->mNumVertices] = (in_mesh->mTangents[index]);
                    out_mesh->mBitangents[out_mesh->mNumVertices] = (in_mesh->mBitangents[index]);
                }

                // texture coordinates
                for (unsigned int c = 0; c < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++c) {
                    if (in_mesh->HasTextureCoords(c)) {
                        out_mesh->mTextureCoords[c][out_mesh->mNumVertices] = in_mesh->mTextureCoords[c][index];
                    }
                }
                // vertex colors
                for (unsigned int c = 0; c < AI_MAX_NUMBER_OF_COLOR_SETS; ++c) {
                    if (in_mesh->HasVertexColors(c)) {
                        out_mesh->mColors[c][out_mesh->mNumVertices] = in_mesh->mColors[c][index];
                    }
                }
                // check whether we have bone weights assigned to this vertex
                rFace.mIndices[v] = out_mesh->mNumVertices;
                if (avPerVertexWeights) {
                    VertexWeightTable& table = avPerVertexWeights[out_mesh->mNumVertices];
                    for (VertexWeightTable::const_iterator iter = table.begin(), end = table.end(); iter != end; ++iter) {
                        // allocate the bone weight array if necessary and store it in the mBones field (HACK!)
                        BoneWeightList* weight_list = reinterpret_cast<BoneWeightList*>(out_mesh->mBones[(*iter).first]);
                        if (!weight_list) {
                            weight_list = new BoneWeightList();
                            out_mesh->mBones[(*iter).first] = reinterpret_cast<aiBone*>(weight_list);
                        }
                        weight_list->push_back(aiVertexWeight(out_mesh->mNumVertices, (*iter).second));
                    }
                }

                was_copied_to[index] = out_mesh->mNumVertices;
                out_mesh->mNumVertices++;
            }
            base++;
            if (out_mesh->mNumVertices == out_vertex_index) {
                // break here. The face is only added if it was complete
                break;
            }
        }

        // check which bones we'll need to create for this submesh
        if (in_mesh->HasBones()) {
            aiBone** ppCurrent = out_mesh->mBones;
            for (unsigned int k = 0; k < in_mesh->mNumBones; ++k) {
                // check whether the bone exists
                BoneWeightList* const weight_list = reinterpret_cast<BoneWeightList*>(out_mesh->mBones[k]);

                if (weight_list) {
                    const aiBone* const bone_in = in_mesh->mBones[k];
                    aiBone* const bone_out = new aiBone();
                    *ppCurrent++ = bone_out;
                    bone_out->mName = aiString(bone_in->mName);
                    bone_out->mOffsetMatrix = bone_in->mOffsetMatrix;
                    bone_out->mNumWeights = (unsigned int)weight_list->size();
                    bone_out->mWeights = new aiVertexWeight[bone_out->mNumWeights];

                    // copy the vertex weights
                    ::memcpy(bone_out->mWeights, &(*weight_list)[0], bone_out->mNumWeights * sizeof(aiVertexWeight));

                    delete weight_list;
                    out_mesh->mNumBones++;
                }
            }
        }

        // copy the face list to the mesh
        out_mesh->mFaces = new aiFace[vFaces.size()];
        out_mesh->mNumFaces = (unsigned int)vFaces.size();

        for (unsigned int p = 0; p < out_mesh->mNumFaces; ++p) {
            out_mesh->mFaces[p] = vFaces[p];
        }

        // add the newly created mesh to the list
        source_mesh_map.push_back(std::make_pair(out_mesh, a));

        if (base == in_mesh->mNumFaces) {
            break;
        }
    }

    // delete the per-vertex weight list again
    delete[] avPerVertexWeights;

    // now delete the old mesh data
    delete in_mesh;
}

61 thirdparty/assimp/code/Assjson/mesh_splitter.h vendored Normal file
@@ -0,0 +1,61 @@
/*
Assimp2Json
Copyright (c) 2011, Alexander C. Gessler

Licensed under a 3-clause BSD license. See the LICENSE file for more information.

*/

#ifndef INCLUDED_MESH_SPLITTER
#define INCLUDED_MESH_SPLITTER

// ----------------------------------------------------------------------------
// Note: this is largely based on assimp's SplitLargeMeshes_Vertex process.
// it is refactored and the coding style is slightly improved, though.
// ----------------------------------------------------------------------------

#include <vector>

struct aiScene;
struct aiMesh;
struct aiNode;

// ---------------------------------------------------------------------------
/** Splits meshes of unique vertices into meshes with no more vertices than
 *  a given, configurable threshold value.
 */
class MeshSplitter
{

public:

    void SetLimit(unsigned int l) {
        LIMIT = l;
    }

    unsigned int GetLimit() const {
        return LIMIT;
    }

public:

    // -------------------------------------------------------------------
    /** Executes the post processing step on the given imported data.
     *  At the moment a process is not supposed to fail.
     *  @param pScene The imported data to work at.
     */
    void Execute( aiScene* pScene);


private:

    void UpdateNode(aiNode* pcNode, const std::vector<std::pair<aiMesh*, unsigned int> >& source_mesh_map);
    void SplitMesh(unsigned int index, aiMesh* mesh, std::vector<std::pair<aiMesh*, unsigned int> >& source_mesh_map);

public:

    unsigned int LIMIT;
};

#endif // INCLUDED_MESH_SPLITTER
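
As wired up in ExportAssimp2Json above, the splitter runs on a mutable copy of the scene before serialization, so that every output mesh fits into a 16 bit index buffer:

    MeshSplitter splitter;
    splitter.SetLimit(1 << 16); // at most 65536 vertices per submesh
    splitter.Execute(scenecopy_tmp);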