@notlion
Last active February 15, 2022 07:20
GLTF2 Utils for Cinder (with support for up to 4 blendshapes). Files: gltf_primitive_pbr_fs.glsl, gltf_primitive_pbr_vs.glsl, gltf_utils.cpp, gltf_utils.hpp.
// gltf_primitive_pbr_fs.glsl

precision highp float;

// The PBR shader below is adapted from the Khronos glTF WebGL example:
// https://github.com/KhronosGroup/glTF-WebGL-PBR/blob/master/shaders/pbr-frag.glsl
struct PBRInfo {
float NdotL; // cos angle between normal and light direction
float NdotV; // cos angle between normal and view direction
float NdotH; // cos angle between normal and half vector
float LdotH; // cos angle between light direction and half vector
float VdotH; // cos angle between view direction and half vector
float perceptualRoughness; // roughness value, as authored by the model creator (input to shader)
float metalness; // metallic value at the surface
vec3 reflectance0; // full reflectance color (normal incidence angle)
vec3 reflectance90; // reflectance color at grazing angle
float alphaRoughness; // roughness remapped to alpha = perceptualRoughness^2 for a more linear response (reference [2] of the Khronos shader: Burley, "Physically Based Shading at Disney")
vec3 diffuseColor; // color contribution from diffuse lighting
vec3 specularColor; // color contribution from specular lighting
};
const float M_PI = 3.141592653589793;
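// Basic Lambertian diffuse contribution.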
vec3 diffuse(PBRInfo pbrInputs) {
return pbrInputs.diffuseColor / M_PI;
}
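// Fresnel reflectance term F, computed with Schlick's approximation.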
vec3 specularReflection(PBRInfo pbrInputs) {
return pbrInputs.reflectance0 + (pbrInputs.reflectance90 - pbrInputs.reflectance0) * pow(clamp(1.0 - pbrInputs.VdotH, 0.0, 1.0), 5.0);
}
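// Specular geometric attenuation term G (following the Khronos reference implementation).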
float geometricOcclusion(PBRInfo pbrInputs) {
float NdotL = pbrInputs.NdotL;
float NdotV = pbrInputs.NdotV;
float r = pbrInputs.alphaRoughness;
float attenuationL = 2.0 * NdotL / (NdotL + sqrt(r * r + (1.0 - r * r) * (NdotL * NdotL)));
float attenuationV = 2.0 * NdotV / (NdotV + sqrt(r * r + (1.0 - r * r) * (NdotV * NdotV)));
return attenuationL * attenuationV;
}
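// Microfacet normal distribution term D (GGX / Trowbridge-Reitz).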
float microfacetDistribution(PBRInfo pbrInputs) {
float roughnessSq = pbrInputs.alphaRoughness * pbrInputs.alphaRoughness;
float f = (pbrInputs.NdotH * roughnessSq - pbrInputs.NdotH) * pbrInputs.NdotH + 1.0;
return roughnessSq / (M_PI * f * f);
}
uniform vec3 uEyePosition;
uniform vec3 uLightDirection;
uniform vec3 uLightColor;
uniform vec4 uBaseColorFactor;
uniform float uMetallicFactor;
uniform float uRoughnessFactor;
#if HAS_BASE_COLOR_TEXTURE
uniform sampler2D uBaseColorTexture;
#endif
in vec3 vPosition;
in vec3 vNormal;
in vec2 vTexcoord;
out vec4 fragColor;
void main() {
vec4 baseColor = uBaseColorFactor;
#if HAS_BASE_COLOR_TEXTURE
baseColor *= texture(uBaseColorTexture, vTexcoord);
#endif
float alphaRoughness = uRoughnessFactor * uRoughnessFactor;
vec3 f0 = vec3(0.04);
vec3 diffuseColor = baseColor.rgb * (vec3(1.0) - f0);
diffuseColor *= 1.0 - uMetallicFactor;
vec3 specularColor = mix(f0, baseColor.rgb, uMetallicFactor);
float reflectance = max(max(specularColor.r, specularColor.g), specularColor.b);
// For typical incident reflectance range (between 4% and 100%) set the grazing reflectance to 100% for a typical Fresnel effect.
// For very low reflectance range on highly diffuse objects (below 4%), incrementally reduce grazing reflectance to 0%.
float reflectance90 = clamp(reflectance * 25.0, 0.0, 1.0);
vec3 specularEnvironmentR0 = specularColor.rgb;
vec3 specularEnvironmentR90 = vec3(1.0) * reflectance90;
vec3 n = normalize(vNormal); // normal at surface point
vec3 v = normalize(uEyePosition - vPosition); // Vector from surface point to camera
vec3 l = normalize(uLightDirection); // Vector from surface point to light
vec3 h = normalize(l + v); // Half vector between both l and v
vec3 reflection = -normalize(reflect(v, n));
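// Note: "reflection" is only needed for image-based lighting, which this shader does not implement.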
float NdotL = clamp(dot(n, l), 0.001, 1.0);
float NdotV = clamp(abs(dot(n, v)), 0.001, 1.0);
float NdotH = clamp(dot(n, h), 0.0, 1.0);
float LdotH = clamp(dot(l, h), 0.0, 1.0);
float VdotH = clamp(dot(v, h), 0.0, 1.0);
PBRInfo pbrInputs = PBRInfo(NdotL,
NdotV,
NdotH,
LdotH,
VdotH,
uRoughnessFactor,
uMetallicFactor,
specularEnvironmentR0,
specularEnvironmentR90,
alphaRoughness,
diffuseColor,
specularColor);
// Calculate the shading terms for the microfacet specular shading model
vec3 F = specularReflection(pbrInputs);
float G = geometricOcclusion(pbrInputs);
float D = microfacetDistribution(pbrInputs);
// Calculation of analytical lighting contribution
vec3 diffuseContrib = (1.0 - F) * diffuse(pbrInputs);
vec3 specContrib = F * G * D / (4.0 * NdotL * NdotV);
// Obtain final intensity as reflectance (BRDF) scaled by the energy of the light (cosine law)
vec3 color = NdotL * uLightColor * (diffuseContrib + specContrib);
fragColor = vec4(pow(color, vec3(1.0 / 2.2)), baseColor.a);
// Hack: add a constant ambient term (note that this is applied after gamma correction).
fragColor.rgb += uLightColor * 0.2;
}
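
For reference, main() above evaluates the microfacet model from the Khronos sample, with F, G, and D taken from the helper functions:

$$ f = (1 - F)\,\frac{c_\text{diffuse}}{\pi} + \frac{F\,G\,D}{4\,(N \cdot L)(N \cdot V)}, \qquad \text{color} = (N \cdot L)\; c_\text{light}\, f $$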
// gltf_primitive_pbr_vs.glsl

precision highp float;

// The TARGET macros here work around GLSL ES 3.0's lack of support for arrays of vertex shader inputs.
#if TARGET_COUNT > 0
uniform float targetWeights[TARGET_COUNT];
#define DEF_TARGET_ATTRIBS(i) \
in vec3 targetPosition##i; \
in vec3 targetNormal##i;
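// e.g. DEF_TARGET_ATTRIBS(1) declares: in vec3 targetPosition1; in vec3 targetNormal1;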
DEF_TARGET_ATTRIBS(0)
#if TARGET_COUNT > 1
DEF_TARGET_ATTRIBS(1)
#endif
#if TARGET_COUNT > 2
DEF_TARGET_ATTRIBS(2)
#endif
#if TARGET_COUNT > 3
DEF_TARGET_ATTRIBS(3)
#endif
#endif
uniform mat4 ciModelViewProjection;
uniform mat4 ciModelMatrix;
uniform mat3 ciNormalMatrix;
in vec3 ciPosition;
in vec3 ciNormal;
in vec2 ciTexCoord0;
out vec3 vPosition;
out vec3 vNormal;
out vec2 vTexcoord;
void main() {
vec3 position = ciPosition;
vec3 normal = ciNormal;
#if TARGET_COUNT > 0
#define ADD_TARGET(i) \
position += targetWeights[i] * targetPosition##i; \
normal += targetWeights[i] * targetNormal##i;
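// glTF morph targets store per-vertex deltas, so each target is accumulated onto the base position and normal, scaled by its weight.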
ADD_TARGET(0)
#if TARGET_COUNT > 1
ADD_TARGET(1)
#endif
#if TARGET_COUNT > 2
ADD_TARGET(2)
#endif
#if TARGET_COUNT > 3
ADD_TARGET(3)
#endif
#endif
vPosition = vec3(ciModelMatrix * vec4(position, 1.0));
vNormal = ciNormalMatrix * normal;
vTexcoord = ciTexCoord0;
gl_Position = ciModelViewProjection * vec4(position, 1.0);
}
#include "gltf_utils.hpp"
#define TINYGLTF_NO_STB_IMAGE_WRITE
#define TINYGLTF_IMPLEMENTATION
#define STB_IMAGE_IMPLEMENTATION
#include "tiny_gltf.h"
#include "cinder/Log.h"
#include "cinder/app/App.h"
#include "cinder/gl/gl.h"
#include "glm/gtx/matrix_decompose.hpp"
#include <iterator>
using namespace ci;
namespace gltf {
template <typename T>
class AccessorIterator : public std::iterator<std::input_iterator_tag, T> {
const unsigned char *mBufferData;
ptrdiff_t mStride;
public:
AccessorIterator(const tinygltf::Model &model, const tinygltf::Accessor &accessor) {
const auto &bufferView = model.bufferViews[accessor.bufferView];
const auto &buffer = model.buffers[bufferView.buffer];
mBufferData = buffer.data.data() + (bufferView.byteOffset + accessor.byteOffset);
mStride = accessor.ByteStride(bufferView);
}
AccessorIterator(const AccessorIterator &it) = default;
AccessorIterator &operator++() {
mBufferData += mStride;
return *this;
}
AccessorIterator operator++(int) {
AccessorIterator prev(*this);
operator++();
return prev;
}
AccessorIterator &operator+=(ptrdiff_t n) {
mBufferData += mStride * n;
return *this;
}
bool operator==(const AccessorIterator &rhs) const {
return mBufferData == rhs.mBufferData && mStride == rhs.mStride;
}
bool operator!=(const AccessorIterator &rhs) const {
return mBufferData != rhs.mBufferData || mStride != rhs.mStride;
}
const T &operator*() const {
return *reinterpret_cast<const T *>(mBufferData);
}
};
template <typename T>
AccessorIterator<T> accessorBegin(const tinygltf::Model &model, const tinygltf::Accessor &accessor) {
return AccessorIterator<T>(model, accessor);
}
template <typename T>
AccessorIterator<T> accessorEnd(const tinygltf::Model &model, const tinygltf::Accessor &accessor) {
return AccessorIterator<T>(model, accessor) += accessor.count;
}
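// Example (illustrative): reading every value of a vec3 accessor with the iterators above:
//
//   for (auto it = accessorBegin<vec3>(model, accessor), end = accessorEnd<vec3>(model, accessor); it != end; ++it) {
//     const vec3 &position = *it;
//     // ...
//   }
//
// convertKeyframes() below copies an animation sampler's input (time) and output (value) accessors
// into an AnimationChannel, grouping `componentCount` output values per keyframe ("weights" channels
// store one value per morph target per keyframe).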
template <typename T>
void convertKeyframes(const tinygltf::Model &model, const tinygltf::AnimationSampler &sampler, AnimationChannel<T> &destChannel) {
const auto &inputAccessor = model.accessors[sampler.input];
const auto inputEnd = accessorEnd<float>(model, inputAccessor);
auto inputIter = accessorBegin<float>(model, inputAccessor);
const auto &outputAccessor = model.accessors[sampler.output];
const auto outputEnd = accessorEnd<T>(model, outputAccessor);
auto outputIter = accessorBegin<T>(model, outputAccessor);
destChannel.keyframes.reserve(inputAccessor.count);
destChannel.componentCount = int(outputAccessor.count / inputAccessor.count);
while (inputIter != inputEnd && outputIter != outputEnd) {
destChannel.keyframes.emplace_back();
destChannel.keyframes.back().timeSeconds = *inputIter;
auto &values = destChannel.keyframes.back().values;
values.reserve(destChannel.componentCount);
for (int i = 0; i < destChannel.componentCount; ++i) {
values.push_back(*outputIter);
++outputIter;
}
++inputIter;
}
}
static vec4 convertColor(const tinygltf::ColorValue &color) {
return vec4(color[0], color[1], color[2], color[3]);
}
static geom::Attrib getAttribSemantic(const std::string &attribName) {
using namespace geom;
// glTF 2.0 uses the suffixed names (COLOR_0, JOINTS_0, WEIGHTS_0); the unsuffixed names are kept for older, glTF 1.0 style exporters.
if (attribName == "POSITION")
return Attrib::POSITION;
else if (attribName == "NORMAL")
return Attrib::NORMAL;
else if (attribName == "TEXCOORD_0")
return Attrib::TEX_COORD_0;
else if (attribName == "TEXCOORD_1")
return Attrib::TEX_COORD_1;
else if (attribName == "TEXCOORD_2")
return Attrib::TEX_COORD_2;
else if (attribName == "TEXCOORD_3")
return Attrib::TEX_COORD_3;
else if (attribName == "COLOR" || attribName == "COLOR_0")
return Attrib::COLOR;
else if (attribName == "TANGENT")
return Attrib::TANGENT;
else if (attribName == "JOINT" || attribName == "JOINTS_0")
return Attrib::BONE_INDEX;
else if (attribName == "JOINTMATRIX")
return Attrib::CUSTOM_0;
else if (attribName == "WEIGHT" || attribName == "WEIGHTS_0")
return Attrib::BONE_WEIGHT;
else
return Attrib::NUM_ATTRIBS;
}
template <>
quat interpolateKeyframeLinear(const AnimationKeyframe<quat> &k0, const AnimationKeyframe<quat> &k1, float timeSeconds, int valueIndex) {
const float t = (timeSeconds - k0.timeSeconds) / (k1.timeSeconds - k0.timeSeconds);
return glm::slerp(k0.values[valueIndex], k1.values[valueIndex], t);
}
void Node::updateWorldTransform(const mat4 parentWorldTransform) {
worldTransform = parentWorldTransform;
worldTransform *= glm::translate(currentTranslation);
worldTransform *= glm::mat4_cast(currentRotation);
worldTransform *= glm::scale(currentScale);
}
void Node::draw(const Model &model, const vec3 &eyePosition, const vec3 &lightDirection, const vec3 &lightColor) const {
// Respect visibility set via setNodeVisibilityRecursive().
if (visible && meshId >= 0) {
gl::ScopedModelMatrix scopedModel;
gl::multModelMatrix(worldTransform);
gl::color(1.0f, 1.0f, 1.0f, 1.0f);
for (const auto &prim : model.meshes[meshId].primitives) {
gl::ScopedTextureBind scopedTex(GL_TEXTURE_2D, 0);
const auto &prog = prim.batch->getGlslProg();
const auto &material = model.materials[prim.materialId];
prog->uniform("uEyePosition", eyePosition);
prog->uniform("uLightDirection", lightDirection);
prog->uniform("uLightColor", lightColor);
prog->uniform("uBaseColorFactor", material.baseColorFactor);
prog->uniform("uMetallicFactor", material.metallicFactor);
prog->uniform("uRoughnessFactor", material.roughnessFactor);
if (material.baseColorTextureId >= 0) {
model.textures[material.baseColorTextureId]->bind(0);
prog->uniform("uBaseColorTexture", 0);
}
if (prim.targetCount > 0) {
prog->uniform("targetWeights", blendWeights.data(), int(blendWeights.size()));
}
prim.batch->draw();
}
}
}
static Model loadModel(const tinygltf::Model &inModel) {
Model outModel;
outModel.textures.reserve(inModel.textures.size());
outModel.materials.reserve(inModel.materials.size());
outModel.meshes.reserve(inModel.meshes.size());
outModel.nodes.reserve(inModel.nodes.size());
outModel.scenes.reserve(inModel.scenes.size());
outModel.animations.reserve(inModel.animations.size());
// Proactively create VBOs for all buffer views in this model, assuming we will use them all.
std::vector<gl::VboRef> bufferViewVbos;
for (const auto &bufferView : inModel.bufferViews) {
const auto &buffer = inModel.buffers[bufferView.buffer];
bufferViewVbos.push_back(gl::Vbo::create(bufferView.target,
bufferView.byteLength,
buffer.data.data() + bufferView.byteOffset,
GL_STATIC_DRAW));
}
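// Note: this assumes every bufferView specifies a GL target (ARRAY_BUFFER / ELEMENT_ARRAY_BUFFER).
// glTF allows `target` to be omitted, in which case tinygltf reports 0 and the Vbo above would be
// created with an invalid target.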
// Textures
//
for (const auto &texture : inModel.textures) {
const auto &sampler = inModel.samplers[texture.sampler];
const auto fmt = gl::Texture::Format()
.magFilter(sampler.magFilter)
.minFilter(sampler.minFilter)
.mipmap(sampler.minFilter == GL_LINEAR_MIPMAP_LINEAR ||
sampler.minFilter == GL_LINEAR_MIPMAP_NEAREST ||
sampler.minFilter == GL_NEAREST_MIPMAP_LINEAR ||
sampler.minFilter == GL_NEAREST_MIPMAP_NEAREST)
.wrapS(sampler.wrapS)
.wrapT(sampler.wrapT)
.loadTopDown(true);
auto &image = inModel.images[texture.source];
const auto tmpSurface = Surface(const_cast<uint8_t *>(image.image.data()),
image.width,
image.height,
image.width * image.component,
image.component == 3 ? SurfaceChannelOrder::RGB : SurfaceChannelOrder::RGBA);
outModel.textures.push_back(gl::Texture::create(tmpSurface, fmt));
}
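// Note: the texture path above assumes each texture references a sampler and an 8-bit RGB or RGBA
// image; textures without a sampler (sampler == -1) or with other channel counts are not handled.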
// Materials
//
for (const auto &material : inModel.materials) {
outModel.materials.emplace_back();
auto &outMaterial = outModel.materials.back();
auto baseColorFactorIter = material.values.find("baseColorFactor");
outMaterial.baseColorFactor = baseColorFactorIter != material.values.end()
? convertColor(baseColorFactorIter->second.ColorFactor())
: vec4(1.0f);
auto metallicFactorIter = material.values.find("metallicFactor");
outMaterial.metallicFactor = metallicFactorIter != material.values.end()
? metallicFactorIter->second.Factor()
: 0.0f;
auto roughnessFactorIter = material.values.find("roughnessFactor");
outMaterial.roughnessFactor = roughnessFactorIter != material.values.end()
? roughnessFactorIter->second.Factor()
: 0.5f;
auto baseColorTextureIter = material.values.find("baseColorTexture");
outMaterial.baseColorTextureId = baseColorTextureIter != material.values.end()
? baseColorTextureIter->second.TextureIndex()
: -1;
}
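// Note: the glTF 2.0 spec defaults both metallicFactor and roughnessFactor to 1.0; the fallbacks
// above (0.0 and 0.5) deviate from those defaults.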
// Meshes
//
for (const auto &mesh : inModel.meshes) {
outModel.meshes.emplace_back();
auto &outMesh = outModel.meshes.back();
outMesh.primitives.reserve(mesh.primitives.size());
for (const auto &prim : mesh.primitives) {
std::vector<std::pair<geom::BufferLayout, gl::VboRef>> layoutVbos;
for (const auto &attrib : prim.attributes) {
const auto &accessor = inModel.accessors[attrib.second];
const int byteStride = accessor.ByteStride(inModel.bufferViews[accessor.bufferView]);
assert(byteStride != -1);
geom::BufferLayout layout;
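// Note: despite its name, tinygltf::GetTypeSizeInBytes() returns the number of components for the
// accessor type, which is what BufferLayout::append() expects as its `dims` argument.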
layout.append(getAttribSemantic(attrib.first), tinygltf::GetTypeSizeInBytes(accessor.type), byteStride, accessor.byteOffset);
layoutVbos.emplace_back(layout, bufferViewVbos[accessor.bufferView]);
}
auto attribMapping = gl::Batch::AttributeMapping();
auto progFmt = gl::GlslProg::Format()
.vertex(loadFile(app::getAssetPath("shaders/gltf_primitive_pbr_vs.glsl")))
.fragment(loadFile(app::getAssetPath("shaders/gltf_primitive_pbr_fs.glsl")));
for (auto targetIt = prim.targets.begin(); targetIt != prim.targets.end(); ++targetIt) {
for (const auto &targetAttrib : *targetIt) {
const auto &accessor = inModel.accessors[targetAttrib.second];
auto semantic = getAttribSemantic(targetAttrib.first);
if (semantic == geom::Attrib::POSITION || semantic == geom::Attrib::NORMAL) {
const auto namePrefix = semantic == geom::Attrib::POSITION ? "targetPosition" : "targetNormal";
const auto name = namePrefix + std::to_string(std::distance(prim.targets.begin(), targetIt));
semantic = static_cast<geom::Attrib>(geom::Attrib::CUSTOM_0 + attribMapping.size());
attribMapping.emplace(semantic, name);
progFmt.attrib(semantic, name);
const int byteStride = accessor.ByteStride(inModel.bufferViews[accessor.bufferView]);
assert(byteStride != -1);
geom::BufferLayout layout;
layout.append(semantic, tinygltf::GetTypeSizeInBytes(accessor.type), byteStride, accessor.byteOffset);
layoutVbos.emplace_back(layout, bufferViewVbos[accessor.bufferView]);
}
}
}
const auto &indexAccessor = inModel.accessors[prim.indices];
const auto &indexView = inModel.bufferViews[indexAccessor.bufferView];
const auto indexData = inModel.buffers[indexView.buffer].data.data() + indexView.byteOffset + indexAccessor.byteOffset;
const auto indexVbo = gl::Vbo::create(indexView.target, indexAccessor.ByteStride(indexView) * indexAccessor.count, indexData, GL_STATIC_DRAW);
const auto primitiveVboMesh = gl::VboMesh::create(0, prim.mode, layoutVbos, uint32_t(indexAccessor.count), indexAccessor.componentType, indexVbo);
outMesh.primitives.emplace_back();
auto &outPrim = outMesh.primitives.back();
outPrim.materialId = prim.material;
outPrim.targetCount = int(prim.targets.size());
const auto &material = outModel.materials[outPrim.materialId];
progFmt.define("TARGET_COUNT", std::to_string(outPrim.targetCount))
.define("HAS_BASE_COLOR_TEXTURE", material.baseColorTextureId >= 0 ? "1" : "0");
outPrim.batch = gl::Batch::create(primitiveVboMesh, gl::GlslProg::create(progFmt), attribMapping);
}
}
// Nodes
//
for (int i = 0; i < int(inModel.nodes.size()); ++i) {
const auto &node = inModel.nodes[i];
outModel.nodes.emplace_back();
auto &outNode = outModel.nodes.back();
outNode.name = node.name;
outNode.id = i;
if (16 == node.matrix.size()) {
const auto &m = node.matrix;
const auto localTransform = mat4(m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8], m[9], m[10], m[11], m[12], m[13], m[14], m[15]);
vec3 skew;
vec4 perspective;
glm::decompose(localTransform, outNode.originalScale, outNode.originalRotation, outNode.originalTranslation, skew, perspective);
}
else {
if (3 == node.translation.size()) {
outNode.originalTranslation = vec3(node.translation[0], node.translation[1], node.translation[2]);
}
if (4 == node.rotation.size()) {
// glTF stores rotations as (x, y, z, w); glm::quat's constructor takes (w, x, y, z).
outNode.originalRotation = quat(float(node.rotation[3]), float(node.rotation[0]), float(node.rotation[1]), float(node.rotation[2]));
}
if (3 == node.scale.size()) {
outNode.originalScale = vec3(node.scale[0], node.scale[1], node.scale[2]);
}
}
outNode.meshId = node.mesh;
outNode.childIds = node.children;
}
// Scenes
//
for (const auto &scene : inModel.scenes) {
outModel.scenes.emplace_back();
outModel.scenes.back().nodeIds = scene.nodes;
}
// Animations
//
for (const auto &anim : inModel.animations) {
outModel.animations.emplace_back();
auto &outAnim = outModel.animations.back();
for (const auto &channel : anim.channels) {
const auto &sampler = anim.samplers[channel.sampler];
const auto processChannel = [&](auto &transformChannel) {
transformChannel.targetNodeId = channel.target_node;
convertKeyframes(inModel, sampler, transformChannel);
if (!transformChannel.keyframes.empty() && transformChannel.keyframes.back().timeSeconds > outAnim.durationSeconds) {
const auto duration = transformChannel.keyframes.back().timeSeconds;
outAnim.durationSeconds = duration;
outModel.animationLoopDurationSeconds = std::max(duration, outModel.animationLoopDurationSeconds);
}
};
if ("translation" == channel.target_path) {
outAnim.translationChannels.emplace_back();
processChannel(outAnim.translationChannels.back());
}
else if ("scale" == channel.target_path) {
outAnim.scaleChannels.emplace_back();
processChannel(outAnim.scaleChannels.back());
}
else if ("rotation" == channel.target_path) {
outAnim.rotationChannels.emplace_back();
processChannel(outAnim.rotationChannels.back());
}
if ("weights" == channel.target_path) {
outAnim.blendWeightsChannels.emplace_back();
processChannel(outAnim.blendWeightsChannels.back());
}
}
}
return outModel;
}
Model loadModelFileASCII(const fs::path &filePath) {
tinygltf::Model inModel;
tinygltf::TinyGLTF gltfContext;
std::string err;
const bool success = gltfContext.LoadASCIIFromFile(&inModel, &err, filePath.string());
if (!success || !err.empty()) {
CI_LOG_E("Failed to load glTF model from " << filePath << ": " << err);
}
assert(success && err.empty());
return loadModel(inModel);
}
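// Note: loadModelFileASCII() only handles ASCII .gltf files; binary .glb files would require
// tinygltf::TinyGLTF::LoadBinaryFromFile().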
// Default the parent transform to an explicit identity matrix; glm's default-constructed mat4 is not guaranteed to be identity.
static void updateNodeWorldTransformRecursive(Model &model, int nodeId, mat4 parentWorldTransform = mat4(1)) {
model.nodes[nodeId].updateWorldTransform(parentWorldTransform);
for (auto &childId : model.nodes[nodeId].childIds) {
updateNodeWorldTransformRecursive(model, childId, model.nodes[nodeId].worldTransform);
}
}
void Model::setPlayheadTimeSeconds(double timeSeconds) {
for (auto &node : nodes) {
node.currentTranslation = node.originalTranslation;
node.currentRotation = node.originalRotation;
node.currentScale = node.originalScale;
}
for (const auto &anim : animations) {
const auto t = float(clamp(timeSeconds, 0.0, double(anim.durationSeconds)));
for (const auto &channel : anim.translationChannels) {
nodes[channel.targetNodeId].currentTranslation = channel.sampleLinear(t);
}
for (const auto &channel : anim.scaleChannels) {
nodes[channel.targetNodeId].currentScale = channel.sampleLinear(t);
}
for (const auto &channel : anim.rotationChannels) {
nodes[channel.targetNodeId].currentRotation = channel.sampleLinear(t);
}
for (const auto &channel : anim.blendWeightsChannels) {
auto &weights = nodes[channel.targetNodeId].blendWeights;
weights.resize(channel.componentCount);
for (int i = 0; i < channel.componentCount; ++i) {
weights[i] = channel.sampleLinear(t, i);
}
}
}
for (auto &scene : scenes) {
for (auto &nodeId : scene.nodeIds) {
updateNodeWorldTransformRecursive(*this, nodeId);
}
}
}
void Model::setNodeVisibilityRecursive(int nodeId, bool visible) {
assert(nodeId >= 0 && nodeId < int(nodes.size()));
nodes[nodeId].visible = visible;
for (auto id : nodes[nodeId].childIds) {
setNodeVisibilityRecursive(id, visible);
}
}
} // gltf
// gltf_utils.hpp

#pragma once

#include "cinder/gl/gl.h"

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>
namespace gltf {
using glm::mat4;
using glm::quat;
using glm::vec3;
using glm::vec4;
struct Model;
struct Scene {
std::vector<int> nodeIds;
};
struct Node {
mat4 worldTransform;
vec3 originalTranslation;
vec3 originalScale{ 1.0f };
quat originalRotation;
vec3 currentTranslation;
vec3 currentScale{ 1.0f };
quat currentRotation;
std::vector<float> blendWeights;
std::vector<int> childIds;
int id;
int meshId;
std::string name;
bool visible = true;
void updateWorldTransform(const mat4 parentWorldTransform = mat4(1));
void draw(const Model &model, const vec3 &eyePosition, const vec3 &lightDirection, const vec3 &lightColor) const;
};
struct Mesh {
struct Primitive {
ci::gl::BatchRef batch;
int materialId;
int targetCount;
};
std::vector<Primitive> primitives;
};
struct Material {
vec4 baseColorFactor;
float metallicFactor;
float roughnessFactor;
int baseColorTextureId;
int baseColorTextureTexcoord;
};
template <typename T>
struct AnimationKeyframe {
float timeSeconds;
// @Memory
// This data should be stored in a single strided array holding all values for every keyframe.
// -ryan
std::vector<T> values;
};
template <typename T>
T interpolateKeyframeLinear(const AnimationKeyframe<T> &k0, const AnimationKeyframe<T> &k1, float timeSeconds, int valueIndex) {
const float t = (timeSeconds - k0.timeSeconds) / (k1.timeSeconds - k0.timeSeconds);
return glm::mix(k0.values[valueIndex], k1.values[valueIndex], t);
}
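// Note: a quat specialization of interpolateKeyframeLinear() (using glm::slerp) is defined in gltf_utils.cpp.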
template <typename T>
struct AnimationChannel {
std::vector<AnimationKeyframe<T>> keyframes;
int targetNodeId;
int componentCount = 1;
T sampleLinear(float timeSeconds, int valueIndex = 0) const {
assert(!keyframes.empty());
const auto it = std::find_if(keyframes.begin(), keyframes.end(), [&](const auto &key) {
return key.timeSeconds >= timeSeconds;
});
if (it == keyframes.begin()) {
return it->values[valueIndex];
}
else if (it == keyframes.end()) {
// Past the last keyframe (possible when another channel is longer): hold the final value.
return keyframes.back().values[valueIndex];
}
else {
return interpolateKeyframeLinear(*(it - 1), *it, timeSeconds, valueIndex);
}
}
bool hasKeyframes() const {
return !keyframes.empty();
}
};
struct Animation {
std::vector<AnimationChannel<vec3>> translationChannels;
std::vector<AnimationChannel<vec3>> scaleChannels;
std::vector<AnimationChannel<quat>> rotationChannels;
std::vector<AnimationChannel<float>> blendWeightsChannels;
float timeSeconds = 0.0f;
float durationSeconds = 0.0f;
};
struct Model {
std::vector<Scene> scenes;
std::vector<Node> nodes;
std::vector<Mesh> meshes;
std::vector<Animation> animations;
std::vector<ci::gl::TextureRef> textures;
std::vector<Material> materials;
float animationLoopDurationSeconds = 0.0f;
void setPlayheadTimeSeconds(double timeSeconds);
void setNodeVisibilityRecursive(int nodeId, bool visible);
};
Model loadModelFileASCII(const ci::fs::path &path);
} // gltf
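
For reference, a minimal usage sketch of the API above in a Cinder app. It is illustrative only: the model filename ("models/example.gltf"), camera placement, and light values are placeholder assumptions, and the two shaders are expected under assets/shaders/ as referenced by loadModel().

// Illustrative usage sketch only: filenames, camera, and lighting are placeholder assumptions.
#include "cinder/app/App.h"
#include "cinder/app/RendererGl.h"
#include "cinder/gl/gl.h"
#include "gltf_utils.hpp"

using namespace ci;
using namespace ci::app;

class GltfExampleApp : public App {
  gltf::Model mModel;
  CameraPersp mCamera;

public:
  void setup() override {
    // Load an ASCII .gltf file from the assets folder (hypothetical filename).
    mModel = gltf::loadModelFileASCII(getAssetPath("models/example.gltf"));
    mCamera.setPerspective(45.0f, getWindowAspectRatio(), 0.1f, 100.0f);
    mCamera.lookAt(vec3(0, 1, 3), vec3(0));
  }

  void update() override {
    // Advance the animation playhead, looping over the longest channel duration.
    const double loop = mModel.animationLoopDurationSeconds;
    mModel.setPlayheadTimeSeconds(loop > 0.0 ? glm::mod(getElapsedSeconds(), loop) : 0.0);
  }

  void drawNodeRecursive(int nodeId, const vec3 &lightDir, const vec3 &lightColor) {
    const auto &node = mModel.nodes[nodeId];
    node.draw(mModel, mCamera.getEyePoint(), lightDir, lightColor);
    for (int childId : node.childIds)
      drawNodeRecursive(childId, lightDir, lightColor);
  }

  void draw() override {
    gl::clear(Color(0.1f, 0.1f, 0.1f));
    gl::ScopedDepth scopedDepth(true);
    gl::setMatrices(mCamera);
    // World transforms were updated by setPlayheadTimeSeconds(); draw every node of every scene.
    const vec3 lightDir = normalize(vec3(0.5f, 1.0f, 0.25f));
    const vec3 lightColor(1.0f);
    for (const auto &scene : mModel.scenes) {
      for (int nodeId : scene.nodeIds)
        drawNodeRecursive(nodeId, lightDir, lightColor);
    }
  }
};

CINDER_APP(GltfExampleApp, RendererGl)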