Merge pull request #1 from PixarAnimationStudios/master

Update from Pixar
This commit is contained in:
Andrew Aye 2015-10-24 12:49:38 -07:00
commit f542b8c8c3
85 changed files with 2381 additions and 654 deletions

View File

@ -113,7 +113,7 @@ set(CMAKE_MODULE_PATH
# OpenSubdiv trips bugs in some older gcc versions
if (CMAKE_COMPILER_IS_GNUCC)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8)
message(FATAL_ERROR "g++ 4.8 or newer required")
message(WARNING "g++ 4.8 or newer recommended")
endif()
endif()
@ -314,6 +314,8 @@ option(NO_DX "Disable DirectX support")
option(NO_TESTS "Disable all tests")
option(NO_GLTESTS "Disable GL tests")
set(OSD_GPU FALSE)
# Check for dependencies
if(NOT NO_OMP)
find_package(OpenMP)
@ -338,7 +340,7 @@ endif()
if(NOT NO_CUDA)
find_package(CUDA 4.0)
endif()
if(NOT ANDROID AND NOT IOS)
if(NOT NO_OPENGL AND NOT ANDROID AND NOT IOS)
find_package(GLFW 3.0.0)
endif()
if(NOT NO_PTEX)
@ -406,6 +408,10 @@ else()
endif()
endif()
if( OPENGL_FOUND AND NOT NO_OPENGL)
set(OSD_GPU TRUE)
endif()
if(GLFW_FOUND AND (GLFW_VERSION VERSION_EQUAL 3.0 OR GLFW_VERSION VERSION_GREATER 3.0))
add_definitions( -DGLFW_VERSION_3 )
endif()
@ -449,6 +455,7 @@ if(OPENGLES_FOUND)
add_definitions(
-DOPENSUBDIV_HAS_OPENGLES
)
set(OSD_GPU TRUE)
endif()
if(OPENCL_FOUND)
@ -486,6 +493,7 @@ if(OPENCL_FOUND)
)
endif()
endif()
set(OSD_GPU TRUE)
else()
if (NOT NO_OPENCL)
message(WARNING
@ -499,6 +507,7 @@ if(CUDA_FOUND)
add_definitions(
-DOPENSUBDIV_HAS_CUDA
)
set(OSD_GPU TRUE)
else()
if (NOT NO_CUDA)
message(WARNING
@ -540,21 +549,23 @@ if (NOT NO_MAYA)
endif()
# Link examples & regressions dynamically against Osd
set( OSD_LINK_TARGET osd_dynamic_cpu osd_dynamic_gpu )
if( OSD_GPU )
set( OSD_LINK_TARGET osd_dynamic_cpu osd_dynamic_gpu )
else()
set( OSD_LINK_TARGET osd_dynamic_cpu )
endif()
if (WIN32)
add_definitions(
# Link against the static version of GLEW.
-DGLEW_STATIC
)
# Link examples & regressions statically against Osd for
# Windows until all the kinks can be worked out.
set( OSD_LINK_TARGET osd_static_cpu osd_static_gpu )
if (DXSDK_FOUND AND NOT NO_DX)
add_definitions(
-DOPENSUBDIV_HAS_DX11SDK
)
set(OSD_GPU TRUE)
elseif(NOT NO_DX)
message(WARNING
"DirectX11 SDK was not found. "
@ -565,6 +576,15 @@ if (WIN32)
"environment variable."
)
endif()
# Link examples & regressions statically against Osd for
# Windows until all the kinks can be worked out.
if( OSD_GPU )
set( OSD_LINK_TARGET osd_static_cpu osd_static_gpu )
else()
set( OSD_LINK_TARGET osd_static_cpu )
endif()
endif()

View File

@ -92,6 +92,7 @@ if (DOCUTILS_FOUND AND PYTHONINTERP_FOUND)
osd_overview.rst
osd_shader_interface.rst
porting.rst
references.rst
release_notes.rst
release_notes_2x.rst
roadmap.rst

Binary file not shown (new image, 613 KiB).

View File

@ -25,7 +25,7 @@
Introduction
------------
.. image:: images/geri.jpg
.. image:: images/insideout.png
:width: 100%
:align: center
@ -94,7 +94,7 @@ between Pixar and Microsoft.
|
| *Analytic Displacement Mapping using Hardware Tessellation*
| Matthias Niessner, Charles Loop
| ACM Transactions on Graphics, To appear 2013
| ACM Transactions on Graphics, Vol. 32 No. 3 Article 26 June 2013
| `<http://research.microsoft.com/en-us/um/people/cloop/TOG2013.pdf>`_
----

View File

@ -40,6 +40,7 @@
<li><a href="cmake_build.html">Building OpenSubdiv</a></li>
<li><a href="code_examples.html">Code Examples</a></li>
<li><a href="roadmap.html">Roadmap</a></li>
<li><a href="references.html">References</a></li>
</ul>
<p></p>
<li><a href="intro_30.html">Release 3.0</a></li>
@ -101,7 +102,7 @@
<li><a href="additional_resources.html#videos">Videos</a>
</ul>
<p></p>
<li><a href="release_notes.html">Release Notes</a>
<li><a href="release_notes.html">Release Notes</a></li>
<p></p>
<li><a href="doxy_html/index.html" target="_blank">Doxygen</a></li>
</ul>

View File

@ -0,0 +1,118 @@
..
Copyright 2015 Pixar
Licensed under the Apache License, Version 2.0 (the "Apache License")
with the following modification; you may not use this file except in
compliance with the Apache License and the following modification to it:
Section 6. Trademarks. is deleted and replaced with:
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor
and its affiliates, except as required to comply with Section 4(c) of
the License and to reproduce the content of the NOTICE file.
You may obtain a copy of the Apache License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the Apache License with the above modification is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the Apache License for the specific
language governing permissions and limitations under the Apache License.
References
----------
.. contents::
:local:
:backlinks: none
----
References
==========
| *Analytic Displacement Mapping using Hardware Tessellation*
| Matthias Niessner, Charles Loop
| ACM Transactions on Graphics, Vol. 32 No. 3 Article 26 June 2013
| `<http://research.microsoft.com/en-us/um/people/cloop/TOG2013.pdf>`_
| `<http://doi.org/10.1145/2487228.2487234>`_
|
| *Feature Adaptive GPU Rendering of Catmull-Clark Subdivision Surfaces*
| Matthias Niessner, Charles Loop, Mark Meyer, and Tony DeRose
| ACM Transactions on Graphics, Vol. 31 No. 1 Article 6 January 2012
| `<http://research.microsoft.com/en-us/um/people/cloop/tog2012.pdf>`_
| `<http://doi.org/10.1145/2077341.2077347>`_
|
| *Efficient Evaluation of Semi-Smooth Creases in Catmull-Clark Subdivision Surfaces*
| Matthias Niessner, Charles Loop, and Guenter Greiner.
| Eurographics Proceedings, Cagliari, 2012
| `<http://research.microsoft.com/en-us/um/people/cloop/EG2012.pdf>`_
| `<http://doi.org/10.2312/conf/EG2012/short/041-044>`_
|
| *Approximating Subdivision Surfaces with Gregory Patches for Hardware Tessellation*
| Charles Loop, Scott Schaefer, Tianyun Ni, Ignacio Castano
| SIGGRAPH Asia Conference Proceedings 2009
| `<http://www.dgp.toronto.edu/people/stam/reality/Research/pdf/sig98.pdf>`_
| `<http://doi.org/10.1145/1661412.1618497>`_
|
| *GPU Smoothing of Quad Meshes*
| T. L. Ni, Y. Yeo, A. Myles, V. Goel and J. Peters
| Proc. IEEE SMI 2008
| `<http://www.cise.ufl.edu/research/SurfLab/papers/smi08.pdf>`_
| `<http://doi.org/10.1109/SMI.2008.4547938>`_
|
| *Fast Parallel Construction of Smooth Surfaces from Meshes with Tri/Quad/Pent Facets*
| A. Myles and T. Ni and J. Peters
| Eurographics Symposium on Geometry Processing 2008
| `<https://www.cise.ufl.edu/research/SurfLab/papers/08poly.pdf>`_
| `<http://doi.org/10.1111/j.1467-8659.2008.01276.x>`_
|
| *Approximating Catmull-Clark Subdivision Surfaces with Bicubic Patches*
| Charles Loop, Scott Schaefer
| ACM Transactions on Graphics, Vol. 27 No. 1 Article 8 March 2008
| `<http://research.microsoft.com/en-us/um/people/cloop/acctog.pdf>`_
| `<http://doi.org/10.1145/1330511.1330519>`_
|
| *Rapid Evaluation of Catmull-Clark Subdivision Surfaces*
| Jeffrey Bolz and Peter Schroder
| Web3D Proceedings 2002
| `<http://www.multires.caltech.edu/pubs/fastsubd.pdf>`_
| `<http://doi.org/10.1145/504502.504505>`_
|
| *Piecewise Smooth Subdivision Surfaces with Normal Control*
| Henning Biermann, Adi Levin and Denis Zorin
| SIGGRAPH 2000 Conference Proceedings
| `<http://mrl.nyu.edu/~dzorin/papers/biermann2000pss.pdf>`_
| `<http://doi.org/10.1145/344779.344841>`_
|
| *Subdivision for Modeling and Animation*
| Denis Zorin, Peter Schroder
| Course Notes of SIGGRAPH 1999
| `<http://www.multires.caltech.edu/pubs/sig99notes.pdf>`_
|
| *Exact Evaluation of Catmull-Clark Subdivision Surfaces at Arbitrary Parameter Values*
| Jos Stam
| SIGGRAPH 98 Conference Proceedings, Annual Conference Series, July 1998
| `<http://www.dgp.toronto.edu/people/stam/reality/Research/pdf/sig98.pdf>`_
| `<http://doi.org/10.1145/280814.280945>`_
|
| *Subdivision Surfaces in Character Animation*
| Tony DeRose, Michael Kass, Tien Truong
| Proceedings of SIGGRAPH 1998
| `<http://graphics.pixar.com/library/Geri/paper.pdf>`_
| `<http://doi.org/10.1145/280814.280826>`_
|
| *Efficient, Fair Interpolation Using Catmull-Clark Surfaces*
| Mark Halstead, Michael Kass, Tony DeRose
| SIGGRAPH 93 Conference Proceedings
| `<http://graphics.pixar.com/library/FairSubdivision/paper.pdf>`_
| `<http://doi.org/10.1145/166117.166121>`_
|
| *Recursively generated B-spline surfaces on arbitrary topological meshes*
| Edwin Catmull, James Clark
| Computer-Aided Design, Vol. 10 No. 6, 1978
| `<http://doi.org/10.1016/0010-4485%2878%2990110-0>`_

View File

@ -31,6 +31,63 @@
----
Release 3.0.3
=============
Release 3.0.3 is a minor stability release that includes important performance
improvements and bug fixes.
**New Features**
- Smooth normal generation tutorial, far_tutorial_8
**Changes**
- Major performance improvement in PatchTable construction
- Improved patch approximations for non-manifold features
**Bug Fixes**
- Fixed double delete in GLSL Compute controller
- Fixed buffer layout for GLSL Compute kernel
- Fixed GL buffer leak in Osd::GLPatchTable
- Fixed out-of-bounds data access for TBB and OMP stencil evaluation
- Fixed WIN32_LEAN_AND_MEAN typo
- Fixed Loop-related shader issues in glFVarViewer
Release 3.0.2
=============
Release 3.0.2 is a minor release for a specific fix.
**Bug Fixes**
- Fixed drawing of single crease patches
Release 3.0.1
=============
Release 3.0.1 is a minor release focused on stability and correctness.
**Changes**
- Added a references section to the documentation; see `References <references.html>`__
- Removed references to AddVaryingWithWeight from examples and tutorials
- Added more regression test shapes
- Addressed general compiler warnings (e.g. signed vs unsigned comparisons)
- Addressed compiler warnings in the core libraries reported by GCC's -Wshadow
- Eased the GCC version restriction; the earlier requirement for version 4.8 or newer is no longer enforced
- Replaced topology initialization assertions with errors
- Improved compatibility with ICC
- Improved descriptive content and formatting of Far error messages
- Improved the build when configured to include no GPU-specific code (see the configuration sketch below)
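A CPU-only build might be configured roughly as follows (an illustrative sketch,
not part of the release notes; the exact set of ``NO_*`` options required depends
on what is installed on the build machine)::

    cmake -DNO_OPENGL=1 -DNO_OPENCL=1 -DNO_CUDA=1 -DNO_DX=1 <source dir>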
**Bug Fixes**
- Fixed handling of unconnected vertices to avoid out-of-bounds data access
- Fixed non-zero starting offsets for TbbEvalStencils and OmpEvalStencils
- Fixed Far::StencilTableFactory::Options::factorizeIntermediateLevels
- Fixed Far::PatchTablesFactory::Options::generateAllLevels
- Fixed the behavior of VTX_BOUNDARY_NONE for meshes with bilinear scheme
- Fixed some template method specializations which produced duplicate definitions
- Disabled depth buffering when drawing the UI in the example viewers
- Disabled the fractional tessellation spacing option in example viewers
since this mode is currently not supported
Release 3.0.0
=============

View File

@ -158,10 +158,12 @@ include_directories(
set(INC_FILES )
if(OPENGL_FOUND OR DXSDK_FOUND)
_stringify("${EXAMPLES_COMMON_SHADER_FILES}" INC_FILES)
source_group("Shaders" FILES ${EXAMPLES_COMMON_SHADER_FILES})
source_group("Inc" FILES ${INC_FILES})
endif()
add_library(examples_common_obj
OBJECT

View File

@ -27,6 +27,7 @@
#include "clDeviceContext.h"
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#elif defined(__APPLE__)
#include <OpenGL/OpenGL.h>

View File

@ -25,6 +25,7 @@
#include "cudaDeviceContext.h"
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#elif defined(__APPLE__)
#include <OpenGL/OpenGL.h>

View File

@ -277,6 +277,7 @@ GLhud::Flush() {
ortho(proj, 0, 0, float(GetWidth()), float(GetHeight()));
glUniformMatrix4fv(_mvpMatrix, 1, GL_FALSE, proj);
glDisable(GL_DEPTH_TEST);
{
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, _fontTexture);
@ -289,6 +290,7 @@ GLhud::Flush() {
glBindTexture(GL_TEXTURE_2D, 0);
}
glEnable(GL_DEPTH_TEST);
return true;
}

View File

@ -25,12 +25,12 @@
#ifndef STOPWATCH_H
#define STOPWATCH_H
#if not (_WIN32 or _WIN64)
#if (_WIN32 or _WIN64)
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#else
#endif
class Stopwatch {

View File

@ -1403,8 +1403,8 @@ initHUD() {
450, 10, callbackCheckBox, HUD_CB_ANIMATE_VERTICES, 'm');
g_hud->AddCheckBox("Screen space LOD (V)", g_screenSpaceTess,
450, 30, callbackCheckBox, HUD_CB_VIEW_LOD, 'v');
g_hud->AddCheckBox("Fractional spacing (T)", g_fractionalSpacing,
450, 50, callbackCheckBox, HUD_CB_FRACTIONAL_SPACING, 't');
//g_hud->AddCheckBox("Fractional spacing (T)", g_fractionalSpacing,
// 450, 50, callbackCheckBox, HUD_CB_FRACTIONAL_SPACING, 't');
g_hud->AddCheckBox("Frustum Patch Culling (B)", g_patchCull,
450, 70, callbackCheckBox, HUD_CB_PATCH_CULL, 'b');
g_hud->AddCheckBox("Freeze (spc)", g_freeze,

View File

@ -128,12 +128,10 @@ int g_freeze = 0,
g_adaptive = 1,
g_endCap = kEndCapBSplineBasis,
g_singleCreasePatch = 1,
g_drawPatchCVs = 0,
g_drawNormals = 0,
g_mbutton[3] = {0, 0, 0};
int g_displayPatchColor = 1,
g_screenSpaceTess = 1,
int g_screenSpaceTess = 1,
g_fractionalSpacing = 1,
g_patchCull = 1,
g_displayPatchCounts = 0;
@ -1130,12 +1128,6 @@ callbackCheckBox(bool checked, int button) {
case kHUD_CB_ANIMATE_VERTICES:
g_moveScale = checked;
break;
case kHUD_CB_DISPLAY_PATCH_COLOR:
g_displayPatchColor = checked;
break;
case kHUD_CB_DISPLAY_PATCH_CVs:
g_drawPatchCVs = checked;
break;
case kHUD_CB_VIEW_LOD:
g_screenSpaceTess = checked;
break;
@ -1204,13 +1196,32 @@ initHUD() {
kShadingNormal,
g_shadingMode == kShadingNormal);
g_hud->AddCheckBox("Patch CVs (L)", false, 10, 50, callbackCheckBox, kHUD_CB_DISPLAY_PATCH_CVs, 'L');
g_hud->AddCheckBox("Patch Color (P)", true, 10, 70, callbackCheckBox, kHUD_CB_DISPLAY_PATCH_COLOR, 'P');
g_hud->AddCheckBox("Animate vertices (M)", g_moveScale != 0, 10, 110, callbackCheckBox, kHUD_CB_ANIMATE_VERTICES, 'M');
g_hud->AddCheckBox("Freeze (spc)", false, 10, 130, callbackCheckBox, kHUD_CB_FREEZE, ' ');
g_hud->AddCheckBox("Screen space LOD (V)", g_screenSpaceTess != 0, 10, 150, callbackCheckBox, kHUD_CB_VIEW_LOD, 'V');
g_hud->AddCheckBox("Fractional spacing (T)", g_fractionalSpacing != 0, 10, 170, callbackCheckBox, kHUD_CB_FRACTIONAL_SPACING, 'T');
g_hud->AddCheckBox("Frustum Patch Culling (B)", g_patchCull != 0, 10, 190, callbackCheckBox, kHUD_CB_PATCH_CULL, 'B');
int y = 10;
g_hud->AddCheckBox("Control edges (H)",
g_controlMeshDisplay->GetEdgesDisplay(),
10, y, callbackCheckBox,
kHUD_CB_DISPLAY_CONTROL_MESH_EDGES, 'H');
y += 20;
g_hud->AddCheckBox("Control vertices (J)",
g_controlMeshDisplay->GetVerticesDisplay(),
10, y, callbackCheckBox,
kHUD_CB_DISPLAY_CONTROL_MESH_VERTS, 'J');
y += 20;
g_hud->AddCheckBox("Animate vertices (M)", g_moveScale != 0,
10, y, callbackCheckBox, kHUD_CB_ANIMATE_VERTICES, 'M');
y += 20;
g_hud->AddCheckBox("Screen space LOD (V)", g_screenSpaceTess != 0,
10, y, callbackCheckBox, kHUD_CB_VIEW_LOD, 'V');
y += 20;
//g_hud->AddCheckBox("Fractional spacing (T)", g_fractionalSpacing != 0,
// 10, y, callbackCheckBox, kHUD_CB_FRACTIONAL_SPACING, 'T');
//y += 20;
g_hud->AddCheckBox("Frustum Patch Culling (B)", g_patchCull != 0,
10, y, callbackCheckBox, kHUD_CB_PATCH_CULL, 'B');
y += 20;
g_hud->AddCheckBox("Freeze (spc)", g_freeze != 0,
10, y, callbackCheckBox, kHUD_CB_FREEZE, ' ');
y += 20;
g_hud->AddCheckBox("Adaptive (`)", true, 10, 230, callbackAdaptive, 0, '`');
g_hud->AddCheckBox("Single Crease Patch (S)", g_singleCreasePatch!=0, 10, 250, callbackSingleCreasePatch, 0, 'S');

View File

@ -79,7 +79,6 @@ static void initShapes() {
g_defaultShapes.push_back(ShapeDesc("catmark_righthanded", catmark_righthanded, kCatmark));
g_defaultShapes.push_back(ShapeDesc("catmark_pole8", catmark_pole8, kCatmark));
g_defaultShapes.push_back(ShapeDesc("catmark_pole64", catmark_pole64, kCatmark));
g_defaultShapes.push_back(ShapeDesc("catmark_pole360", catmark_pole360, kCatmark));
g_defaultShapes.push_back(ShapeDesc("catmark_nonman_quadpole8", catmark_nonman_quadpole8, kCatmark));
g_defaultShapes.push_back(ShapeDesc("catmark_nonman_quadpole64", catmark_nonman_quadpole64, kCatmark));
g_defaultShapes.push_back(ShapeDesc("catmark_nonman_quadpole360", catmark_nonman_quadpole360, kCatmark));

View File

@ -188,8 +188,6 @@ struct Vertex {
_pos[2]+=weight*src._pos[2];
}
void AddVaryingWithWeight(Vertex const & , float) { }
void Clear( void * =0 ) { _pos[0]=_pos[1]=_pos[2]=0.0f; }
void SetPosition(float x, float y, float z) { _pos[0]=x; _pos[1]=y; _pos[2]=z; }

View File

@ -502,18 +502,25 @@ createOsdMesh(ShapeDesc const & shapeDesc, int level) {
Far::StencilTable const * vertexStencils = NULL;
Far::StencilTable const * varyingStencils = NULL;
int nverts=0;
{
// Apply feature adaptive refinement to the mesh so that we can use the
// limit evaluation API features.
Far::TopologyRefiner::AdaptiveOptions options(level);
topologyRefiner->RefineAdaptive(options);
bool adaptive = (sdctype == OpenSubdiv::Sdc::SCHEME_CATMARK);
if (adaptive) {
// Apply feature adaptive refinement to the mesh so that we can use the
// limit evaluation API features.
Far::TopologyRefiner::AdaptiveOptions options(level);
topologyRefiner->RefineAdaptive(options);
} else {
Far::TopologyRefiner::UniformOptions options(level);
topologyRefiner->RefineUniform(options);
}
// Generate stencil table to update the bi-cubic patches control
// vertices after they have been re-posed (both for vertex & varying
// interpolation)
Far::StencilTableFactory::Options soptions;
soptions.generateOffsets=true;
soptions.generateIntermediateLevels=true;
soptions.generateIntermediateLevels=adaptive;
vertexStencils =
Far::StencilTableFactory::Create(*topologyRefiner, soptions);

View File

@ -40,8 +40,6 @@ static std::vector<ShapeDesc> g_defaultShapes;
//------------------------------------------------------------------------------
static void initShapes() {
// g_defaultShapes.push_back( ShapeDesc("bilinear_cube", bilinear_cube, kBilinear) );
g_defaultShapes.push_back( ShapeDesc("catmark_cube_corner0", catmark_cube_corner0, kCatmark ) );
g_defaultShapes.push_back( ShapeDesc("catmark_cube_corner1", catmark_cube_corner1, kCatmark ) );
g_defaultShapes.push_back( ShapeDesc("catmark_cube_corner2", catmark_cube_corner2, kCatmark ) );
@ -87,5 +85,7 @@ static void initShapes() {
g_defaultShapes.push_back( ShapeDesc("catmark_helmet", catmark_helmet, kCatmark ) );
g_defaultShapes.push_back( ShapeDesc("catmark_pawn", catmark_pawn, kCatmark ) );
g_defaultShapes.push_back( ShapeDesc("catmark_rook", catmark_rook, kCatmark ) );
g_defaultShapes.push_back( ShapeDesc("bilinear_cube", bilinear_cube, kBilinear) );
}
//------------------------------------------------------------------------------

View File

@ -501,6 +501,9 @@ public:
if (type == Far::PatchDescriptor::QUADS) {
ss << "#define PRIM_QUAD\n";
} else if (type == Far::PatchDescriptor::TRIANGLES) {
ss << "#define PRIM_TRI\n";
ss << "#define LOOP\n";
} else {
ss << "#define PRIM_TRI\n";
}

View File

@ -115,6 +115,8 @@ public:
ss << "#define OSD_ENABLE_PATCH_CULL\n";
ss << "#define GEOMETRY_OUT_LINE\n";
ss << "#define OSD_PATCH_ENABLE_SINGLE_CREASE\n";
// include osd PatchCommon
ss << Osd::GLSLPatchShaderSource::GetCommonShaderSource();
std::string common = ss.str();

View File

@ -280,6 +280,90 @@ out vec4 outColor;
uniform vec4 diffuseColor = vec4(1);
uniform vec4 ambientColor = vec4(1);
vec4
getAdaptivePatchColor(ivec3 patchParam)
{
const vec4 patchColors[7*6] = vec4[7*6](
vec4(1.0f, 1.0f, 1.0f, 1.0f), // regular
vec4(0.0f, 1.0f, 1.0f, 1.0f), // regular pattern 0
vec4(0.0f, 0.5f, 1.0f, 1.0f), // regular pattern 1
vec4(0.0f, 0.5f, 0.5f, 1.0f), // regular pattern 2
vec4(0.5f, 0.0f, 1.0f, 1.0f), // regular pattern 3
vec4(1.0f, 0.5f, 1.0f, 1.0f), // regular pattern 4
vec4(1.0f, 0.5f, 0.5f, 1.0f), // single crease
vec4(1.0f, 0.70f, 0.6f, 1.0f), // single crease pattern 0
vec4(1.0f, 0.65f, 0.6f, 1.0f), // single crease pattern 1
vec4(1.0f, 0.60f, 0.6f, 1.0f), // single crease pattern 2
vec4(1.0f, 0.55f, 0.6f, 1.0f), // single crease pattern 3
vec4(1.0f, 0.50f, 0.6f, 1.0f), // single crease pattern 4
vec4(0.8f, 0.0f, 0.0f, 1.0f), // boundary
vec4(0.0f, 0.0f, 0.75f, 1.0f), // boundary pattern 0
vec4(0.0f, 0.2f, 0.75f, 1.0f), // boundary pattern 1
vec4(0.0f, 0.4f, 0.75f, 1.0f), // boundary pattern 2
vec4(0.0f, 0.6f, 0.75f, 1.0f), // boundary pattern 3
vec4(0.0f, 0.8f, 0.75f, 1.0f), // boundary pattern 4
vec4(0.0f, 1.0f, 0.0f, 1.0f), // corner
vec4(0.25f, 0.25f, 0.25f, 1.0f), // corner pattern 0
vec4(0.25f, 0.25f, 0.25f, 1.0f), // corner pattern 1
vec4(0.25f, 0.25f, 0.25f, 1.0f), // corner pattern 2
vec4(0.25f, 0.25f, 0.25f, 1.0f), // corner pattern 3
vec4(0.25f, 0.25f, 0.25f, 1.0f), // corner pattern 4
vec4(1.0f, 1.0f, 0.0f, 1.0f), // gregory
vec4(1.0f, 1.0f, 0.0f, 1.0f), // gregory
vec4(1.0f, 1.0f, 0.0f, 1.0f), // gregory
vec4(1.0f, 1.0f, 0.0f, 1.0f), // gregory
vec4(1.0f, 1.0f, 0.0f, 1.0f), // gregory
vec4(1.0f, 1.0f, 0.0f, 1.0f), // gregory
vec4(1.0f, 0.5f, 0.0f, 1.0f), // gregory boundary
vec4(1.0f, 0.5f, 0.0f, 1.0f), // gregory boundary
vec4(1.0f, 0.5f, 0.0f, 1.0f), // gregory boundary
vec4(1.0f, 0.5f, 0.0f, 1.0f), // gregory boundary
vec4(1.0f, 0.5f, 0.0f, 1.0f), // gregory boundary
vec4(1.0f, 0.5f, 0.0f, 1.0f), // gregory boundary
vec4(1.0f, 0.7f, 0.3f, 1.0f), // gregory basis
vec4(1.0f, 0.7f, 0.3f, 1.0f), // gregory basis
vec4(1.0f, 0.7f, 0.3f, 1.0f), // gregory basis
vec4(1.0f, 0.7f, 0.3f, 1.0f), // gregory basis
vec4(1.0f, 0.7f, 0.3f, 1.0f), // gregory basis
vec4(1.0f, 0.7f, 0.3f, 1.0f) // gregory basis
);
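// 7 patch types x 6 transition patterns; indexed as 6*patchType + pattern below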
int patchType = 0;
int edgeCount = bitCount(OsdGetPatchBoundaryMask(patchParam));
if (edgeCount == 1) {
patchType = 2; // BOUNDARY
}
if (edgeCount == 2) {
patchType = 3; // CORNER
}
#if defined OSD_PATCH_ENABLE_SINGLE_CREASE
// check this after boundary/corner since a single crease patch can also have a non-zero edgeCount.
float sharpness = OsdGetPatchSharpness(patchParam);
if (sharpness > 0) {
patchType = 1;
}
#elif defined OSD_PATCH_GREGORY
patchType = 4;
#elif defined OSD_PATCH_GREGORY_BOUNDARY
patchType = 5;
#elif defined OSD_PATCH_GREGORY_BASIS
patchType = 6;
#endif
int pattern = bitCount(OsdGetPatchTransitionMask(patchParam));
return patchColors[6*patchType + pattern];
}
vec4
edgeColor(vec4 Cfill, vec4 edgeDistance)
{
@ -316,7 +400,7 @@ main()
#if defined(DISPLAY_MODE_VARYING)
vec4 color = vec4(inpt.color, 1);
#else
vec4 color = diffuseColor;
vec4 color = getAdaptivePatchColor(OsdGetPatchParam(OsdGetPatchIndex(gl_PrimitiveID)));
#endif
vec4 Cf = color * d;

View File

@ -1879,8 +1879,8 @@ int main(int argc, char ** argv) {
10, 30, callbackCheckBox, HUD_CB_ANIMATE_VERTICES, 'm');
g_hud.AddCheckBox("Screen space LOD (V)", g_screenSpaceTess,
10, 50, callbackCheckBox, HUD_CB_VIEW_LOD, 'v');
g_hud.AddCheckBox("Fractional spacing (T)", g_fractionalSpacing,
10, 70, callbackCheckBox, HUD_CB_FRACTIONAL_SPACING, 't');
//g_hud.AddCheckBox("Fractional spacing (T)", g_fractionalSpacing,
// 10, 70, callbackCheckBox, HUD_CB_FRACTIONAL_SPACING, 't');
g_hud.AddCheckBox("Frustum Patch Culling (B)", g_patchCull,
10, 90, callbackCheckBox, HUD_CB_PATCH_CULL, 'b');
g_hud.AddCheckBox("Bloom (Y)", g_bloom,

View File

@ -1461,9 +1461,9 @@ initHUD() {
g_hud.AddCheckBox("Screen space LOD (V)", g_screenSpaceTess != 0,
10, y, callbackCheckBox, kHUD_CB_VIEW_LOD, 'v');
y += 20;
g_hud.AddCheckBox("Fractional spacing (T)", g_fractionalSpacing != 0,
10, y, callbackCheckBox, kHUD_CB_FRACTIONAL_SPACING, 't');
y += 20;
//g_hud.AddCheckBox("Fractional spacing (T)", g_fractionalSpacing != 0,
// 10, y, callbackCheckBox, kHUD_CB_FRACTIONAL_SPACING, 't');
//y += 20;
g_hud.AddCheckBox("Frustum Patch Culling (B)", g_patchCull != 0,
10, y, callbackCheckBox, kHUD_CB_PATCH_CULL, 'b');
y += 20;

View File

@ -80,7 +80,6 @@ static void initShapes() {
g_defaultShapes.push_back( ShapeDesc("catmark_righthanded", catmark_righthanded, kCatmark ) );
g_defaultShapes.push_back( ShapeDesc("catmark_pole8", catmark_pole8, kCatmark ) );
g_defaultShapes.push_back( ShapeDesc("catmark_pole64", catmark_pole64, kCatmark ) );
g_defaultShapes.push_back( ShapeDesc("catmark_pole360", catmark_pole360, kCatmark ) );
g_defaultShapes.push_back( ShapeDesc("catmark_nonman_quadpole8", catmark_nonman_quadpole8, kCatmark ) );
g_defaultShapes.push_back( ShapeDesc("catmark_nonman_quadpole64", catmark_nonman_quadpole64, kCatmark ) );
g_defaultShapes.push_back( ShapeDesc("catmark_nonman_quadpole360", catmark_nonman_quadpole360, kCatmark ) );

View File

@ -568,8 +568,6 @@ struct Vertex {
position[2]+=weight*src.position[2];
}
void AddVaryingWithWeight(Vertex const &, float) { }
float position[3];
};
@ -1237,4 +1235,4 @@ MStatus uninitializePlugin( MObject obj) {
MCHECKERR(returnStatus, "deregisterNode");
return returnStatus;
}
}

View File

@ -153,21 +153,23 @@ if (NOT NO_LIB)
install( TARGETS osd_static_cpu DESTINATION "${CMAKE_LIBDIR_BASE}" )
# this macro uses FindCUDA.cmake to compile .cu kernel files
# the target then adds the other obj dependencies and include files
_add_possibly_cuda_library(osd_static_gpu
STATIC
version.cpp
$<TARGET_OBJECTS:osd_gpu_obj>
${CUDA_KERNEL_FILES}
)
set_target_properties(osd_static_gpu PROPERTIES OUTPUT_NAME osdGPU CLEAN_DIRECT_OUTPUT 1)
if( OSD_GPU )
# this macro uses FindCUDA.cmake to compile .cu kernel files
# the target then adds the other obj dependencies and include files
_add_possibly_cuda_library(osd_static_gpu
STATIC
version.cpp
$<TARGET_OBJECTS:osd_gpu_obj>
${CUDA_KERNEL_FILES}
)
set_target_properties(osd_static_gpu PROPERTIES OUTPUT_NAME osdGPU CLEAN_DIRECT_OUTPUT 1)
target_link_libraries(osd_static_gpu
${PLATFORM_CPU_LIBRARIES} ${PLATFORM_GPU_LIBRARIES}
)
target_link_libraries(osd_static_gpu
${PLATFORM_CPU_LIBRARIES} ${PLATFORM_GPU_LIBRARIES}
)
install( TARGETS osd_static_gpu DESTINATION "${CMAKE_LIBDIR_BASE}" )
install( TARGETS osd_static_gpu DESTINATION "${CMAKE_LIBDIR_BASE}" )
endif()
# Build dynamic libs ----------------------------------
@ -207,35 +209,37 @@ if (NOT NO_LIB)
install( TARGETS osd_dynamic_cpu LIBRARY DESTINATION "${CMAKE_LIBDIR_BASE}" )
#---------------------------------------------------
_add_possibly_cuda_library(osd_dynamic_gpu
SHARED
version.cpp
$<TARGET_OBJECTS:osd_gpu_obj>
${CUDA_KERNEL_FILES}
)
if( OSD_GPU )
_add_possibly_cuda_library(osd_dynamic_gpu
SHARED
version.cpp
$<TARGET_OBJECTS:osd_gpu_obj>
${CUDA_KERNEL_FILES}
)
if (NOT ANDROID)
set_target_properties(osd_dynamic_gpu
PROPERTIES
OUTPUT_NAME osdGPU
CLEAN_DIRECT_OUTPUT 1
SOVERSION ${OSD_SONAME}
)
else()
set_target_properties(osd_dynamic_gpu
PROPERTIES
OUTPUT_NAME osdGPU
CLEAN_DIRECT_OUTPUT 1
)
if (NOT ANDROID)
set_target_properties(osd_dynamic_gpu
PROPERTIES
OUTPUT_NAME osdGPU
CLEAN_DIRECT_OUTPUT 1
SOVERSION ${OSD_SONAME}
)
else()
set_target_properties(osd_dynamic_gpu
PROPERTIES
OUTPUT_NAME osdGPU
CLEAN_DIRECT_OUTPUT 1
)
endif()
target_link_libraries(osd_dynamic_gpu
osd_dynamic_cpu
${PLATFORM_CPU_LIBRARIES} ${PLATFORM_GPU_LIBRARIES}
)
install( TARGETS osd_dynamic_gpu LIBRARY DESTINATION "${CMAKE_LIBDIR_BASE}" )
endif()
target_link_libraries(osd_dynamic_gpu
osd_dynamic_cpu
${PLATFORM_CPU_LIBRARIES} ${PLATFORM_GPU_LIBRARIES}
)
install( TARGETS osd_dynamic_gpu LIBRARY DESTINATION "${CMAKE_LIBDIR_BASE}" )
endif()
endif()

View File

@ -37,17 +37,75 @@ namespace OPENSUBDIV_VERSION {
namespace Far {
namespace {
#ifdef __INTEL_COMPILER
#pragma warning (push)
#pragma warning disable 1572
#endif
inline bool isWeightNonZero(float w) { return (w != 0.0f); }
#ifdef __INTEL_COMPILER
#pragma warning (pop)
#endif
}
EndCapBSplineBasisPatchFactory::EndCapBSplineBasisPatchFactory(
TopologyRefiner const & refiner) :
_refiner(&refiner), _numVertices(0), _numPatches(0) {
// Sanity check: the mesh must be adaptively refined
assert(not refiner.IsUniform());
// Reserve the patch point stencils. Ideally topology refiner
// would have an API to return how many endcap patches will be required.
// Instead we conservatively estimate by the number of patches at the
// finest level.
int numMaxLevelFaces = refiner.GetLevel(refiner.GetMaxLevel()).GetNumFaces();
_vertexStencils.reserve(numMaxLevelFaces*16);
_varyingStencils.reserve(numMaxLevelFaces*16);
}
ConstIndexArray
EndCapBSplineBasisPatchFactory::GetPatchPoints(
Vtr::internal::Level const * level, Index faceIndex,
PatchTableFactory::PatchFaceTag const * /*levelPatchTags*/,
Vtr::internal::Level const * level, Index thisFace,
PatchTableFactory::PatchFaceTag const *levelPatchTags,
int levelVertOffset) {
Vtr::ConstIndexArray facePoints = level->getFaceVertices(thisFace);
PatchTableFactory::PatchFaceTag patchTag = levelPatchTags[thisFace];
// if it's a boundary patch, fall back to GregoryBasis
if (patchTag._boundaryCount > 0) {
return getPatchPointsFromGregoryBasis(
level, thisFace, facePoints, levelVertOffset);
}
// there's a short-cut when the face contains only 1 extraordinary vertex.
// (we can achieve this by isolating 2 levels)
// look for the extraordinary vertex
int irregular = -1;
for (int i = 0; i < 4; ++i) {
int valence = level->getVertexFaces(facePoints[i]).size();
if (valence != 4) {
if (irregular != -1) {
// more than one extraordinary vertex:
// fall back to GregoryBasis
return getPatchPointsFromGregoryBasis(
level, thisFace, facePoints, levelVertOffset);
}
irregular = i;
}
}
// faster B-spline endcap generation
return getPatchPoints(level, thisFace, irregular, facePoints,
levelVertOffset);
}
ConstIndexArray
EndCapBSplineBasisPatchFactory::getPatchPointsFromGregoryBasis(
Vtr::internal::Level const * level, Index thisFace,
ConstIndexArray facePoints, int levelVertOffset) {
// XXX: For now, always create 16 new indices for each patch.
// We'll optimize later to share regular control points with
// other patches, and to try to make extraordinary verts watertight.
@ -57,66 +115,386 @@ EndCapBSplineBasisPatchFactory::GetPatchPoints(
_patchPoints.push_back(_numVertices + offset);
++_numVertices;
}
GregoryBasis::ProtoBasis basis(*level, thisFace, levelVertOffset, -1);
// XXX: temporary hack. we should traverse topology and find existing
// vertices if available
//
// Reorder gregory basis stencils into regular bezier
GregoryBasis::ProtoBasis basis(*level, faceIndex, levelVertOffset, -1);
std::vector<GregoryBasis::Point> bezierCP;
bezierCP.reserve(16);
GregoryBasis::Point const *bezierCP[16];
bezierCP.push_back(basis.P[0]);
bezierCP.push_back(basis.Ep[0]);
bezierCP.push_back(basis.Em[1]);
bezierCP.push_back(basis.P[1]);
bezierCP[0] = &basis.P[0];
bezierCP[1] = &basis.Ep[0];
bezierCP[2] = &basis.Em[1];
bezierCP[3] = &basis.P[1];
bezierCP.push_back(basis.Em[0]);
bezierCP.push_back(basis.Fp[0]); // arbitrary
bezierCP.push_back(basis.Fp[1]); // arbitrary
bezierCP.push_back(basis.Ep[1]);
bezierCP[4] = &basis.Em[0];
bezierCP[5] = &basis.Fp[0]; // arbitrary
bezierCP[6] = &basis.Fp[1]; // arbitrary
bezierCP[7] = &basis.Ep[1];
bezierCP.push_back(basis.Ep[3]);
bezierCP.push_back(basis.Fp[3]); // arbitrary
bezierCP.push_back(basis.Fp[2]); // arbitrary
bezierCP.push_back(basis.Em[2]);
bezierCP[8] = &basis.Ep[3];
bezierCP[9] = &basis.Fp[3]; // arbitrary
bezierCP[10] = &basis.Fp[2]; // arbitrary
bezierCP[11] = &basis.Em[2];
bezierCP.push_back(basis.P[3]);
bezierCP.push_back(basis.Em[3]);
bezierCP.push_back(basis.Ep[2]);
bezierCP.push_back(basis.P[2]);
bezierCP[12] = &basis.P[3];
bezierCP[13] = &basis.Em[3];
bezierCP[14] = &basis.Ep[2];
bezierCP[15] = &basis.P[2];
// all stencils should have the same capacity.
int stencilCapacity = basis.P[0].GetCapacity();
// Apply basis conversion from bezier to b-spline
float Q[4][4] = {{ 6, -7, 2, 0},
{ 0, 2, -1, 0},
{ 0, -1, 2, 0},
{ 0, 2, -7, 6} };
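// The two passes below apply Q as a tensor product, P = Q * C * Q^T:
// the first double loop gathers the Bezier control points C along rows into H,
// and the second applies Q along columns, producing the 16 B-spline patch
// point stencils (Q is the change of basis from Bezier to uniform cubic B-spline).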
std::vector<GregoryBasis::Point> H(16);
Vtr::internal::StackBuffer<GregoryBasis::Point, 16> H(16);
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
H[i*4+j].Clear(stencilCapacity);
for (int k = 0; k < 4; ++k) {
if (Q[i][k] != 0) H[i*4+j] += bezierCP[j+k*4] * Q[i][k];
if (isWeightNonZero(Q[i][k])) {
H[i*4+j].AddWithWeight(*bezierCP[j+k*4], Q[i][k]);
}
}
}
}
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
GregoryBasis::Point p;
GregoryBasis::Point p(stencilCapacity);
for (int k = 0; k < 4; ++k) {
if (Q[j][k] != 0) p += H[i*4+k] * Q[j][k];
if (isWeightNonZero(Q[j][k])) {
p.AddWithWeight(H[i*4+k], Q[j][k]);
}
}
_vertexStencils.push_back(p);
}
}
int varyingIndices[] = { 0, 0, 1, 1,
0, 0, 1, 1,
3, 3, 2, 2,
3, 3, 2, 2,};
for (int i = 0; i < 16; ++i) {
_varyingStencils.push_back(basis.V[varyingIndices[i]]);
GregoryBasis::Point p(1);
p.AddWithWeight(facePoints[varyingIndices[i]] + levelVertOffset, 1.0f);
_varyingStencils.push_back(p);
}
++_numPatches;
return ConstIndexArray(&_patchPoints[(_numPatches-1)*16], 16);
}
void
EndCapBSplineBasisPatchFactory::computeLimitStencils(
Vtr::internal::Level const *level,
ConstIndexArray facePoints, int vid,
GregoryBasis::Point *P, GregoryBasis::Point *Ep, GregoryBasis::Point *Em)
{
int maxvalence = level->getMaxValence();
Vtr::internal::StackBuffer<Index, 40> manifoldRing;
manifoldRing.SetSize(maxvalence*2);
int ringSize =
level->gatherQuadRegularRingAroundVertex(
facePoints[vid], manifoldRing, /*fvarChannel*/-1);
// note: this function does not support boundary vertices yet.
assert((ringSize & 1) == 0);
int valence = ringSize/2;
int stencilCapacity = ringSize + 1;
Index start = -1, prev = -1;
{
int ip = (vid+1)%4, im = (vid+3)%4;
for (int i = 0; i < valence; ++i) {
if (manifoldRing[i*2] == facePoints[ip])
start = i;
if (manifoldRing[i*2] == facePoints[im])
prev = i;
}
}
assert(start > -1 && prev > -1);
GregoryBasis::Point e0, e1;
e0.Clear(stencilCapacity);
e1.Clear(stencilCapacity);
float t = 2.0f * float(M_PI) / float(valence);
float ef = 1.0f / (valence * (cosf(t) + 5.0f +
sqrtf((cosf(t) + 9) * (cosf(t) + 1)))/16.0f);
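// written out: ef = 16 / (n * (cos(2*pi/n) + 5 + sqrt((cos(2*pi/n)+9) * (cos(2*pi/n)+1))))
// with n = valence; it scales the tangent contributions accumulated into e0/e1 below.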
for (int i = 0; i < valence; ++i) {
Index ip = (i+1)%valence;
Index idx_neighbor = (manifoldRing[2*i + 0]),
idx_diagonal = (manifoldRing[2*i + 1]),
idx_neighbor_p = (manifoldRing[2*ip + 0]);
float d = float(valence)+5.0f;
GregoryBasis::Point f(4);
f.AddWithWeight(facePoints[vid], float(valence)/d);
f.AddWithWeight(idx_neighbor_p, 2.0f/d);
f.AddWithWeight(idx_neighbor, 2.0f/d);
f.AddWithWeight(idx_diagonal, 1.0f/d);
P->AddWithWeight(f, 1.0f/float(valence));
float c0 = 0.5f*cosf((float(2*M_PI) * float(i)/float(valence)))
+ 0.5f*cosf((float(2*M_PI) * float(ip)/float(valence)));
float c1 = 0.5f*sinf((float(2*M_PI) * float(i)/float(valence)))
+ 0.5f*sinf((float(2*M_PI) * float(ip)/float(valence)));
e0.AddWithWeight(f, c0*ef);
e1.AddWithWeight(f, c1*ef);
}
*Ep = *P;
Ep->AddWithWeight(e0, cosf((float(2*M_PI) * float(start)/float(valence))));
Ep->AddWithWeight(e1, sinf((float(2*M_PI) * float(start)/float(valence))));
*Em = *P;
Em->AddWithWeight(e0, cosf((float(2*M_PI) * float(prev)/float(valence))));
Em->AddWithWeight(e1, sinf((float(2*M_PI) * float(prev)/float(valence))));
}
ConstIndexArray
EndCapBSplineBasisPatchFactory::getPatchPoints(
Vtr::internal::Level const *level, Index thisFace,
Index extraOrdinaryIndex, ConstIndexArray facePoints,
int levelVertOffset) {
// Fast B-spline endcap construction.
//
// This function assumes the patch is not on a boundary and that it
// contains only one extraordinary vertex, which may lie at any corner
// of the 0-ring quad.
//
// B-Spline control point gathering indices
//
// [5] (4)---(15)--(14) 0 : extraordinary vertex
// | | |
// | | | 1,2,3,9,10,11,12,13 :
// (6)----0-----3-----13 B-Spline control points, gathered by
// | | | | traversing topology
// | | | |
// (7)----1-----2-----12 (5) :
// | | | | Fitted patch point (from limit position)
// | | | |
// (8)----9-----10----11 (4),(6),(7),(8),(14),(15) :
// Fitted patch points
// (from limit tangents and bezier CP)
//
static int const rotation[4][16] = {
/*= 0 ring =*/ /* ================ 1 ring ================== */
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 ,14, 15},
{ 1, 2, 3, 0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 4, 5, 6},
{ 2, 3, 0, 1, 10, 11, 12, 13, 14, 15, 4, 5, 6, 7, 8, 9},
{ 3, 0, 1, 2, 13, 14, 15, 4, 5, 6, 7, 8, 9, 10, 11, 12}};
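// rotation[vid] maps the canonical slot numbering of the diagram above
// (slot 0 = the extraordinary corner) onto the order in which the control
// points are gathered below, which always starts at face corner 0.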
int maxvalence = level->getMaxValence();
int stencilCapacity = 2*maxvalence + 16;
GregoryBasis::Point P(stencilCapacity), Em(stencilCapacity), Ep(stencilCapacity);
computeLimitStencils(level, facePoints, extraOrdinaryIndex, &P, &Em, &Ep);
P.OffsetIndices(levelVertOffset);
Em.OffsetIndices(levelVertOffset);
Ep.OffsetIndices(levelVertOffset);
// returning patch indices (a mix of cage vertices and patch points)
int patchPoints[16];
// first, we traverse the topology to gather 15 vertices. This process is
// similar to Vtr::Level::gatherQuadRegularInteriorPatchPoints
int pointIndex = 0;
int vid = extraOrdinaryIndex;
// 0-ring
patchPoints[pointIndex++] = facePoints[0] + levelVertOffset;
patchPoints[pointIndex++] = facePoints[1] + levelVertOffset;
patchPoints[pointIndex++] = facePoints[2] + levelVertOffset;
patchPoints[pointIndex++] = facePoints[3] + levelVertOffset;
// 1-ring
ConstIndexArray thisFaceVerts = level->getFaceVertices(thisFace);
for (int i = 0; i < 4; ++i) {
Index v = thisFaceVerts[i];
ConstIndexArray vFaces = level->getVertexFaces(v);
ConstLocalIndexArray vInFaces = level->getVertexFaceLocalIndices(v);
if (i != vid) {
// regular corner
int thisFaceInVFaces = vFaces.FindIndexIn4Tuple(thisFace);
int intFaceInVFaces = (thisFaceInVFaces + 2) & 0x3;
Index intFace = vFaces[intFaceInVFaces];
int vInIntFace = vInFaces[intFaceInVFaces];
ConstIndexArray facePoints = level->getFaceVertices(intFace);
patchPoints[pointIndex++] =
facePoints[(vInIntFace + 1)&3] + levelVertOffset;
patchPoints[pointIndex++] =
facePoints[(vInIntFace + 2)&3] + levelVertOffset;
patchPoints[pointIndex++] =
facePoints[(vInIntFace + 3)&3] + levelVertOffset;
} else {
// irregular corner
int thisFaceInVFaces = vFaces.FindIndex(thisFace);
int valence = vFaces.size();
{
// first
int intFaceInVFaces = (thisFaceInVFaces + 1) % valence;
Index intFace = vFaces[intFaceInVFaces];
int vInIntFace = vInFaces[intFaceInVFaces];
ConstIndexArray facePoints = level->getFaceVertices(intFace);
patchPoints[pointIndex++] =
facePoints[(vInIntFace+3)&3] + levelVertOffset;
}
{
// middle: (n-vertices) needs a limit stencil. skip for now
pointIndex++;
}
{
// end
int intFaceInVFaces = (thisFaceInVFaces + (valence-1)) %valence;
Index intFace = vFaces[intFaceInVFaces];
int vInIntFace = vInFaces[intFaceInVFaces];
ConstIndexArray facePoints = level->getFaceVertices(intFace);
patchPoints[pointIndex++] =
facePoints[(vInIntFace+1)&3] + levelVertOffset;
}
}
}
// stencils for patch points
GregoryBasis::Point X5(stencilCapacity),
X6(stencilCapacity),
X7(stencilCapacity),
X8(stencilCapacity),
X4(stencilCapacity),
X15(stencilCapacity),
X14(stencilCapacity);
// limit tangent : Em
// X6 = 1/3 * ( 36Em - 16P0 - 8P1 - 2P2 - 4P3 - P6 - 2P7)
// X7 = 1/3 * (-18Em + 8P0 + 4P1 + P2 + 2P3 + 2P6 + 4P7)
// X8 = X6 + (P8-P6)
X6.AddWithWeight(Em, 36.0f/3.0f);
X6.AddWithWeight(patchPoints[rotation[vid][0]], -16.0f/3.0f);
X6.AddWithWeight(patchPoints[rotation[vid][1]], -8.0f/3.0f);
X6.AddWithWeight(patchPoints[rotation[vid][2]], -2.0f/3.0f);
X6.AddWithWeight(patchPoints[rotation[vid][3]], -4.0f/3.0f);
X6.AddWithWeight(patchPoints[rotation[vid][6]], -1.0f/3.0f);
X6.AddWithWeight(patchPoints[rotation[vid][7]], -2.0f/3.0f);
X7.AddWithWeight(Em, -18.0f/3.0f);
X7.AddWithWeight(patchPoints[rotation[vid][0]], 8.0f/3.0f);
X7.AddWithWeight(patchPoints[rotation[vid][1]], 4.0f/3.0f);
X7.AddWithWeight(patchPoints[rotation[vid][2]], 1.0f/3.0f);
X7.AddWithWeight(patchPoints[rotation[vid][3]], 2.0f/3.0f);
X7.AddWithWeight(patchPoints[rotation[vid][6]], 2.0f/3.0f);
X7.AddWithWeight(patchPoints[rotation[vid][7]], 4.0f/3.0f);
X8 = X6;
X8.AddWithWeight(patchPoints[rotation[vid][8]], 1.0f);
X8.AddWithWeight(patchPoints[rotation[vid][6]], -1.0f);
// limit tangent : Ep
// X4 = 1/3 * ( 36EP - 16P0 - 4P1 - 2P15 - 2P2 - 8P3 - P4)
// X15 = 1/3 * (-18EP + 8P0 + 2P1 + 4P15 + P2 + 4P3 + 2P4)
// X14 = X4 + (P14 - P4)
X4.AddWithWeight(Ep, 36.0f/3.0f);
X4.AddWithWeight(patchPoints[rotation[vid][0]], -16.0f/3.0f);
X4.AddWithWeight(patchPoints[rotation[vid][1]], -4.0f/3.0f);
X4.AddWithWeight(patchPoints[rotation[vid][2]], -2.0f/3.0f);
X4.AddWithWeight(patchPoints[rotation[vid][3]], -8.0f/3.0f);
X4.AddWithWeight(patchPoints[rotation[vid][4]], -1.0f/3.0f);
X4.AddWithWeight(patchPoints[rotation[vid][15]], -2.0f/3.0f);
X15.AddWithWeight(Ep, -18.0f/3.0f);
X15.AddWithWeight(patchPoints[rotation[vid][0]], 8.0f/3.0f);
X15.AddWithWeight(patchPoints[rotation[vid][1]], 2.0f/3.0f);
X15.AddWithWeight(patchPoints[rotation[vid][2]], 1.0f/3.0f);
X15.AddWithWeight(patchPoints[rotation[vid][3]], 4.0f/3.0f);
X15.AddWithWeight(patchPoints[rotation[vid][4]], 2.0f/3.0f);
X15.AddWithWeight(patchPoints[rotation[vid][15]], 4.0f/3.0f);
X14 = X4;
X14.AddWithWeight(patchPoints[rotation[vid][14]], 1.0f);
X14.AddWithWeight(patchPoints[rotation[vid][4]], -1.0f);
// limit corner (16th free vert)
// X5 = 36LP - 16P0 - 4(P1 + P3 + P4 + P6) - (P2 + P7 + P15)
X5.AddWithWeight(P, 36.0f);
X5.AddWithWeight(patchPoints[rotation[vid][0]], -16.0f);
X5.AddWithWeight(patchPoints[rotation[vid][1]], -4.0f);
X5.AddWithWeight(patchPoints[rotation[vid][3]], -4.0f);
X5.AddWithWeight(X4, -4.0f);
X5.AddWithWeight(X6, -4.0f);
X5.AddWithWeight(patchPoints[rotation[vid][2]], -1.0f);
X5.AddWithWeight(X7, -1.0f);
X5.AddWithWeight(X15, -1.0f);
// [5] (4)---(15)--(14) 0 : extraordinary vertex
// | | |
// | | | 1,2,3,9,10,11,12,13 :
// (6)----0-----3-----13 B-Spline control points, gathered by
// | | | | traversing topology
// | | | |
// (7)----1-----2-----12 (5) :
// | | | | Fitted patch point (from limit position)
// | | | |
// (8)----9-----10----11 (4),(6),(7),(8),(14),(15) :
//
// patch point stencils will be stored in this order
// (Em) 6, 7, 8, (Ep) 4, 15, 14, (P) 5
int offset = _refiner->GetNumVerticesTotal();
GregoryBasis::Point V0, V1, V3;
V0.AddWithWeight(facePoints[vid] + levelVertOffset, 1.0f);
V1.AddWithWeight(facePoints[(vid+1)&3] + levelVertOffset, 1.0f);
V3.AddWithWeight(facePoints[(vid+3)&3] + levelVertOffset, 1.0f);
// push back to stencils;
patchPoints[3* vid + 6] = (_numVertices++) + offset;
_vertexStencils.push_back(X6);
_varyingStencils.push_back(V0);
patchPoints[3*((vid+1)%4) + 4] = (_numVertices++) + offset;
_vertexStencils.push_back(X7);
_varyingStencils.push_back(V1);
patchPoints[3*((vid+1)%4) + 5] = (_numVertices++) + offset;
_vertexStencils.push_back(X8);
_varyingStencils.push_back(V1);
patchPoints[3* vid + 4] = (_numVertices++) + offset;
_vertexStencils.push_back(X4);
_varyingStencils.push_back(V0);
patchPoints[3*((vid+3)%4) + 6] = (_numVertices++) + offset;
_vertexStencils.push_back(X15);
_varyingStencils.push_back(V3);
patchPoints[3*((vid+3)%4) + 5] = (_numVertices++) + offset;
_vertexStencils.push_back(X14);
_varyingStencils.push_back(V3);
patchPoints[3*vid + 5] = (_numVertices++) + offset;
_vertexStencils.push_back(X5);
_varyingStencils.push_back(V0);
// reorder into UV row-column
static int const permuteRegular[16] =
{ 5, 6, 7, 8, 4, 0, 1, 9, 15, 3, 2, 10, 14, 13, 12, 11 };
for (int i = 0; i < 16; ++i) {
_patchPoints.push_back(patchPoints[permuteRegular[i]]);
}
++_numPatches;
return ConstIndexArray(&_patchPoints[(_numPatches-1)*16], 16);
}

View File

@ -91,6 +91,22 @@ public:
}
private:
ConstIndexArray getPatchPointsFromGregoryBasis(
Vtr::internal::Level const * level, Index thisFace,
ConstIndexArray facePoints,
int levelVertOffset);
ConstIndexArray getPatchPoints(
Vtr::internal::Level const *level, Index thisFace,
Index extraOrdinaryIndex, ConstIndexArray facePoints,
int levelVertOffset);
void computeLimitStencils(
Vtr::internal::Level const *level,
ConstIndexArray facePoints, int vid,
GregoryBasis::Point *P, GregoryBasis::Point *Ep, GregoryBasis::Point *Em);
TopologyRefiner const *_refiner;
GregoryBasis::PointsVector _vertexStencils;
GregoryBasis::PointsVector _varyingStencils;

View File

@ -47,6 +47,15 @@ EndCapGregoryBasisPatchFactory::EndCapGregoryBasisPatchFactory(
// Sanity check: the mesh must be adaptively refined
assert(not refiner.IsUniform());
// Reserve the patch point stencils. Ideally topology refiner
// would have an API to return how many endcap patches will be required.
// Instead we conservatively estimate by the number of patches at the
// finest level.
int numMaxLevelFaces = refiner.GetLevel(refiner.GetMaxLevel()).GetNumFaces();
_vertexStencils.reserve(numMaxLevelFaces*20);
_varyingStencils.reserve(numMaxLevelFaces*20);
}
//
@ -133,11 +142,11 @@ EndCapGregoryBasisPatchFactory::GetPatchPoints(
{ // Gather adjacent faces
ConstIndexArray adjfaces = level->getEdgeFaces(edge);
for (int i=0; i<adjfaces.size(); ++i) {
if (adjfaces[i]==faceIndex) {
for (int j=0; j<adjfaces.size(); ++j) {
if (adjfaces[j]==faceIndex) {
// XXXX manuelk if 'edge' is non-manifold, arbitrarily pick the
// next face in the list of adjacent faces
adjface = (adjfaces[(i+1)%adjfaces.size()]);
adjface = (adjfaces[(j+1)%adjfaces.size()]);
break;
}
}
@ -156,7 +165,7 @@ EndCapGregoryBasisPatchFactory::GetPatchPoints(
// Find index of basis in the list of basis already generated
struct compare {
static int op(void const * a, void const * b) {
return *(Index *)a - *(Index *)b;
return *(Index const*)a - *(Index const*)b;
}
};

View File

@ -112,7 +112,7 @@ EndCapLegacyGregoryPatchFactory::Finalize(
size_t numTotalGregoryPatches =
numGregoryPatches + numGregoryBoundaryPatches;
Vtr::internal::Level const &level = _refiner.getLevel(_refiner.GetMaxLevel());
Vtr::internal::Level const &maxLevel = _refiner.getLevel(_refiner.GetMaxLevel());
quadOffsetsTable->resize(numTotalGregoryPatches*4);
@ -120,11 +120,11 @@ EndCapLegacyGregoryPatchFactory::Finalize(
PatchTable::QuadOffsetsTable::value_type *p =
&((*quadOffsetsTable)[0]);
for (size_t i = 0; i < numGregoryPatches; ++i) {
getQuadOffsets(level, _gregoryFaceIndices[i], p);
getQuadOffsets(maxLevel, _gregoryFaceIndices[i], p);
p += 4;
}
for (size_t i = 0; i < numGregoryBoundaryPatches; ++i) {
getQuadOffsets(level, _gregoryBoundaryFaceIndices[i], p);
getQuadOffsets(maxLevel, _gregoryBoundaryFaceIndices[i], p);
p += 4;
}
}

View File

@ -33,38 +33,44 @@ namespace OPENSUBDIV_VERSION {
namespace Far {
//
// Statics for the publicly assignable callbacks and the methods to
// assign them (disable static assignment warnings when doing so):
//
static ErrorCallbackFunc errorFunc = 0;
static char const * errors[] = {
"FAR_NO_ERROR",
"FAR_FATAL_ERROR",
"FAR_INTERNAL_CODING_ERROR",
"FAR_CODING_ERROR",
"FAR_RUNTIME_ERROR"
};
void SetErrorCallback(ErrorCallbackFunc func) {
static WarningCallbackFunc warningFunc = 0;
#ifdef __INTEL_COMPILER
#pragma warning disable 1711
#endif
void SetErrorCallback(ErrorCallbackFunc func) {
errorFunc = func;
}
void SetWarningCallback(WarningCallbackFunc func) {
warningFunc = func;
}
#ifdef __INTEL_COMPILER
#pragma warning enable 1711
#endif
}
void Error(ErrorType err) {
if (errorFunc) {
errorFunc(err, NULL);
} else {
fprintf(stderr, "Error : %s\n",errors[err]);
}
}
//
// The default error and warning callbacks eventually belong in the
// internal namespace:
//
void Error(ErrorType err, const char *format, ...) {
static char const * errorTypeLabel[] = {
"No Error",
"Fatal Error",
"Coding Error (internal)",
"Coding Error",
"Error"
};
assert(err!=FAR_NO_ERROR);
char message[10240];
@ -76,23 +82,10 @@ void Error(ErrorType err, const char *format, ...) {
if (errorFunc) {
errorFunc(err, message);
} else {
printf("Error %s : %s\n",errors[err], message);
printf("%s: %s\n", errorTypeLabel[err], message);
}
}
static WarningCallbackFunc warningFunc = 0;
void SetWarningCallback(WarningCallbackFunc func) {
#ifdef __INTEL_COMPILER
#pragma warning disable 1711
#endif
warningFunc = func;
#ifdef __INTEL_COMPILER
#pragma warning enable 1711
#endif
}
void Warning(const char *format, ...) {
char message[10240];
@ -104,7 +97,7 @@ void Warning(const char *format, ...) {
if (warningFunc) {
warningFunc(message);
} else {
fprintf(stdout, "Warning : %s\n", message);
fprintf(stdout, "Warning: %s\n", message);
}
}

View File

@ -40,6 +40,8 @@ typedef enum {
FAR_RUNTIME_ERROR ///< Issue a generic runtime error, but continue execution.
} ErrorType;
/// \brief The error callback function type (default is "printf")
typedef void (*ErrorCallbackFunc)(ErrorType err, const char *message);
/// \brief Sets the error callback function (default is "printf")
@ -50,22 +52,8 @@ typedef void (*ErrorCallbackFunc)(ErrorType err, const char *message);
///
void SetErrorCallback(ErrorCallbackFunc func);
/// \brief Sends an OSD error
///
/// @param err the error type
///
void Error(ErrorType err);
/// \brief Sends an OSD error with a message
///
/// @param err the error type
///
/// @param format the format of the message (followed by arguments)
///
void Error(ErrorType err, const char *format, ...);
/// \brief Sets the warning callback function (default is "printf")
/// \brief The warning callback function type (default is "printf")
typedef void (*WarningCallbackFunc)(const char *message);
/// \brief Sets the warning callback function (default is "printf")
@ -76,7 +64,21 @@ typedef void (*WarningCallbackFunc)(const char *message);
///
void SetWarningCallback(WarningCallbackFunc func);
/// \brief Sends an OSD warning message
//
// The following are intended for internal use only (and will eventually
// be moved within namespace internal)
//
/// \brief Sends an OSD error with a message (internal use only)
///
/// @param err the error type
///
/// @param format the format of the message (followed by arguments)
///
void Error(ErrorType err, const char *format, ...);
/// \brief Sends an OSD warning message (internal use only)
///
/// @param format the format of the message (followed by arguments)
///

View File

@ -36,57 +36,6 @@ namespace OpenSubdiv {
namespace OPENSUBDIV_VERSION {
namespace Far {
// Builds a table of local indices pairs for each vertex of the patch.
//
// o
// N0 |
// | ....
// | .... : Gregory patch
// o ------ o ------ o ....
// N1 V | .... M3
// | .......
// | .......
// o .......
// N2
//
// [...] [N2 - N3] [...]
//
// Each value pair is composed of 2 index values in range [0-4[ pointing
// to the 2 neighbor vertices of the vertex 'V' belonging to the Gregory patch.
// Neighbor ordering is valence CCW and must match the winding of the 1-ring
// vertices.
//
static void
getQuadOffsets(Vtr::internal::Level const & level, Vtr::Index fIndex,
Vtr::Index offsets[], int fvarChannel=-1) {
Far::ConstIndexArray fPoints = (fvarChannel<0) ?
level.getFaceVertices(fIndex) :
level.getFaceFVarValues(fIndex, fvarChannel);
assert(fPoints.size()==4);
for (int i = 0; i < 4; ++i) {
Vtr::Index vIndex = fPoints[i];
Vtr::ConstIndexArray vFaces = level.getVertexFaces(vIndex),
vEdges = level.getVertexEdges(vIndex);
int thisFaceInVFaces = -1;
for (int j = 0; j < vFaces.size(); ++j) {
if (fIndex == vFaces[j]) {
thisFaceInVFaces = j;
break;
}
}
assert(thisFaceInVFaces != -1);
// we have to use the number of incident edges to modulo the local index
// because there could be 2 consecutive edges in the face belonging to
// the Gregory patch.
offsets[i*2+0] = thisFaceInVFaces;
offsets[i*2+1] = (thisFaceInVFaces + 1)%vEdges.size();
}
}
int
GregoryBasis::ProtoBasis::GetNumElements() const {
@ -153,6 +102,8 @@ GregoryBasis::ProtoBasis::ProtoBasis(
Vtr::internal::Level const & level, Index faceIndex,
int levelVertOffset, int fvarChannel) {
// XXX: This function is subject to refactor in 3.1
Vtr::ConstIndexArray facePoints = (fvarChannel<0) ?
level.getFaceVertices(faceIndex) :
level.getFaceFVarValues(faceIndex, fvarChannel);
@ -162,27 +113,45 @@ GregoryBasis::ProtoBasis::ProtoBasis(
valences[4],
zerothNeighbors[4];
Vtr::internal::StackBuffer<Index,40> manifoldRing((maxvalence+2)*2);
// XXX: a temporary hack for the performance issue
// ensure Point has a capacity for the neighborhood of
// 2 extraordinary verts + 2 regular verts
// worst case: n-valence verts at a corner of an n-gon.
int stencilCapacity =
4/*0-ring*/ + 2*(2*(maxvalence-2)/*1-ring around extraordinaries*/
+ 2/*1-ring around regulars, excluding shared ones*/);
Vtr::internal::StackBuffer<Point,16> f(maxvalence);
Vtr::internal::StackBuffer<Point,64> r(maxvalence*4);
Point e0[4], e1[4];
for (int i = 0; i < 4; ++i) {
P[i].Clear(stencilCapacity);
e0[i].Clear(stencilCapacity);
e1[i].Clear(stencilCapacity);
V[i].Clear(1);
}
Point e0[4], e1[4], org[4];
Vtr::internal::StackBuffer<Index, 40> manifoldRings[4];
manifoldRings[0].SetSize(maxvalence*2);
manifoldRings[1].SetSize(maxvalence*2);
manifoldRings[2].SetSize(maxvalence*2);
manifoldRings[3].SetSize(maxvalence*2);
Vtr::internal::StackBuffer<Point, 10> f(maxvalence);
Vtr::internal::StackBuffer<Point, 40> r(maxvalence*4);
// the first phase
for (int vid=0; vid<4; ++vid) {
org[vid] = facePoints[vid];
// save for varying stencils
V[vid] = facePoints[vid];
V[vid].AddWithWeight(facePoints[vid], 1.0f);
int ringSize =
level.gatherQuadRegularRingAroundVertex(
facePoints[vid], manifoldRing, fvarChannel);
facePoints[vid], manifoldRings[vid], fvarChannel);
int valence;
if (ringSize & 1) {
// boundary vertex
manifoldRing[ringSize] = manifoldRing[ringSize-1];
manifoldRings[vid][ringSize] = manifoldRings[vid][ringSize-1];
++ringSize;
valence = -ringSize/2;
} else {
@ -196,21 +165,19 @@ GregoryBasis::ProtoBasis::ProtoBasis(
zerothNeighbor=0,
ibefore=0;
Point pos(facePoints[vid]);
for (int i=0; i<ivalence; ++i) {
Index im = (i+ivalence-1)%ivalence,
ip = (i+1)%ivalence;
Index idx_neighbor = (manifoldRing[2*i + 0]),
idx_diagonal = (manifoldRing[2*i + 1]),
idx_neighbor_p = (manifoldRing[2*ip + 0]),
idx_neighbor_m = (manifoldRing[2*im + 0]),
idx_diagonal_m = (manifoldRing[2*im + 1]);
Index idx_neighbor = (manifoldRings[vid][2*i + 0]),
idx_diagonal = (manifoldRings[vid][2*i + 1]),
idx_neighbor_p = (manifoldRings[vid][2*ip + 0]),
idx_neighbor_m = (manifoldRings[vid][2*im + 0]),
idx_diagonal_m = (manifoldRings[vid][2*im + 1]);
bool boundaryNeighbor = (level.getVertexEdges(idx_neighbor).size() >
level.getVertexFaces(idx_neighbor).size());
level.getVertexFaces(idx_neighbor).size());
if (fvarChannel>=0) {
// XXXX manuelk need logic to check for boundary in fvar
@ -232,21 +199,22 @@ GregoryBasis::ProtoBasis::ProtoBasis(
}
}
Point neighbor(idx_neighbor),
diagonal(idx_diagonal),
neighbor_p(idx_neighbor_p),
neighbor_m(idx_neighbor_m),
diagonal_m(idx_diagonal_m);
float d = float(ivalence)+5.0f;
f[i].Clear(4);
f[i].AddWithWeight(facePoints[vid], float(ivalence)/d);
f[i].AddWithWeight(idx_neighbor_p, 2.0f/d);
f[i].AddWithWeight(idx_neighbor, 2.0f/d);
f[i].AddWithWeight(idx_diagonal, 1.0f/d);
P[vid].AddWithWeight(f[i], 1.0f/float(ivalence));
f[i] = (pos*float(ivalence) + (neighbor_p+neighbor)*2.0f + diagonal) / (float(ivalence)+5.0f);
P[vid] += f[i];
r[vid*maxvalence+i] = (neighbor_p-neighbor_m)/3.0f + (diagonal-diagonal_m)/6.0f;
int rid = vid * maxvalence + i;
r[rid].Clear(4);
r[rid].AddWithWeight(idx_neighbor_p, 1.0f/3.0f);
r[rid].AddWithWeight(idx_neighbor_m, -1.0f/3.0f);
r[rid].AddWithWeight(idx_diagonal, 1.0f/6.0f);
r[rid].AddWithWeight(idx_diagonal_m, -1.0f/6.0f);
}
P[vid] /= float(ivalence);
zerothNeighbors[vid] = zerothNeighbor;
if (currentNeighbor == 1) {
boundaryEdgeNeighbors[1] = boundaryEdgeNeighbors[0];
@ -254,24 +222,27 @@ GregoryBasis::ProtoBasis::ProtoBasis(
for (int i=0; i<ivalence; ++i) {
int im = (i+ivalence-1)%ivalence;
Point e = (f[i]+f[im])*0.5f;
e0[vid] += e * csf(ivalence-3, 2*i);
e1[vid] += e * csf(ivalence-3, 2*i+1);
float c0 = 0.5f * csf(ivalence-3, 2*i);
float c1 = 0.5f * csf(ivalence-3, 2*i+1);
e0[vid].AddWithWeight(f[i ], c0);
e0[vid].AddWithWeight(f[im], c0);
e1[vid].AddWithWeight(f[i ], c1);
e1[vid].AddWithWeight(f[im], c1);
}
float ef = computeCoefficient(ivalence);
e0[vid] *= ef;
e1[vid] *= ef;
if (valence<0) {
Point b0(boundaryEdgeNeighbors[0]),
b1(boundaryEdgeNeighbors[1]);
// Boundary gregory case:
if (valence < 0) {
P[vid].Clear(stencilCapacity);
if (ivalence>2) {
P[vid] = (b0 + b1 + pos*4.0f)/6.0f;
P[vid].AddWithWeight(boundaryEdgeNeighbors[0], 1.0f/6.0f);
P[vid].AddWithWeight(boundaryEdgeNeighbors[1], 1.0f/6.0f);
P[vid].AddWithWeight(facePoints[vid], 4.0f/6.0f);
} else {
P[vid] = pos;
P[vid].AddWithWeight(facePoints[vid], 1.0f);
}
float k = float(float(ivalence) - 1.0f); //k is the number of faces
float c = cosf(float(M_PI)/k);
@ -280,10 +251,17 @@ GregoryBasis::ProtoBasis::ProtoBasis(
float alpha_0k = -((1.0f+2.0f*c)*sqrtf(1.0f+c))/((3.0f*k+c)*sqrtf(1.0f-c));
float beta_0 = s/(3.0f*k + c);
Point diagonal(manifoldRing[2*zerothNeighbor + 1]);
int idx_diagonal = manifoldRings[vid][2*zerothNeighbor + 1];
e0[vid] = (b0 - b1)/6.0f;
e1[vid] = pos*gamma + diagonal*beta_0 + (b0 + b1)*alpha_0k;
e0[vid].Clear(stencilCapacity);
e0[vid].AddWithWeight(boundaryEdgeNeighbors[0], 1.0f/6.0f);
e0[vid].AddWithWeight(boundaryEdgeNeighbors[1], -1.0f/6.0f);
e1[vid].Clear(stencilCapacity);
e1[vid].AddWithWeight(facePoints[vid], gamma);
e1[vid].AddWithWeight(idx_diagonal, beta_0);
e1[vid].AddWithWeight(boundaryEdgeNeighbors[0], alpha_0k);
e1[vid].AddWithWeight(boundaryEdgeNeighbors[1], alpha_0k);
for (int x=1; x<ivalence-1; ++x) {
@ -292,50 +270,68 @@ GregoryBasis::ProtoBasis::ProtoBasis(
float alpha = (4.0f*sinf((float(M_PI) * float(x))/k))/(3.0f*k+c),
beta = (sinf((float(M_PI) * float(x))/k) + sinf((float(M_PI) * float(x+1))/k))/(3.0f*k+c);
Index idx_neighbor = manifoldRing[2*curri + 0],
idx_diagonal = manifoldRing[2*curri + 1];
Index idx_neighbor = manifoldRings[vid][2*curri + 0],
idx_diagonal = manifoldRings[vid][2*curri + 1];
Point p_neighbor(idx_neighbor),
p_diagonal(idx_diagonal);
e1[vid] += p_neighbor*alpha + p_diagonal*beta;
e1[vid].AddWithWeight(idx_neighbor, alpha);
e1[vid].AddWithWeight(idx_diagonal, beta);
}
e1[vid] /= 3.0f;
e1[vid] *= 1.0f/3.0f;
}
}
Index quadOffsets[8];
getQuadOffsets(level, faceIndex, quadOffsets, fvarChannel);
// the second phase
for (int vid=0; vid<4; ++vid) {
int n = abs(valences[vid]),
ivalence = n;
int n = abs(valences[vid]);
int ivalence = n;
int ip = (vid+1)%4,
im = (vid+3)%4,
np = abs(valences[ip]),
nm = abs(valences[im]);
Index start = quadOffsets[vid*2+0],
prev = quadOffsets[vid*2+1],
start_m = quadOffsets[im*2],
prev_p = quadOffsets[ip*2+1];
Index start = -1, prev = -1, start_m = -1, prev_p = -1;
for (int i = 0; i < n; ++i) {
if (manifoldRings[vid][i*2] == facePoints[ip])
start = i;
if (manifoldRings[vid][i*2] == facePoints[im])
prev = i;
}
for (int i = 0; i < np; ++i) {
if (manifoldRings[ip][i*2] == facePoints[vid]) {
prev_p = i;
break;
}
}
for (int i = 0; i < nm; ++i) {
if (manifoldRings[im][i*2] == facePoints[vid]) {
start_m = i;
break;
}
}
assert(start != -1 && prev != -1 && start_m != -1 && prev_p != -1);
Point Em_ip, Ep_im;
Point Em_ip = P[ip];
Point Ep_im = P[im];
if (valences[ip]<-2) {
Index j = (np + prev_p - zerothNeighbors[ip]) % np;
Em_ip = P[ip] + e0[ip]*cosf((float(M_PI)*j)/float(np-1)) + e1[ip]*sinf((float(M_PI)*j)/float(np-1));
Em_ip.AddWithWeight(e0[ip], cosf((float(M_PI)*j)/float(np-1)));
Em_ip.AddWithWeight(e1[ip], sinf((float(M_PI)*j)/float(np-1)));
} else {
Em_ip = P[ip] + e0[ip]*csf(np-3,2*prev_p) + e1[ip]*csf(np-3,2*prev_p+1);
Em_ip.AddWithWeight(e0[ip], csf(np-3, 2*prev_p));
Em_ip.AddWithWeight(e1[ip], csf(np-3, 2*prev_p+1));
}
if (valences[im]<-2) {
Index j = (nm + start_m - zerothNeighbors[im]) % nm;
Ep_im = P[im] + e0[im]*cosf((float(M_PI)*j)/float(nm-1)) + e1[im]*sinf((float(M_PI)*j)/float(nm-1));
Ep_im.AddWithWeight(e0[im], cosf((float(M_PI)*j)/float(nm-1)));
Ep_im.AddWithWeight(e1[im], sinf((float(M_PI)*j)/float(nm-1)));
} else {
Ep_im = P[im] + e0[im]*csf(nm-3,2*start_m) + e1[im]*csf(nm-3,2*start_m+1);
Ep_im.AddWithWeight(e0[im], csf(nm-3, 2*start_m));
Ep_im.AddWithWeight(e1[im], csf(nm-3, 2*start_m+1));
}
if (valences[vid] < 0) {
@ -355,12 +351,25 @@ GregoryBasis::ProtoBasis::ProtoBasis(
float s1 = 3.0f - 2.0f*csf(n-3,2)-csf(np-3,2),
s2 = 2.0f*csf(n-3,2),
s3 = 3.0f -2.0f*cosf(2.0f*float(M_PI)/float(n)) - cosf(2.0f*float(M_PI)/float(nm));
Ep[vid] = P[vid];
Ep[vid].AddWithWeight(e0[vid], csf(n-3, 2*start));
Ep[vid].AddWithWeight(e1[vid], csf(n-3, 2*start +1));
Ep[vid] = P[vid] + e0[vid]*csf(n-3, 2*start) + e1[vid]*csf(n-3, 2*start +1);
Em[vid] = P[vid] + e0[vid]*csf(n-3, 2*prev ) + e1[vid]*csf(n-3, 2*prev + 1);
Fp[vid] = (P[vid]*csf(np-3,2) + Ep[vid]*s1 + Em_ip*s2 + rp[start])/3.0f;
Fm[vid] = (P[vid]*csf(nm-3,2) + Em[vid]*s3 + Ep_im*s2 - rp[prev])/3.0f;
Em[vid] = P[vid];
Em[vid].AddWithWeight(e0[vid], csf(n-3, 2*prev ));
Em[vid].AddWithWeight(e1[vid], csf(n-3, 2*prev + 1));
Fp[vid].Clear(stencilCapacity);
Fp[vid].AddWithWeight(P[vid], csf(np-3, 2)/3.0f);
Fp[vid].AddWithWeight(Ep[vid], s1/3.0f);
Fp[vid].AddWithWeight(Em_ip, s2/3.0f);
Fp[vid].AddWithWeight(rp[start], 1.0f/3.0f);
Fm[vid].Clear(stencilCapacity);
Fm[vid].AddWithWeight(P[vid], csf(nm-3, 2)/3.0f);
Fm[vid].AddWithWeight(Em[vid], s3/3.0f);
Fm[vid].AddWithWeight(Ep_im, s2/3.0f);
Fm[vid].AddWithWeight(rp[prev], -1.0f/3.0f);
} else if (valences[vid] < -2) {
Index jp = (ivalence + start - zerothNeighbors[vid]) % ivalence,
@ -370,24 +379,59 @@ GregoryBasis::ProtoBasis::ProtoBasis(
s2 = 2*csf(n-3,2),
s3 = 3.0f-2.0f*cosf(2.0f*float(M_PI)/n)-cosf(2.0f*float(M_PI)/nm);
Ep[vid] = P[vid] + e0[vid]*cosf((float(M_PI)*jp)/float(ivalence-1)) + e1[vid]*sinf((float(M_PI)*jp)/float(ivalence-1));
Em[vid] = P[vid] + e0[vid]*cosf((float(M_PI)*jm)/float(ivalence-1)) + e1[vid]*sinf((float(M_PI)*jm)/float(ivalence-1));
Fp[vid] = (P[vid]*csf(np-3,2) + Ep[vid]*s1 + Em_ip*s2 + rp[start])/3.0f;
Fm[vid] = (P[vid]*csf(nm-3,2) + Em[vid]*s3 + Ep_im*s2 - rp[prev])/3.0f;
Ep[vid] = P[vid];
Ep[vid].AddWithWeight(e0[vid], cosf((float(M_PI)*jp)/float(ivalence-1)));
Ep[vid].AddWithWeight(e1[vid], sinf((float(M_PI)*jp)/float(ivalence-1)));
Em[vid] = P[vid];
Em[vid].AddWithWeight(e0[vid], cosf((float(M_PI)*jm)/float(ivalence-1)));
Em[vid].AddWithWeight(e1[vid], sinf((float(M_PI)*jm)/float(ivalence-1)));
Fp[vid].Clear(stencilCapacity);
Fp[vid].AddWithWeight(P[vid], csf(np-3,2)/3.0f);
Fp[vid].AddWithWeight(Ep[vid], s1/3.0f);
Fp[vid].AddWithWeight(Em_ip, s2/3.0f);
Fp[vid].AddWithWeight(rp[start], 1.0f/3.0f);
Fm[vid].Clear(stencilCapacity);
Fm[vid].AddWithWeight(P[vid], csf(nm-3,2)/3.0f);
Fm[vid].AddWithWeight(Em[vid], s3/3.0f);
Fm[vid].AddWithWeight(Ep_im, s2/3.0f);
Fm[vid].AddWithWeight(rp[prev], -1.0f/3.0f);
if (valences[im]<0) {
s1=3-2*csf(n-3,2)-csf(np-3,2);
Fp[vid] = Fm[vid] = (P[vid]*csf(np-3,2) + Ep[vid]*s1 + Em_ip*s2 + rp[start])/3.0f;
Fp[vid].Clear(stencilCapacity);
Fp[vid].AddWithWeight(P[vid], csf(np-3,2)/3.0f);
Fp[vid].AddWithWeight(Ep[vid], s1/3.0f);
Fp[vid].AddWithWeight(Em_ip, s2/3.0f);
Fp[vid].AddWithWeight(rp[start], 1.0f/3.0f);
Fm[vid] = Fp[vid];
} else if (valences[ip]<0) {
s1 = 3.0f-2.0f*cosf(2.0f*float(M_PI)/n)-cosf(2.0f*float(M_PI)/nm);
Fm[vid] = Fp[vid] = (P[vid]*csf(nm-3,2) + Em[vid]*s1 + Ep_im*s2 - rp[prev])/3.0f;
Fm[vid].Clear(stencilCapacity);
Fm[vid].AddWithWeight(P[vid], csf(nm-3,2)/3.0f);
Fm[vid].AddWithWeight(Em[vid], s1/3.0f);
Fm[vid].AddWithWeight(Ep_im, s2/3.0f);
Fm[vid].AddWithWeight(rp[prev], -1.0f/3.0f);
Fp[vid] = Fm[vid];
}
} else if (valences[vid]==-2) {
Ep[vid].Clear(stencilCapacity);
Ep[vid].AddWithWeight(facePoints[vid], 2.0f/3.0f);
Ep[vid].AddWithWeight(facePoints[ip], 1.0f/3.0f);
Ep[vid] = (org[vid]*2.0f + org[ip])/3.0f;
Em[vid] = (org[vid]*2.0f + org[im])/3.0f;
Fp[vid] = Fm[vid] = (org[vid]*4.0f + org[((vid+2)%n)] + org[ip]*2.0f + org[im]*2.0f)/9.0f;
Em[vid].Clear(stencilCapacity);
Em[vid].AddWithWeight(facePoints[vid], 2.0f/3.0f);
Em[vid].AddWithWeight(facePoints[im], 1.0f/3.0f);
Fp[vid].Clear(stencilCapacity);
Fp[vid].AddWithWeight(facePoints[vid], 4.0f/9.0f);
Fp[vid].AddWithWeight(facePoints[((vid+2)%n)], 1.0f/9.0f);
Fp[vid].AddWithWeight(facePoints[ip], 2.0f/9.0f);
Fp[vid].AddWithWeight(facePoints[im], 2.0f/9.0f);
Fm[vid] = Fp[vid];
}
}
@ -429,16 +473,7 @@ GregoryBasis::CreateStencilTable(PointsVector const &stencils) {
float * weights = &stencilTable->_weights[0];
for (int i = 0; i < nStencils; ++i) {
GregoryBasis::Point const &src = stencils[i];
int size = src.GetSize();
memcpy(indices, src.GetIndices(), size*sizeof(Index));
memcpy(weights, src.GetWeights(), size*sizeof(float));
*sizes = size;
indices += size;
weights += size;
++sizes;
stencils[i].Copy(&sizes, &indices, &weights);
}
stencilTable->generateOffsets();
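For readers following the table layout being filled here: the sizes, indices and weights arrays form a simple CSR-style arrangement, and generateOffsets() produces the running sums into it. A minimal, self-contained sketch of evaluating one stencil from that layout (the helper and its names are illustrative only, not part of this change):
#include <vector>
// Evaluate stencil i against scalar control values, assuming offsets[i] is
// the running sum of sizes[0..i) as generateOffsets() computes it.
static float evalStencil(int i,
                         std::vector<int>   const & sizes,
                         std::vector<int>   const & offsets,
                         std::vector<int>   const & indices,
                         std::vector<float> const & weights,
                         std::vector<float> const & controlValues) {
    float result = 0.0f;
    for (int j = 0; j < sizes[i]; ++j) {
        result += controlValues[indices[offsets[i] + j]] * weights[offsets[i] + j];
    }
    return result;
}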

View File

@ -26,6 +26,7 @@
#define OPENSUBDIV3_FAR_GREGORY_BASIS_H
#include "../vtr/level.h"
#include "../vtr/stackBuffer.h"
#include "../far/types.h"
#include "../far/stencilTable.h"
#include <cstring>
@ -79,22 +80,15 @@ public:
//
class Point {
public:
static const int RESERVED_ENTRY_SIZE = 64;
// 40 means up to valence=10 is on stack
static const int RESERVED_STENCIL_SIZE = 40;
Point() : _size(0) {
_indices.reserve(RESERVED_ENTRY_SIZE);
_weights.reserve(RESERVED_ENTRY_SIZE);
}
Point(Vtr::Index idx, float weight = 1.0f) {
_indices.reserve(RESERVED_ENTRY_SIZE);
_weights.reserve(RESERVED_ENTRY_SIZE);
_size = 1;
_indices.push_back(idx);
_weights.push_back(weight);
Point(int stencilCapacity=RESERVED_STENCIL_SIZE) : _size(0) {
_stencils.SetSize(stencilCapacity);
}
Point(Point const & other) {
_stencils.SetSize(other._stencils.GetSize());
*this = other;
}
@ -102,96 +96,81 @@ public:
return _size;
}
Vtr::Index const * GetIndices() const {
return &_indices[0];
int GetCapacity() const {
return _stencils.GetSize();
}
float const * GetWeights() const {
return &_weights[0];
void Clear(int capacity) {
_size = 0;
if ((int)_stencils.GetSize() < capacity) {
_stencils.SetSize(capacity);
}
}
void AddWithWeight(Vtr::Index idx, float weight) {
for (int i = 0; i < _size; ++i) {
if (_stencils[i].index == idx) {
_stencils[i].weight += weight;
return;
}
}
assert(_size < (int)_stencils.GetSize());
_stencils[_size].index = idx;
_stencils[_size].weight = weight;
++_size;
}
void AddWithWeight(Point const &src, float weight) {
for (int i = 0; i < src._size; ++i) {
AddWithWeight(src._stencils[i].index,
src._stencils[i].weight * weight);
}
}
Point & operator = (Point const & other) {
Clear(other.GetCapacity());
_size = other._size;
_indices = other._indices;
_weights = other._weights;
return *this;
}
Point & operator += (Point const & other) {
for (int i=0; i<other._size; ++i) {
Vtr::Index idx = findIndex(other._indices[i]);
_weights[idx] += other._weights[i];
}
return *this;
}
Point & operator -= (Point const & other) {
for (int i=0; i<other._size; ++i) {
Vtr::Index idx = findIndex(other._indices[i]);
_weights[idx] -= other._weights[i];
assert(_size <= (int)_stencils.GetSize());
for (int i = 0; i < _size; ++i) {
_stencils[i] = other._stencils[i];
}
return *this;
}
Point & operator *= (float f) {
for (int i=0; i<_size; ++i) {
_weights[i] *= f;
_stencils[i].weight *= f;
}
return *this;
}
Point & operator /= (float f) {
return (*this)*=(1.0f/f);
}
friend Point operator * (Point const & src, float f) {
Point p( src ); return p*=f;
}
friend Point operator / (Point const & src, float f) {
Point p( src ); return p*= (1.0f/f);
}
Point operator + (Point const & other) {
Point p(*this); return p+=other;
}
Point operator - (Point const & other) {
Point p(*this); return p-=other;
}
void OffsetIndices(Vtr::Index offset) {
for (int i=0; i<_size; ++i) {
_indices[i] += offset;
_stencils[i].index += offset;
}
}
void Copy(int ** size, Vtr::Index ** indices, float ** weights) const {
memcpy(*indices, &_indices[0], _size*sizeof(Vtr::Index));
memcpy(*weights, &_weights[0], _size*sizeof(float));
for (int i = 0; i < _size; ++i) {
**indices = _stencils[i].index;
**weights = _stencils[i].weight;
++(*indices);
++(*weights);
}
**size = _size;
*indices += _size;
*weights += _size;
++(*size);
}
private:
int findIndex(Vtr::Index idx) {
for (int i=0; i<_size; ++i) {
if (_indices[i]==idx) {
return i;
}
}
_indices.push_back(idx);
_weights.push_back(0.0f);
++_size;
return _size-1;
}
int _size;
std::vector<Vtr::Index> _indices;
std::vector<float> _weights;
struct Stencil {
Vtr::Index index;
float weight;
};
Vtr::internal::StackBuffer<Stencil, RESERVED_STENCIL_SIZE> _stencils;
};
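The reworked Point above stores its (index, weight) pairs in a fixed-capacity stack buffer (RESERVED_STENCIL_SIZE = 40, so valences up to 10 never touch the heap, per the comment) and merges repeated indices on insertion. A simplified, self-contained sketch of that merge-or-append behavior, using std::vector purely for illustration rather than the library's StackBuffer:
#include <vector>
struct SketchPoint {                          // simplified stand-in, not the library class
    struct Entry { int index; float weight; };
    std::vector<Entry> entries;
    void AddWithWeight(int idx, float w) {
        for (Entry & e : entries) {
            if (e.index == idx) {             // merge into an existing entry
                e.weight += w;
                return;
            }
        }
        entries.push_back(Entry{idx, w});     // otherwise append a new one
    }
};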
//

View File

@ -53,7 +53,7 @@ namespace Far {
/// -----------|:----:|------------------------------------------------------
/// level | 4 | the subdivision level of the patch
/// nonquad | 1 | whether the patch is the child of a non-quad face
/// unused | 3 | transition edge mask encoding
/// unused | 3 | unused
/// boundary | 4 | boundary edge mask encoding
/// v | 10 | log2 value of u parameter at first patch corner
/// u | 10 | log2 value of v parameter at first patch corner

View File

@ -416,7 +416,7 @@ PatchTable::ComputeLocalPointValues(T const *src, T *dst) const {
if (_localPointStencils) {
_localPointStencils->UpdateValues(src, dst);
}
};
}
} // end namespace Far

View File

@ -40,13 +40,15 @@ namespace OpenSubdiv {
namespace OPENSUBDIV_VERSION {
namespace {
//
// A convenience container for the different types of feature adaptive patches
// A convenience container for the different types of feature adaptive patches.
// Each instance associates a value of the template parameter type with each
// patch type.
//
template <class TYPE>
struct PatchTypes {
TYPE R, // regular patch
G, // gregory patch
GB, // gregory boundary patch
@ -54,7 +56,6 @@ struct PatchTypes {
PatchTypes() { std::memset(this, 0, sizeof(PatchTypes<TYPE>)); }
// Returns the number of patches based on the patch type in the descriptor
TYPE & getValue( Far::PatchDescriptor desc ) {
switch (desc.GetType()) {
case Far::PatchDescriptor::REGULAR : return R;
@ -66,17 +67,6 @@ struct PatchTypes {
// can't be reached (suppress compiler warning)
return R;
}
// Counts the number of arrays required to store each type of patch used
// in the primitive
int getNumPatchArrays() const {
int result=0;
if (R) ++result;
if (G) ++result;
if (GB) ++result;
if (GP) ++result;
return result;
}
};
typedef PatchTypes<Far::Index *> PatchCVPointers;
@ -85,6 +75,17 @@ typedef PatchTypes<Far::Index *> SharpnessIndexPointers;
typedef PatchTypes<Far::Index> PatchFVarOffsets;
typedef PatchTypes<Far::Index **> PatchFVarPointers;
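As the comment above notes, PatchTypes<TYPE> just holds one value per feature-adaptive patch type (R, G, GB, GP). A simplified, self-contained sketch of the inventory-style use the factory makes of it, reserving one patch array per non-empty type as the removed getNumPatchArrays() did (the struct and helper are illustrative, not the library's API):
struct SketchInventory {                  // stand-in for PatchTypes<int>
    int R = 0, G = 0, GB = 0, GP = 0;     // per-type patch counts
};
// One patch array is reserved for each patch type that actually has patches.
inline int countNonEmptyPatchArrays(SketchInventory const & inv) {
    int result = 0;
    if (inv.R)  ++result;
    if (inv.G)  ++result;
    if (inv.GB) ++result;
    if (inv.GP) ++result;
    return result;
}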
// Helpers for compiler warnings and floating point equality tests
#ifdef __INTEL_COMPILER
#pragma warning (push)
#pragma warning disable 1572
#endif
inline bool isSharpnessEqual(float s1, float s2) { return (s1 == s2); }
#ifdef __INTEL_COMPILER
#pragma warning (pop)
#endif
} // namespace anon
@ -251,8 +252,8 @@ public:
}
// Compare cursor positions
bool operator != (int pos) {
return _currentChannel < pos;
bool operator != (int posArg) {
return _currentChannel < posArg;
}
// Return FVar channel index in the TopologyRefiner list
@ -512,7 +513,7 @@ assignSharpnessIndex(float sharpness, std::vector<float> & sharpnessValues) {
// linear search
for (int i=0; i<(int)sharpnessValues.size(); ++i) {
if (sharpnessValues[i] == sharpness) {
if (isSharpnessEqual(sharpnessValues[i], sharpness)) {
return i;
}
}
@ -544,9 +545,12 @@ PatchTableFactory::createUniform(TopologyRefiner const & refiner, Options option
options.triangulateQuads &= (refiner.GetSchemeType()==Sdc::SCHEME_BILINEAR or
refiner.GetSchemeType()==Sdc::SCHEME_CATMARK);
// level=0 may contain n-gons, which are not supported in PatchTable.
// Even if generateAllLevels = true, we start from level 1.
int maxvalence = refiner.GetMaxValence(),
maxlevel = refiner.GetMaxLevel(),
firstlevel = options.generateAllLevels ? 0 : maxlevel,
firstlevel = options.generateAllLevels ? 1 : maxlevel,
nlevels = maxlevel-firstlevel+1;
PtexIndices ptexIndices(refiner);
@ -590,9 +594,7 @@ PatchTableFactory::createUniform(TopologyRefiner const & refiner, Options option
if (options.triangulateQuads)
npatches *= 2;
if (level>=firstlevel) {
table->pushPatchArray(desc, npatches, &voffset, &poffset, 0);
}
table->pushPatchArray(desc, npatches, &voffset, &poffset, 0);
}
// Allocate various tables
@ -614,8 +616,8 @@ PatchTableFactory::createUniform(TopologyRefiner const & refiner, Options option
PatchParam * pptr = &table->_paramTable[0];
Index ** fptr = 0;
Index levelVertOffset = options.generateAllLevels ?
0 : refiner.GetLevel(0).GetNumVertices();
// we always skip level=0 vertices (control cages)
Index levelVertOffset = refiner.GetLevel(0).GetNumVertices();
Index * levelFVarVertOffsets = 0;
if (generateFVarPatches) {
@ -715,7 +717,13 @@ PatchTableFactory::createAdaptive(TopologyRefiner const & refiner, Options optio
context.table = new PatchTable(maxValence);
// Populate the patch array descriptors
context.table->reservePatchArrays(context.patchInventory.getNumPatchArrays());
int numPatchArrays = 0;
if (context.patchInventory.R > 0) ++numPatchArrays;
if (context.patchInventory.G > 0) ++numPatchArrays;
if (context.patchInventory.GB > 0) ++numPatchArrays;
if (context.patchInventory.GP > 0) ++numPatchArrays;
context.table->reservePatchArrays(numPatchArrays);
// Sort through the inventory and push back non-empty patch arrays
ConstPatchDescriptorArray const & descs =
@ -890,7 +898,10 @@ PatchTableFactory::identifyAdaptivePatches(AdaptiveContext & context) {
}
// Identify boundaries for both regular and xordinary patches -- non-manifold
// edges and vertices are interpreted as boundaries for regular patches
// (infinitely sharp) edges and vertices are currently interpreted as boundaries
// for regular patches, though an irregular patch or extrapolated boundary patch
// is really necessary in the future for some non-manifold cases.
//
if (hasBoundaryVertex or hasNonManifoldVertex) {
Vtr::ConstIndexArray fEdges = level->getFaceEdges(faceIndex);
@ -903,6 +914,27 @@ PatchTableFactory::identifyAdaptivePatches(AdaptiveContext & context) {
((level->getEdgeTag(fEdges[1])._nonManifold) << 1) |
((level->getEdgeTag(fEdges[2])._nonManifold) << 2) |
((level->getEdgeTag(fEdges[3])._nonManifold) << 3);
// Other than non-manifold edges, non-manifold vertices that were made
// sharp should also trigger new "boundary" edges for the sharp corner
// patches introduced in these cases.
//
if (level->getVertexTag(fVerts[0])._nonManifold &&
level->getVertexTag(fVerts[0])._infSharp) {
nonManEdgeMask |= (1 << 0) | (1 << 3);
}
if (level->getVertexTag(fVerts[1])._nonManifold &&
level->getVertexTag(fVerts[1])._infSharp) {
nonManEdgeMask |= (1 << 1) | (1 << 0);
}
if (level->getVertexTag(fVerts[2])._nonManifold &&
level->getVertexTag(fVerts[2])._infSharp) {
nonManEdgeMask |= (1 << 2) | (1 << 1);
}
if (level->getVertexTag(fVerts[3])._nonManifold &&
level->getVertexTag(fVerts[3])._infSharp) {
nonManEdgeMask |= (1 << 3) | (1 << 2);
}
boundaryEdgeMask |= nonManEdgeMask;
}
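The four explicit cases above encode the fact that corner vertex i of a quad is incident to face edges i and (i+3)%4; a tiny self-contained check of that mapping (illustrative only):
#include <cassert>
// For corner vertex v of a quad, the two incident face edges are v and
// (v + 3) % 4 -- exactly the pair of bits OR'd into nonManEdgeMask above.
inline int incidentEdgeBits(int v) {
    return (1 << v) | (1 << ((v + 3) % 4));
}
inline void checkIncidentEdgeBits() {
    assert(incidentEdgeBits(0) == ((1 << 0) | (1 << 3)));
    assert(incidentEdgeBits(1) == ((1 << 1) | (1 << 0)));
    assert(incidentEdgeBits(2) == ((1 << 2) | (1 << 1)));
    assert(incidentEdgeBits(3) == ((1 << 3) | (1 << 2)));
}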
@ -1163,7 +1195,7 @@ PatchTableFactory::populateAdaptivePatches(
permutation = permuteCorner[bIndex];
level->gatherQuadRegularCornerPatchPoints(faceIndex, patchVerts, bIndex);
} else {
assert(patchTag._boundaryCount >=0 && patchTag._boundaryCount <= 2);
assert(patchTag._boundaryCount <= 2);
}
offsetAndPermuteIndices(patchVerts, 16, levelVertOffset, permutation, iptrs.R);

View File

@ -214,7 +214,12 @@ private:
typedef float Weight; // Also part of the expected interface
public:
Mask(Weight* v, Weight* e, Weight* f) : _vertWeights(v), _edgeWeights(e), _faceWeights(f) { }
Mask(Weight* v, Weight* e, Weight* f) :
_vertWeights(v), _edgeWeights(e), _faceWeights(f),
_vertCount(0), _edgeCount(0), _faceCount(0),
_faceWeightsForFaceCenters(false)
{ }
~Mask() { }
public: // Generic interface expected of <typename MASK>:
@ -312,7 +317,8 @@ PrimvarRefiner::Limit(T const & src, U & dst) const {
if (_refiner.getLevel(_refiner.GetMaxLevel()).getNumVertexEdgesTotal() == 0) {
Error(FAR_RUNTIME_ERROR,
"Cannot compute limit points -- last level of refinement does not include full topology.");
"Failure in PrimvarRefiner::Limit() -- "
"last level of refinement does not include full topology.");
return;
}
@ -335,7 +341,8 @@ PrimvarRefiner::Limit(T const & src, U & dstPos, U1 & dstTan1, U2 & dstTan2) con
if (_refiner.getLevel(_refiner.GetMaxLevel()).getNumVertexEdgesTotal() == 0) {
Error(FAR_RUNTIME_ERROR,
"Cannot compute limit points -- last level of refinement does not include full topology.");
"Failure in PrimvarRefiner::Limit() -- "
"last level of refinement does not include full topology.");
return;
}
@ -358,7 +365,8 @@ PrimvarRefiner::LimitFaceVarying(T const & src, U & dst, int channel) const {
if (_refiner.getLevel(_refiner.GetMaxLevel()).getNumVertexEdgesTotal() == 0) {
Error(FAR_RUNTIME_ERROR,
"Cannot compute limit points -- last level of refinement does not include full topology.");
"Failure in PrimvarRefiner::LimitFaceVarying() -- "
"last level of refinement does not include full topology.");
return;
}

View File

@ -94,9 +94,9 @@ PtexIndices::GetAdjacency(
if (Sdc::SchemeTypeTraits::GetRegularFaceSize(
refiner.GetSchemeType()) != 4) {
Far::Error(FAR_CODING_ERROR,
"PtexIndices::GetAdjacency() is currently only implemented for "
"quad schemes.");
Far::Error(FAR_RUNTIME_ERROR,
"Failure in PtexIndices::GetAdjacency() -- "
"currently only implemented for quad schemes.");
return;
}

View File

@ -24,13 +24,26 @@
#include "../far/stencilBuilder.h"
#include "../far/topologyRefiner.h"
namespace OpenSubdiv {
namespace OPENSUBDIV_VERSION {
namespace Far {
namespace internal {
namespace {
#ifdef __INTEL_COMPILER
#pragma warning (push)
#pragma warning disable 1572
#endif
inline bool isWeightZero(float w) { return (w == 0.0f); }
#ifdef __INTEL_COMPILER
#pragma warning (pop)
#endif
}
struct PointDerivWeight {
float p;
float du;
@ -202,6 +215,10 @@ public:
std::vector<float> const&
GetDvWeights() const { return _dvWeights; }
void SetCoarseVertCount(int numVerts) {
_coarseVertCount = numVerts;
}
private:
// Merge a vertex weight into the stencil table, if there is an existing
@ -333,6 +350,12 @@ StencilBuilder::GetNumVertsInStencil(size_t stencilIndex) const
return (int)_weightTable->GetSizes()[stencilIndex];
}
void
StencilBuilder::SetCoarseVertCount(int numVerts)
{
_weightTable->SetCoarseVertCount(numVerts);
}
std::vector<int> const&
StencilBuilder::GetStencilOffsets() const {
return _weightTable->GetOffsets();
@ -367,8 +390,9 @@ void
StencilBuilder::Index::AddWithWeight(Index const & src, float weight)
{
// Ignore no-op weights.
if (weight == 0)
if (isWeightZero(weight)) {
return;
}
_owner->_weightTable->AddWithWeight(src._index, _index, weight,
_owner->_weightTable->GetScalarAccumulator());
}
@ -376,7 +400,7 @@ StencilBuilder::Index::AddWithWeight(Index const & src, float weight)
void
StencilBuilder::Index::AddWithWeight(Stencil const& src, float weight)
{
if(weight == 0.0f) {
if (isWeightZero(weight)) {
return;
}
@ -386,7 +410,7 @@ StencilBuilder::Index::AddWithWeight(Stencil const& src, float weight)
for (int i = 0; i < srcSize; ++i) {
float w = srcWeights[i];
if (w == 0.0f) {
if (isWeightZero(w)) {
continue;
}
@ -395,14 +419,14 @@ StencilBuilder::Index::AddWithWeight(Stencil const& src, float weight)
float wgt = weight * w;
_owner->_weightTable->AddWithWeight(srcIndex, _index, wgt,
_owner->_weightTable->GetScalarAccumulator());
}
}
}
void
StencilBuilder::Index::AddWithWeight(Stencil const& src,
float weight, float du, float dv)
{
if(weight == 0.0f and du == 0.0f and dv == 0.0f) {
if (isWeightZero(weight) and isWeightZero(du) and isWeightZero(dv)) {
return;
}
@ -412,7 +436,7 @@ StencilBuilder::Index::AddWithWeight(Stencil const& src,
for (int i = 0; i < srcSize; ++i) {
float w = srcWeights[i];
if (w == 0.0f) {
if (isWeightZero(w)) {
continue;
}

View File

@ -51,6 +51,8 @@ public:
int GetNumVertsInStencil(size_t stencilIndex) const;
void SetCoarseVertCount(int numVerts);
// Mapping from stencil[i] to it's starting offset in the sources[] and weights[] arrays;
std::vector<int> const& GetStencilOffsets() const;

View File

@ -133,6 +133,8 @@ class StencilTable {
public:
virtual ~StencilTable() {};
/// \brief Returns the number of stencils in the table
int GetNumStencils() const {
return (int)_sizes.size();

View File

@ -40,6 +40,19 @@ namespace OPENSUBDIV_VERSION {
namespace Far {
namespace {
#ifdef __INTEL_COMPILER
#pragma warning (push)
#pragma warning disable 1572
#endif
inline bool isWeightZero(float w) { return (w == 0.0f); }
#ifdef __INTEL_COMPILER
#pragma warning (pop)
#endif
}
//------------------------------------------------------------------------------
void
@ -81,8 +94,9 @@ StencilTableFactory::Create(TopologyRefiner const & refiner,
PrimvarRefiner primvarRefiner(refiner);
internal::StencilBuilder::Index srcIndex(&builder, 0);
internal::StencilBuilder::Index dstIndex(&builder,
refiner.GetLevel(0).GetNumVertices());
internal::StencilBuilder::Index dstIndex(&builder,
refiner.GetLevel(0).GetNumVertices());
for (int level=1; level<=maxlevel; ++level) {
if (not interpolateVarying) {
primvarRefiner.Interpolate(level, srcIndex, dstIndex);
@ -90,17 +104,26 @@ StencilTableFactory::Create(TopologyRefiner const & refiner,
primvarRefiner.InterpolateVarying(level, srcIndex, dstIndex);
}
srcIndex = dstIndex;
dstIndex = dstIndex[refiner.GetLevel(level).GetNumVertices()];
}
if (options.factorizeIntermediateLevels) {
srcIndex = dstIndex;
}
dstIndex = dstIndex[refiner.GetLevel(level).GetNumVertices()];
if (not options.factorizeIntermediateLevels) {
// All previous verts are treated as coarse verts; as a result, we
// leave srcIndex alone and instead update the coarse vertex count.
builder.SetCoarseVertCount(dstIndex.GetOffset());
}
}
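For context, a minimal usage sketch of the factory this loop feeds. It is an illustration only: it assumes a Far::TopologyRefiner that has already been created and uniformly refined elsewhere, an installed opensubdiv/far/... header layout, and a typical client-side Vertex type that is not part of the library:
#include <vector>
#include <opensubdiv/far/topologyRefiner.h>
#include <opensubdiv/far/stencilTableFactory.h>
// Any type with Clear() / AddWithWeight() works as a primvar value.
struct Vertex {
    float x, y, z;
    void Clear() { x = y = z = 0.0f; }
    void AddWithWeight(Vertex const & src, float weight) {
        x += weight * src.x;  y += weight * src.y;  z += weight * src.z;
    }
};
void computeRefinedPoints(OpenSubdiv::Far::TopologyRefiner const & refiner,
                          std::vector<Vertex> const & controlVerts,
                          std::vector<Vertex> & refinedVerts) {
    OpenSubdiv::Far::StencilTableFactory::Options options;
    options.generateIntermediateLevels = false;   // stencils for the last level only
    options.generateOffsets            = true;
    OpenSubdiv::Far::StencilTable const * stencilTable =
        OpenSubdiv::Far::StencilTableFactory::Create(refiner, options);
    refinedVerts.resize(stencilTable->GetNumStencils());
    stencilTable->UpdateValues(&controlVerts[0], &refinedVerts[0]);
    delete stencilTable;
}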
size_t firstOffset = refiner.GetLevel(0).GetNumVertices();
if (not options.generateIntermediateLevels)
firstOffset = srcIndex.GetOffset();
// Copy stencils from the pool allocator into the tables
// always initialize numControlVertices (useful for torus case)
// Copy stencils from the StencilBuilder into the StencilTable.
// Always initialize numControlVertices (useful for torus case)
StencilTable * result =
new StencilTable(refiner.GetLevel(0).GetNumVertices(),
builder.GetStencilOffsets(),
@ -241,7 +264,7 @@ StencilTableFactory::AppendLocalPointStencilTable(
}
}
// copy all local points stencils to proto stencils, and factoriz if needed.
// copy all local points stencils to proto stencils, and factorize if needed.
int nLocalPointStencils = localPointStencilTable->GetNumStencils();
int nLocalPointStencilsElements = 0;
@ -258,7 +281,7 @@ StencilTableFactory::AppendLocalPointStencilTable(
for (int j = 0; j < src.GetSize(); ++j) {
Index index = src.GetVertexIndices()[j];
float weight = src.GetWeights()[j];
if (weight == 0.0) continue;
if (isWeightZero(weight)) continue;
if (factorize) {
dst.AddWithWeight(

View File

@ -192,12 +192,12 @@ TopologyRefiner::RefineUniform(UniformOptions options) {
if (_levels[0]->getNumVertices() == 0) {
Error(FAR_RUNTIME_ERROR,
"Cannot apply uniform refinement -- base level appears to be uninitialized.");
"Failure in TopologyRefiner::RefineUniform() -- base level is uninitialized.");
return;
}
if (_refinements.size()) {
Error(FAR_RUNTIME_ERROR,
"Cannot apply uniform refinement -- previous refinements already applied.");
"Failure in TopologyRefiner::RefineUniform() -- previous refinements already applied.");
return;
}
@ -220,7 +220,7 @@ TopologyRefiner::RefineUniform(UniformOptions options) {
for (int i = 1; i <= (int)options.refinementLevel; ++i) {
refineOptions._minimalTopology =
options.fullTopologyInLastLevel ? false : (i == options.refinementLevel);
options.fullTopologyInLastLevel ? false : (i == (int)options.refinementLevel);
Vtr::internal::Level& parentLevel = getLevel(i-1);
Vtr::internal::Level& childLevel = *(new Vtr::internal::Level);
@ -245,17 +245,17 @@ TopologyRefiner::RefineAdaptive(AdaptiveOptions options) {
if (_levels[0]->getNumVertices() == 0) {
Error(FAR_RUNTIME_ERROR,
"Cannot apply adaptive refinement -- base level appears to be uninitialized.");
"Failure in TopologyRefiner::RefineAdaptive() -- base level is uninitialized.");
return;
}
if (_refinements.size()) {
Error(FAR_RUNTIME_ERROR,
"Cannot apply adaptive refinement -- previous refinements already applied.");
"Failure in TopologyRefiner::RefineAdaptive() -- previous refinements already applied.");
return;
}
if (_subdivType != Sdc::SCHEME_CATMARK) {
Error(FAR_RUNTIME_ERROR,
"Cannot apply adaptive refinement -- currently only supported for scheme Catmark.");
"Failure in TopologyRefiner::RefineAdaptive() -- currently only supported for Catmark scheme.");
return;
}

View File

@ -232,7 +232,7 @@ private:
std::vector<Vtr::internal::Level *> _levels;
std::vector<Vtr::internal::Refinement *> _refinements;
std::vector<TopologyLevel> _farLevels;;
std::vector<TopologyLevel> _farLevels;
};

View File

@ -23,6 +23,7 @@
//
#include "../far/topologyRefinerFactory.h"
#include "../far/topologyRefiner.h"
#include "../sdc/types.h"
#include "../vtr/level.h"
#include <cstdio>
@ -55,31 +56,42 @@ TopologyRefinerFactoryBase::prepareComponentTopologySizing(TopologyRefiner& refi
int vCount = baseLevel.getNumVertices();
int fCount = baseLevel.getNumFaces();
assert((vCount > 0) && (fCount > 0));
if (vCount == 0) {
Error(FAR_RUNTIME_ERROR, "Failure in TopologyRefinerFactory<>::Create() -- "
"mesh contains no vertices.");
return false;
}
if (fCount == 0) {
Error(FAR_RUNTIME_ERROR, "Failure in TopologyRefinerFactory<>::Create() -- "
"meshes without faces not yet supported.");
return false;
}
// Make sure no face was defined that would lead to a valence overflow -- the max
// valence has been initialized with the maximum number of face-vertices:
if (baseLevel.getMaxValence() > Vtr::VALENCE_LIMIT) {
char msg[1024];
snprintf(msg, 1024,
"Invalid topology specified : face with %d vertices > %d max.",
snprintf(msg, 1024, "Failure in TopologyRefinerFactory<>::Create() -- "
"face with %d vertices > %d max.",
baseLevel.getMaxValence(), Vtr::VALENCE_LIMIT);
Warning(msg);
Error(FAR_RUNTIME_ERROR, msg);
return false;
}
int fVertCount = baseLevel.getNumFaceVertices(fCount - 1) +
baseLevel.getOffsetOfFaceVertices(fCount - 1);
if (fVertCount == 0) {
Error(FAR_RUNTIME_ERROR, "Failure in TopologyRefinerFactory<>::Create() -- "
"mesh contains no face-vertices.");
return false;
}
if ((refiner.GetSchemeType() == Sdc::SCHEME_LOOP) && (fVertCount != (3 * fCount))) {
char msg[1024];
snprintf(msg, 1024,
"Invalid topology specified : non-triangular faces not supported by Loop scheme.");
Warning(msg);
Error(FAR_RUNTIME_ERROR, "Failure in TopologyRefinerFactory<>::Create() -- "
"non-triangular faces not supported by Loop scheme.");
return false;
}
baseLevel.resizeFaceVertices(fVertCount);
assert(baseLevel.getNumFaceVerticesTotal() > 0);
//
// If edges were sized, all other topological relations must be sized with it, in
@ -115,29 +127,29 @@ TopologyRefinerFactoryBase::prepareComponentTopologyAssignment(TopologyRefiner&
if (completeMissingTopology) {
if (not baseLevel.completeTopologyFromFaceVertices()) {
char msg[1024];
snprintf(msg, 1024,
"Invalid topology detected : vertex with valence %d > %d max.",
snprintf(msg, 1024, "Failure in TopologyRefinerFactory<>::Create() -- "
"vertex with valence %d > %d max.",
baseLevel.getMaxValence(), Vtr::VALENCE_LIMIT);
Warning(msg);
Error(FAR_RUNTIME_ERROR, msg);
return false;
}
} else {
if (baseLevel.getMaxValence() == 0) {
char msg[1024];
snprintf(msg, 1024, "Invalid topology detected : maximum valence not assigned.");
Warning(msg);
Error(FAR_RUNTIME_ERROR, "Failure in TopologyRefinerFactory<>::Create() -- "
"maximum valence not assigned.");
return false;
}
}
if (fullValidation) {
if (not baseLevel.validateTopology(callback, callbackData)) {
char msg[1024];
snprintf(msg, 1024,
completeMissingTopology ?
"Invalid topology detected as completed from partial specification." :
"Invalid topology detected as fully specified.");
Warning(msg);
if (completeMissingTopology) {
Error(FAR_RUNTIME_ERROR, "Failure in TopologyRefinerFactory<>::Create() -- "
"invalid topology detected from partial specification.");
} else {
Error(FAR_RUNTIME_ERROR, "Failure in TopologyRefinerFactory<>::Create() -- "
"invalid topology detected as fully specified.");
}
return false;
}
}
@ -161,7 +173,8 @@ TopologyRefinerFactoryBase::prepareComponentTagsAndSharpness(TopologyRefiner& re
Sdc::Options options = refiner.GetSchemeOptions();
Sdc::Crease creasing(options);
bool makeBoundaryFacesHoles = (options.GetVtxBoundaryInterpolation() == Sdc::Options::VTX_BOUNDARY_NONE);
bool makeBoundaryFacesHoles = (options.GetVtxBoundaryInterpolation() == Sdc::Options::VTX_BOUNDARY_NONE
&& Sdc::SchemeTypeTraits::GetLocalNeighborhoodSize(refiner.GetSchemeType()) > 0);
bool sharpenCornerVerts = (options.GetVtxBoundaryInterpolation() == Sdc::Options::VTX_BOUNDARY_EDGE_AND_CORNER);
bool sharpenNonManFeatures = true; //(options.GetNonManifoldInterpolation() == Sdc::Options::NON_MANIFOLD_SHARP);
@ -278,6 +291,13 @@ TopologyRefinerFactoryBase::prepareFaceVaryingChannels(TopologyRefiner& refiner)
int regBoundaryValence = regVertexValence / 2;
for (int channel=0; channel<refiner.GetNumFVarChannels(); ++channel) {
if (baseLevel.getNumFVarValues(channel) == 0) {
char msg[1024];
snprintf(msg, 1024, "Failure in TopologyRefinerFactory<>::Create() -- "
"face-varying channel %d has no values.", channel);
Error(FAR_RUNTIME_ERROR, msg);
return false;
}
baseLevel.completeFVarChannelTopology(channel, regBoundaryValence);
}
return true;

View File

@ -539,15 +539,12 @@ TopologyRefinerFactory<MESH>::getBaseFaceFVarValues(TopologyRefiner & newRefiner
}
// XXXX manuelk MSVC specializes these templated functions which creates duplicated symbols
#ifndef _MSC_VER
template <class MESH>
bool
TopologyRefinerFactory<MESH>::resizeComponentTopology(TopologyRefiner& /* refiner */, MESH const& /* mesh */) {
Error(FAR_RUNTIME_ERROR, "Missing specialization for TopologyRefinerFactory<MESH>::resizeComponentTopology()");
Error(FAR_RUNTIME_ERROR,
"Failure in TopologyRefinerFactory<>::resizeComponentTopology() -- no specialization provided.");
//
// Sizing the topology tables:
@ -587,7 +584,8 @@ template <class MESH>
bool
TopologyRefinerFactory<MESH>::assignComponentTopology(TopologyRefiner& /* refiner */, MESH const& /* mesh */) {
Error(FAR_RUNTIME_ERROR, "Missing specialization for TopologyRefinerFactory<MESH>::assignComponentTopology()");
Error(FAR_RUNTIME_ERROR,
"Failure in TopologyRefinerFactory<>::assignComponentTopology() -- no specialization provided.");
//
// Assigning the topology tables:
@ -680,8 +678,6 @@ TopologyRefinerFactory<MESH>::reportInvalidTopology(
//
}
#endif
} // end namespace Far
} // end namespace OPENSUBDIV_VERSION

View File

@ -317,13 +317,15 @@ add_library(osd_cpu_obj
${PUBLIC_HEADER_FILES}
)
add_library(osd_gpu_obj
OBJECT
${GPU_SOURCE_FILES}
${PRIVATE_HEADER_FILES}
${PUBLIC_HEADER_FILES}
${INC_FILES}
)
if( GPU_SOURCE_FILES )
add_library(osd_gpu_obj
OBJECT
${GPU_SOURCE_FILES}
${PRIVATE_HEADER_FILES}
${PUBLIC_HEADER_FILES}
${INC_FILES}
)
endif()
_add_doxy_headers( "${DOXY_HEADER_FILES}" )

View File

@ -79,8 +79,8 @@ CpuPatchTable::CpuPatchTable(const Far::PatchTable *farPatchTable) {
farPatchTable->GetPatchParamTable();
std::vector<Far::Index> const &sharpnessIndexTable =
farPatchTable->GetSharpnessIndexTable();
int numPatches = farPatchTable->GetNumPatches(j);
for (int k = 0; k < numPatches; ++k) {
int numPatchesJ = farPatchTable->GetNumPatches(j);
for (int k = 0; k < numPatchesJ; ++k) {
float sharpness = 0.0;
int patchIndex = (int)_patchParamBuffer.size();
if (patchIndex < (int)sharpnessIndexTable.size()) {

View File

@ -114,12 +114,6 @@ GLComputeEvaluator::GLComputeEvaluator() : _workGroupSize(64) {
}
GLComputeEvaluator::~GLComputeEvaluator() {
if (_stencilKernel.program) {
glDeleteProgram(_stencilKernel.program);
}
if (_patchKernel.program) {
glDeleteProgram(_patchKernel.program);
}
}
static GLuint

View File

@ -70,12 +70,11 @@ GLPatchTable::allocate(Far::PatchTable const *farPatchTable) {
patchTable.GetPatchArrayBuffer() + numPatchArrays);
// copy index buffer
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _patchIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
glBindBuffer(GL_ARRAY_BUFFER, _patchIndexBuffer);
glBufferData(GL_ARRAY_BUFFER,
indexSize * sizeof(GLint),
patchTable.GetPatchIndexBuffer(),
GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// copy patchparam buffer
glBindBuffer(GL_ARRAY_BUFFER, _patchParamBuffer);
@ -89,17 +88,8 @@ GLPatchTable::allocate(Far::PatchTable const *farPatchTable) {
glGenTextures(1, &_patchIndexTexture);
glGenTextures(1, &_patchParamTexture);
GLuint buffer;
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER,
indexSize * sizeof(GLint),
patchTable.GetPatchIndexBuffer(),
GL_STATIC_DRAW);
glBindTexture(GL_TEXTURE_BUFFER, _patchIndexTexture);
// glTexBuffer(GL_TEXTURE_BUFFER, GL_R32I, _patchIndexBuffer);
glTexBuffer(GL_TEXTURE_BUFFER, GL_R32I, buffer);
glTexBuffer(GL_TEXTURE_BUFFER, GL_R32I, _patchIndexBuffer);
glBindTexture(GL_TEXTURE_BUFFER, _patchParamTexture);
glTexBuffer(GL_TEXTURE_BUFFER, GL_RGB32I, _patchParamBuffer);

View File

@ -26,6 +26,7 @@
layout(local_size_x=WORK_GROUP_SIZE, local_size_y=1, local_size_z=1) in;
layout(std430) buffer;
// source and destination buffers

View File

@ -124,7 +124,7 @@ convertToCompatibleStencilTable(
}
template <>
Far::StencilTable const *
inline Far::StencilTable const *
convertToCompatibleStencilTable<Far::StencilTable, Far::StencilTable, void>(
Far::StencilTable const *table, void * /*context*/) {
// no need for conversion
@ -134,7 +134,7 @@ convertToCompatibleStencilTable<Far::StencilTable, Far::StencilTable, void>(
}
template <>
Far::LimitStencilTable const *
inline Far::LimitStencilTable const *
convertToCompatibleStencilTable<Far::LimitStencilTable, Far::LimitStencilTable, void>(
Far::LimitStencilTable const *table, void * /*context*/) {
// no need for conversion
@ -144,7 +144,7 @@ convertToCompatibleStencilTable<Far::LimitStencilTable, Far::LimitStencilTable,
}
template <>
Far::StencilTable const *
inline Far::StencilTable const *
convertToCompatibleStencilTable<Far::StencilTable, Far::StencilTable, ID3D11DeviceContext>(
Far::StencilTable const *table, ID3D11DeviceContext * /*context*/) {
// no need for conversion
@ -173,12 +173,12 @@ public:
// XXX: FIXME, linear search
struct Entry {
Entry(BufferDescriptor const &srcDesc,
BufferDescriptor const &dstDesc,
BufferDescriptor const &duDesc,
BufferDescriptor const &dvDesc,
EVALUATOR *e) : srcDesc(srcDesc), dstDesc(dstDesc),
duDesc(duDesc), dvDesc(dvDesc), evaluator(e) {}
Entry(BufferDescriptor const &srcDescArg,
BufferDescriptor const &dstDescArg,
BufferDescriptor const &duDescArg,
BufferDescriptor const &dvDescArg,
EVALUATOR *evalArg) : srcDesc(srcDescArg), dstDesc(dstDescArg),
duDesc(duDescArg), dvDesc(dvDescArg), evaluator(evalArg) {}
BufferDescriptor srcDesc, dstDesc, duDesc, dvDesc;
EVALUATOR *evaluator;
};
@ -420,24 +420,24 @@ public:
instance, _deviceContext);
if (_varyingDesc.length > 0) {
BufferDescriptor srcDesc = _varyingDesc;
BufferDescriptor dstDesc(srcDesc);
dstDesc.offset += numControlVertices * dstDesc.stride;
BufferDescriptor vSrcDesc = _varyingDesc;
BufferDescriptor vDstDesc(vSrcDesc);
vDstDesc.offset += numControlVertices * vDstDesc.stride;
instance = GetEvaluator<Evaluator>(
_evaluatorCache, srcDesc, dstDesc,
_evaluatorCache, vSrcDesc, vDstDesc,
_deviceContext);
if (_varyingBuffer) {
// non-interleaved
Evaluator::EvalStencils(_varyingBuffer, srcDesc,
_varyingBuffer, dstDesc,
Evaluator::EvalStencils(_varyingBuffer, vSrcDesc,
_varyingBuffer, vDstDesc,
_varyingStencilTable,
instance, _deviceContext);
} else {
// interleaved
Evaluator::EvalStencils(_vertexBuffer, srcDesc,
_vertexBuffer, dstDesc,
Evaluator::EvalStencils(_vertexBuffer, vSrcDesc,
_vertexBuffer, vDstDesc,
_varyingStencilTable,
instance, _deviceContext);
}

View File

@ -79,6 +79,9 @@ public:
(void)instance; // unused
(void)deviceContext; // unused
if (stencilTable->GetNumStencils() == 0)
return false;
return EvalStencils(srcBuffer->BindCpuBuffer(), srcDesc,
dstBuffer->BindCpuBuffer(), dstDesc,
&stencilTable->GetSizes()[0],

View File

@ -80,11 +80,8 @@ OmpEvalStencils(float const * src, BufferDescriptor const &srcDesc,
int const * indices,
float const * weights,
int start, int end) {
if (start > 0) {
sizes += start;
indices += offsets[start];
weights += offsets[start];
}
start = (start > 0 ? start : 0);
src += srcDesc.offset;
dst += dstDesc.offset;
@ -96,7 +93,7 @@ OmpEvalStencils(float const * src, BufferDescriptor const &srcDesc,
#pragma omp parallel for
for (int i = 0; i < n; ++i) {
int index = i + (start > 0 ? start : 0); // Stencil index
int index = i + start; // Stencil index
// Get thread-local pointers
int const * threadIndices = indices + offsets[index];
@ -129,13 +126,7 @@ OmpEvalStencils(float const * src, BufferDescriptor const &srcDesc,
float const * duWeights,
float const * dvWeights,
int start, int end) {
if (start > 0) {
sizes += start;
indices += offsets[start];
weights += offsets[start];
duWeights += offsets[start];
dvWeights += offsets[start];
}
start = (start > 0 ? start : 0);
src += srcDesc.offset;
dst += dstDesc.offset;
@ -152,7 +143,7 @@ OmpEvalStencils(float const * src, BufferDescriptor const &srcDesc,
#pragma omp parallel for
for (int i = 0; i < n; ++i) {
int index = i + (start > 0 ? start : 0); // Stencil index
int index = i + start; // Stencil index
// Get thread-local pointers
int const * threadIndices = indices + offsets[index];

View File

@ -40,7 +40,7 @@
#include <GLES2/gl2.h>
#else
#if defined(_WIN32)
#define W32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
#if defined(OSD_USES_GLEW)

View File

@ -80,6 +80,9 @@ public:
(void)instance; // unused
(void)deviceContext; // unused
if (stencilTable->GetNumStencils() == 0)
return false;
return EvalStencils(srcBuffer->BindCpuBuffer(), srcDesc,
dstBuffer->BindCpuBuffer(), dstDesc,
&stencilTable->GetSizes()[0],

View File

@ -169,11 +169,6 @@ TbbEvalStencils(float const * src, BufferDescriptor const &srcDesc,
float const * weights,
int start, int end) {
if (start > 0) {
sizes += start;
indices += offsets[start];
weights += offsets[start];
}
src += srcDesc.offset;
dst += dstDesc.offset;
@ -197,13 +192,6 @@ TbbEvalStencils(float const * src, BufferDescriptor const &srcDesc,
float const * duWeights,
float const * dvWeights,
int start, int end) {
if (start > 0) {
sizes += start;
indices += offsets[start];
weights += offsets[start];
duWeights += offsets[start];
dvWeights += offsets[start];
}
if (src) src += srcDesc.offset;
if (dst) dst += dstDesc.offset;

View File

@ -42,14 +42,14 @@ struct PatchCoord {
/// \brief Constructor
///
/// @param handle patch handle
/// @param handleArg patch handle
///
/// @param s parametric location on the patch
/// @param sArg parametric location on the patch
///
/// @param t parametric location on the patch
/// @param tArg parametric location on the patch
///
PatchCoord(Far::PatchTable::PatchHandle handle, float s, float t) :
handle(handle), s(s), t(t) { }
PatchCoord(Far::PatchTable::PatchHandle handleArg, float sArg, float tArg) :
handle(handleArg), s(sArg), t(tArg) { }
PatchCoord() : s(0), t(0) {
handle.arrayIndex = 0;
@ -63,10 +63,10 @@ struct PatchCoord {
struct PatchArray {
// 4-ints struct.
PatchArray(Far::PatchDescriptor desc, int numPatches,
int indexBase, int primitiveIdBase) :
desc(desc), numPatches(numPatches), indexBase(indexBase),
primitiveIdBase(primitiveIdBase) {}
PatchArray(Far::PatchDescriptor desc_in, int numPatches_in,
int indexBase_in, int primitiveIdBase_in) :
desc(desc_in), numPatches(numPatches_in), indexBase(indexBase_in),
primitiveIdBase(primitiveIdBase_in) {}
Far::PatchDescriptor const &GetDescriptor() const {
return desc;

View File

@ -489,8 +489,8 @@ Scheme<SCHEME_LOOP>::assignCreaseLimitTangentMasks(VERTEX const& vertex,
double theta = M_PI / (interiorEdgeCount + 1);
Weight cWeight = -3.0f * std::sin(theta);
Weight eWeightCoeff = -3.0f * (2.0f * std::cos(theta) - 2.0f);
Weight cWeight = -3.0f * (Weight) std::sin(theta);
Weight eWeightCoeff = -3.0f * (2.0f * (Weight) std::cos(theta) - 2.0f);
tan2Mask.VertexWeight(0) = 0.0f;
@ -498,7 +498,7 @@ Scheme<SCHEME_LOOP>::assignCreaseLimitTangentMasks(VERTEX const& vertex,
tan2Mask.EdgeWeight(creaseEnds[1]) = cWeight;
for (int i = 1; i <= interiorEdgeCount; ++i) {
tan2Mask.EdgeWeight(creaseEnds[0] + i) = eWeightCoeff * std::sin(i * theta);
tan2Mask.EdgeWeight(creaseEnds[0] + i) = eWeightCoeff * (Weight) std::sin(i * theta);
}
} else if (interiorEdgeCount == 1) {
// See notes above regarding scale factor of 3.0:
@ -566,8 +566,8 @@ Scheme<SCHEME_LOOP>::assignSmoothLimitTangentMasks(VERTEX const& vertex,
Weight alpha = (Weight) (2.0f * M_PI / valence);
for (int i = 0; i < valence; ++i) {
double alphaI = alpha * i;
tan1Mask.EdgeWeight(i) = std::cos(alphaI);
tan2Mask.EdgeWeight(i) = std::sin(alphaI);
tan1Mask.EdgeWeight(i) = (Weight) std::cos(alphaI);
tan2Mask.EdgeWeight(i) = (Weight) std::sin(alphaI);
}
}
}

View File

@ -25,7 +25,7 @@
#ifndef OPENSUBDIV3_VERSION_H
#define OPENSUBDIV3_VERSION_H
#define OPENSUBDIV_VERSION v3_0_0
#define OPENSUBDIV_VERSION v3_0_3
namespace OpenSubdiv {
namespace OPENSUBDIV_VERSION {

View File

@ -66,8 +66,8 @@ public:
ConstArray() : _begin(0), _size(0) { }
ConstArray(value_type const * ptr, size_type size) :
_begin(ptr), _size(size) { }
ConstArray(value_type const * ptr, size_type sizeArg) :
_begin(ptr), _size(sizeArg) { }
size_type size() const { return _size; }
@ -117,7 +117,7 @@ public:
Array() : ConstArray<TYPE>() { }
Array(value_type * ptr, size_type size) : ConstArray<TYPE>(ptr, size) { }
Array(value_type * ptr, size_type sizeArg) : ConstArray<TYPE>(ptr, sizeArg) { }
public:

View File

@ -249,7 +249,7 @@ FVarLevel::completeTopologyFromFaceValues(int regularBoundaryValence) {
eTag._linear = (ETag::ETagSize) _hasLinearBoundaries;
}
}
} else {
} else if (vFaces.size() > 0) {
//
// Unfortunately for non-manifold cases we can't make as much use of the
// retrieved face-values as there is no correlation between the incident
@ -308,7 +308,7 @@ FVarLevel::completeTopologyFromFaceValues(int regularBoundaryValence) {
// boundary vertices that have not already been tagged.
//
if (vIsBoundary && !vertexMismatch[vIndex]) {
if (_hasLinearBoundaries) {
if (_hasLinearBoundaries && (vFaces.size() > 0)) {
vertexMismatch[vIndex] = true;
if (vIsManifold) {
@ -419,7 +419,11 @@ FVarLevel::completeTopologyFromFaceValues(int regularBoundaryValence) {
//
IndexArray vValues = getVertexValues(vIndex);
vValues[0] = _faceVertValues[_level.getOffsetOfFaceVertices(vFaces[0]) + vInFace[0]];
if (vFaces.size() > 0) {
vValues[0] = _faceVertValues[_level.getOffsetOfFaceVertices(vFaces[0]) + vInFace[0]];
} else {
vValues[0] = 0;
}
if (!vertexMismatch[vIndex]) {
continue;
}

View File

@ -380,6 +380,15 @@ namespace {
}
return 0;
}
#ifdef __INTEL_COMPILER
#pragma warning (push)
#pragma warning disable 1572
#endif
inline bool isSharpnessEqual(float s1, float s2) { return (s1 == s2); }
#ifdef __INTEL_COMPILER
#pragma warning (pop)
#endif
}
void
@ -1296,8 +1305,8 @@ Level::isSingleCreasePatch(Index face, float *sharpnessOut, int *rotationOut) co
}
}
// sharpnesses have to be [0, x, 0, x] or [x, 0, x, 0]
if (sharpnesses[0] != sharpnesses[2] or
sharpnesses[1] != sharpnesses[3]) {
if (!isSharpnessEqual(sharpnesses[0], sharpnesses[2]) or
!isSharpnessEqual(sharpnesses[1], sharpnesses[3])) {
return false;
}
}

View File

@ -521,23 +521,23 @@ Level::getFaceEdges(Index faceIndex) {
//
inline ConstIndexArray
Level::getVertexFaces(Index vertIndex) const {
return ConstIndexArray(&_vertFaceIndices[_vertFaceCountsAndOffsets[vertIndex*2+1]],
return ConstIndexArray( (&_vertFaceIndices[0]) + _vertFaceCountsAndOffsets[vertIndex*2+1],
_vertFaceCountsAndOffsets[vertIndex*2]);
}
inline IndexArray
Level::getVertexFaces(Index vertIndex) {
return IndexArray(&_vertFaceIndices[_vertFaceCountsAndOffsets[vertIndex*2+1]],
return IndexArray( (&_vertFaceIndices[0]) + _vertFaceCountsAndOffsets[vertIndex*2+1],
_vertFaceCountsAndOffsets[vertIndex*2]);
}
inline ConstLocalIndexArray
Level::getVertexFaceLocalIndices(Index vertIndex) const {
return ConstLocalIndexArray(&_vertFaceLocalIndices[_vertFaceCountsAndOffsets[vertIndex*2+1]],
return ConstLocalIndexArray( (&_vertFaceLocalIndices[0]) + _vertFaceCountsAndOffsets[vertIndex*2+1],
_vertFaceCountsAndOffsets[vertIndex*2]);
}
inline LocalIndexArray
Level::getVertexFaceLocalIndices(Index vertIndex) {
return LocalIndexArray(&_vertFaceLocalIndices[_vertFaceCountsAndOffsets[vertIndex*2+1]],
return LocalIndexArray( (&_vertFaceLocalIndices[0]) + _vertFaceCountsAndOffsets[vertIndex*2+1],
_vertFaceCountsAndOffsets[vertIndex*2]);
}
@ -558,23 +558,23 @@ Level::trimVertexFaces(Index vertIndex, int count) {
//
inline ConstIndexArray
Level::getVertexEdges(Index vertIndex) const {
return ConstIndexArray(&_vertEdgeIndices[_vertEdgeCountsAndOffsets[vertIndex*2+1]],
return ConstIndexArray( (&_vertEdgeIndices[0]) +_vertEdgeCountsAndOffsets[vertIndex*2+1],
_vertEdgeCountsAndOffsets[vertIndex*2]);
}
inline IndexArray
Level::getVertexEdges(Index vertIndex) {
return IndexArray(&_vertEdgeIndices[_vertEdgeCountsAndOffsets[vertIndex*2+1]],
return IndexArray( (&_vertEdgeIndices[0]) +_vertEdgeCountsAndOffsets[vertIndex*2+1],
_vertEdgeCountsAndOffsets[vertIndex*2]);
}
inline ConstLocalIndexArray
Level::getVertexEdgeLocalIndices(Index vertIndex) const {
return ConstLocalIndexArray(&_vertEdgeLocalIndices[_vertEdgeCountsAndOffsets[vertIndex*2+1]],
return ConstLocalIndexArray( (&_vertEdgeLocalIndices[0]) + _vertEdgeCountsAndOffsets[vertIndex*2+1],
_vertEdgeCountsAndOffsets[vertIndex*2]);
}
inline LocalIndexArray
Level::getVertexEdgeLocalIndices(Index vertIndex) {
return LocalIndexArray(&_vertEdgeLocalIndices[_vertEdgeCountsAndOffsets[vertIndex*2+1]],
return LocalIndexArray( (&_vertEdgeLocalIndices[0]) + _vertEdgeCountsAndOffsets[vertIndex*2+1],
_vertEdgeCountsAndOffsets[vertIndex*2]);
}
@ -614,23 +614,27 @@ Level::getEdgeVertices(Index edgeIndex) {
//
inline ConstIndexArray
Level::getEdgeFaces(Index edgeIndex) const {
return ConstIndexArray(&_edgeFaceIndices[_edgeFaceCountsAndOffsets[edgeIndex*2+1]],
_edgeFaceCountsAndOffsets[edgeIndex*2]);
return ConstIndexArray(&_edgeFaceIndices[0] +
_edgeFaceCountsAndOffsets[edgeIndex*2+1],
_edgeFaceCountsAndOffsets[edgeIndex*2]);
}
inline IndexArray
Level::getEdgeFaces(Index edgeIndex) {
return IndexArray(&_edgeFaceIndices[_edgeFaceCountsAndOffsets[edgeIndex*2+1]],
_edgeFaceCountsAndOffsets[edgeIndex*2]);
return IndexArray(&_edgeFaceIndices[0] +
_edgeFaceCountsAndOffsets[edgeIndex*2+1],
_edgeFaceCountsAndOffsets[edgeIndex*2]);
}
inline ConstLocalIndexArray
Level::getEdgeFaceLocalIndices(Index edgeIndex) const {
return ConstLocalIndexArray(&_edgeFaceLocalIndices[_edgeFaceCountsAndOffsets[edgeIndex*2+1]],
_edgeFaceCountsAndOffsets[edgeIndex*2]);
return ConstLocalIndexArray(&_edgeFaceLocalIndices[0] +
_edgeFaceCountsAndOffsets[edgeIndex*2+1],
_edgeFaceCountsAndOffsets[edgeIndex*2]);
}
inline LocalIndexArray
Level::getEdgeFaceLocalIndices(Index edgeIndex) {
return LocalIndexArray(&_edgeFaceLocalIndices[_edgeFaceCountsAndOffsets[edgeIndex*2+1]],
return LocalIndexArray(&_edgeFaceLocalIndices[0] +
_edgeFaceCountsAndOffsets[edgeIndex*2+1],
_edgeFaceCountsAndOffsets[edgeIndex*2]);
}
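A note on the pattern introduced throughout these accessors: computing the element address as (&vec[0]) + offset rather than &vec[offset] appears intended to sidestep checked operator[] assertions when a component has no incident faces or edges and its offset equals vec.size(). A tiny sketch of the distinction, assuming the vector itself is non-empty:
#include <vector>
int const * rangeBegin(std::vector<int> const & v, int offset) {
    // v[offset] can assert in checked/debug builds when offset == v.size(),
    // even though the resulting zero-length range is never dereferenced;
    // taking the address of element 0 and advancing the pointer avoids that.
    return (&v[0]) + offset;
}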

View File

@ -40,8 +40,8 @@ namespace internal {
//
// Simple constructor, destructor and basic initializers:
//
QuadRefinement::QuadRefinement(Level const & parent, Level & child, Sdc::Options const & options) :
Refinement(parent, child, options) {
QuadRefinement::QuadRefinement(Level const & parentArg, Level & childArg, Sdc::Options const & optionsArg) :
Refinement(parentArg, childArg, optionsArg) {
_splitType = Sdc::SPLIT_TO_QUADS;
_regFaceSize = 4;
@ -344,9 +344,6 @@ QuadRefinement::populateEdgeVerticesFromParentEdges() {
void
QuadRefinement::populateEdgeFaceRelation() {
const Level& parent = *_parent;
Level& child = *_child;
//
// Notes on allocating/initializing the edge-face counts/offsets vector:
//
@ -367,27 +364,27 @@ QuadRefinement::populateEdgeFaceRelation() {
// - could at least make a quick traversal of components and use the above
// two points to get a much closer estimate than what is used for uniform
//
int childEdgeFaceIndexSizeEstimate = (int)parent._faceVertIndices.size() * 2 +
(int)parent._edgeFaceIndices.size() * 2;
int childEdgeFaceIndexSizeEstimate = (int)_parent->_faceVertIndices.size() * 2 +
(int)_parent->_edgeFaceIndices.size() * 2;
child._edgeFaceCountsAndOffsets.resize(child.getNumEdges() * 2);
child._edgeFaceIndices.resize( childEdgeFaceIndexSizeEstimate);
child._edgeFaceLocalIndices.resize(childEdgeFaceIndexSizeEstimate);
_child->_edgeFaceCountsAndOffsets.resize(_child->getNumEdges() * 2);
_child->_edgeFaceIndices.resize( childEdgeFaceIndexSizeEstimate);
_child->_edgeFaceLocalIndices.resize(childEdgeFaceIndexSizeEstimate);
// Update _maxEdgeFaces from the parent level before calling the
// populateEdgeFacesFromParent methods below, as these may further
// update _maxEdgeFaces.
child._maxEdgeFaces = parent._maxEdgeFaces;
_child->_maxEdgeFaces = _parent->_maxEdgeFaces;
populateEdgeFacesFromParentFaces();
populateEdgeFacesFromParentEdges();
// Revise the over-allocated estimate based on what is used (as indicated in the
// count/offset for the last vertex) and trim the index vector accordingly:
childEdgeFaceIndexSizeEstimate = child.getNumEdgeFaces(child.getNumEdges()-1) +
child.getOffsetOfEdgeFaces(child.getNumEdges()-1);
child._edgeFaceIndices.resize( childEdgeFaceIndexSizeEstimate);
child._edgeFaceLocalIndices.resize(childEdgeFaceIndexSizeEstimate);
childEdgeFaceIndexSizeEstimate = _child->getNumEdgeFaces(_child->getNumEdges()-1) +
_child->getOffsetOfEdgeFaces(_child->getNumEdges()-1);
_child->_edgeFaceIndices.resize( childEdgeFaceIndexSizeEstimate);
_child->_edgeFaceLocalIndices.resize(childEdgeFaceIndexSizeEstimate);
}
void
@ -516,9 +513,6 @@ QuadRefinement::populateEdgeFacesFromParentEdges() {
void
QuadRefinement::populateVertexFaceRelation() {
const Level& parent = *_parent;
Level& child = *_child;
//
// Notes on allocating/initializing the vertex-face counts/offsets vector:
//
@ -537,13 +531,13 @@ QuadRefinement::populateVertexFaceRelation() {
// - where the 1 or 2 is number of child edges of parent edge
// - same as parent vert for verts from parent verts (catmark)
//
int childVertFaceIndexSizeEstimate = (int)parent._faceVertIndices.size()
+ (int)parent._edgeFaceIndices.size() * 2
+ (int)parent._vertFaceIndices.size();
int childVertFaceIndexSizeEstimate = (int)_parent->_faceVertIndices.size()
+ (int)_parent->_edgeFaceIndices.size() * 2
+ (int)_parent->_vertFaceIndices.size();
child._vertFaceCountsAndOffsets.resize(child.getNumVertices() * 2);
child._vertFaceIndices.resize( childVertFaceIndexSizeEstimate);
child._vertFaceLocalIndices.resize( childVertFaceIndexSizeEstimate);
_child->_vertFaceCountsAndOffsets.resize(_child->getNumVertices() * 2);
_child->_vertFaceIndices.resize( childVertFaceIndexSizeEstimate);
_child->_vertFaceLocalIndices.resize( childVertFaceIndexSizeEstimate);
if (getFirstChildVertexFromVertices() == 0) {
populateVertexFacesFromParentVertices();
@ -557,10 +551,10 @@ QuadRefinement::populateVertexFaceRelation() {
// Revise the over-allocated estimate based on what is used (as indicated in the
// count/offset for the last vertex) and trim the index vectors accordingly:
childVertFaceIndexSizeEstimate = child.getNumVertexFaces(child.getNumVertices()-1) +
child.getOffsetOfVertexFaces(child.getNumVertices()-1);
child._vertFaceIndices.resize( childVertFaceIndexSizeEstimate);
child._vertFaceLocalIndices.resize(childVertFaceIndexSizeEstimate);
childVertFaceIndexSizeEstimate = _child->getNumVertexFaces(_child->getNumVertices()-1) +
_child->getOffsetOfVertexFaces(_child->getNumVertices()-1);
_child->_vertFaceIndices.resize( childVertFaceIndexSizeEstimate);
_child->_vertFaceLocalIndices.resize(childVertFaceIndexSizeEstimate);
}
void
@ -702,9 +696,6 @@ QuadRefinement::populateVertexFacesFromParentVertices() {
void
QuadRefinement::populateVertexEdgeRelation() {
const Level& parent = *_parent;
Level& child = *_child;
//
// Notes on allocating/initializing the vertex-edge counts/offsets vector:
//
@ -727,13 +718,13 @@ QuadRefinement::populateVertexEdgeRelation() {
// - any end vertex will require all N child faces (catmark)
// - same as parent vert for verts from parent verts (catmark)
//
int childVertEdgeIndexSizeEstimate = (int)parent._faceVertIndices.size()
+ (int)parent._edgeFaceIndices.size() + parent.getNumEdges() * 2
+ (int)parent._vertEdgeIndices.size();
int childVertEdgeIndexSizeEstimate = (int)_parent->_faceVertIndices.size()
+ (int)_parent->_edgeFaceIndices.size() + _parent->getNumEdges() * 2
+ (int)_parent->_vertEdgeIndices.size();
child._vertEdgeCountsAndOffsets.resize(child.getNumVertices() * 2);
child._vertEdgeIndices.resize( childVertEdgeIndexSizeEstimate);
child._vertEdgeLocalIndices.resize( childVertEdgeIndexSizeEstimate);
_child->_vertEdgeCountsAndOffsets.resize(_child->getNumVertices() * 2);
_child->_vertEdgeIndices.resize( childVertEdgeIndexSizeEstimate);
_child->_vertEdgeLocalIndices.resize( childVertEdgeIndexSizeEstimate);
if (getFirstChildVertexFromVertices() == 0) {
populateVertexEdgesFromParentVertices();
@ -747,10 +738,10 @@ QuadRefinement::populateVertexEdgeRelation() {
// Revise the over-allocated estimate based on what is used (as indicated in the
// count/offset for the last vertex) and trim the index vectors accordingly:
childVertEdgeIndexSizeEstimate = child.getNumVertexEdges(child.getNumVertices()-1) +
child.getOffsetOfVertexEdges(child.getNumVertices()-1);
child._vertEdgeIndices.resize( childVertEdgeIndexSizeEstimate);
child._vertEdgeLocalIndices.resize(childVertEdgeIndexSizeEstimate);
childVertEdgeIndexSizeEstimate = _child->getNumVertexEdges(_child->getNumVertices()-1) +
_child->getOffsetOfVertexEdges(_child->getNumVertices()-1);
_child->_vertEdgeIndices.resize( childVertEdgeIndexSizeEstimate);
_child->_vertEdgeLocalIndices.resize(childVertEdgeIndexSizeEstimate);
}
void

View File

@ -45,9 +45,9 @@ namespace internal {
//
// Simple constructor, destructor and basic initializers:
//
Refinement::Refinement(Level const & parent, Level & child, Sdc::Options const& options) :
_parent(&parent),
_child(&child),
Refinement::Refinement(Level const & parentArg, Level & childArg, Sdc::Options const& options) :
_parent(&parentArg),
_child(&childArg),
_options(options),
_regFaceSize(-1),
_uniform(false),
@ -65,8 +65,8 @@ Refinement::Refinement(Level const & parent, Level & child, Sdc::Options const&
_firstChildVertFromEdge(0),
_firstChildVertFromVert(0) {
assert((child.getDepth() == 0) && (child.getNumVertices() == 0));
child._depth = 1 + parent.getDepth();
assert((childArg.getDepth() == 0) && (childArg.getNumVertices() == 0));
childArg._depth = 1 + parentArg.getDepth();
}
Refinement::~Refinement() {

View File

@ -40,8 +40,8 @@ namespace internal {
//
// Simple constructor, destructor and basic initializers:
//
TriRefinement::TriRefinement(Level const & parent, Level & child, Sdc::Options const & options) :
Refinement(parent, child, options) {
TriRefinement::TriRefinement(Level const & parentArg, Level & childArg, Sdc::Options const & optionsArg) :
Refinement(parentArg, childArg, optionsArg) {
_splitType = Sdc::SPLIT_TO_TRIS;
_regFaceSize = 3;

View File

@ -30,6 +30,8 @@ if (NOT NO_REGRESSION)
add_subdirectory(far_regression)
add_subdirectory(far_perf)
if(OPENGL_FOUND AND (GLEW_FOUND OR APPLE) AND GLFW_FOUND)
add_subdirectory(osd_regression)
else()

View File

@ -47,8 +47,8 @@ namespace {
template<class T>
void
GetReorderedHbrVertexData(
const OpenSubdiv::Far::TopologyRefiner &refiner,
const OpenSubdiv::HbrMesh<T> &hmesh,
const OpenSubdiv::Far::TopologyRefiner &farRefiner,
const OpenSubdiv::HbrMesh<T> &hbrMesh,
std::vector<T> *hbrVertexData,
std::vector<bool> *hbrVertexOnBoundaryData = NULL)
{
@ -178,14 +178,14 @@ GetReorderedHbrVertexData(
}
};
Mapper mapper(refiner, hmesh);
Mapper mapper(farRefiner, hbrMesh);
int nverts = hmesh.GetNumVertices();
assert( nverts==refiner.GetNumVerticesTotal() );
int nverts = hbrMesh.GetNumVertices();
assert( nverts==farRefiner.GetNumVerticesTotal() );
hbrVertexData->resize(nverts);
for (int level=0, ofs=0; level<(refiner.GetMaxLevel()+1); ++level) {
for (int level=0, ofs=0; level<(farRefiner.GetMaxLevel()+1); ++level) {
typename Mapper::LevelMap & map = mapper.maps[level];
for (int i=0; i<(int)map.verts.size(); ++i) {

View File

@ -129,8 +129,8 @@ Shape * Shape::parseObj(char const * shapestr, Scheme shapescheme,
std::stringstream ss;
ss << ifs.rdbuf();
ifs.close();
std::string str = ss.str();
s->parseMtllib(str.c_str());
std::string tmpStr = ss.str();
s->parseMtllib(tmpStr.c_str());
s->mtllib = buf;
}
} break;

View File

@ -0,0 +1,49 @@
#
# Copyright 2015 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
include_directories(
"${OPENSUBDIV_INCLUDE_DIR}/"
"${PROJECT_SOURCE_DIR}/"
)
set(SOURCE_FILES
far_perf.cpp
)
set(PLATFORM_LIBRARIES
"${OSD_LINK_TARGET}"
)
_add_executable(far_perf
${SOURCE_FILES}
$<TARGET_OBJECTS:sdc_obj>
$<TARGET_OBJECTS:vtr_obj>
$<TARGET_OBJECTS:far_obj>
$<TARGET_OBJECTS:regression_common_obj>
)
install(TARGETS far_perf DESTINATION "${CMAKE_BINDIR_BASE}")
add_test(far_perf ${EXECUTABLE_OUTPUT_PATH}/far_perf)

View File

@ -0,0 +1,171 @@
//
// Copyright 2015 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <sstream>
#include <opensubdiv/far/primvarRefiner.h>
#include <opensubdiv/far/stencilTableFactory.h>
#include <opensubdiv/far/patchTableFactory.h>
#include "../../regression/common/far_utils.h"
// XXX: revisit the directory structure for examples/tests
#include "../../examples/common/stopwatch.h"
#include "init_shapes.h"
//------------------------------------------------------------------------------
static void
doPerf(const Shape *shape, int maxlevel, int endCapType)
{
using namespace OpenSubdiv;
Sdc::SchemeType type = OpenSubdiv::Sdc::SCHEME_CATMARK;
Sdc::Options sdcOptions;
sdcOptions.SetVtxBoundaryInterpolation(Sdc::Options::VTX_BOUNDARY_EDGE_ONLY);
Stopwatch s;
// ----------------------------------------------------------------------
// Instantiate a FarTopologyRefiner from the descriptor and refine
s.Start();
Far::TopologyRefiner * refiner = Far::TopologyRefinerFactory<Shape>::Create(
*shape, Far::TopologyRefinerFactory<Shape>::Options(type, sdcOptions));
{
Far::TopologyRefiner::AdaptiveOptions options(maxlevel);
refiner->RefineAdaptive(options);
}
s.Stop();
double timeRefine = s.GetElapsed();
// ----------------------------------------------------------------------
// Create stencil table
s.Start();
Far::StencilTable const * vertexStencils = NULL;
{
Far::StencilTableFactory::Options options;
vertexStencils = Far::StencilTableFactory::Create(*refiner, options);
}
s.Stop();
double timeCreateStencil = s.GetElapsed();
// ----------------------------------------------------------------------
// Create patch table
s.Start();
Far::PatchTable const * patchTable = NULL;
{
Far::PatchTableFactory::Options poptions(maxlevel);
poptions.SetEndCapType((Far::PatchTableFactory::Options::EndCapType)endCapType);
patchTable = Far::PatchTableFactory::Create(*refiner, poptions);
}
s.Stop();
double timeCreatePatch = s.GetElapsed();
// ----------------------------------------------------------------------
// append local points to stencils
s.Start();
{
if (Far::StencilTable const *vertexStencilsWithLocalPoints =
Far::StencilTableFactory::AppendLocalPointStencilTable(
*refiner, vertexStencils,
patchTable->GetLocalPointStencilTable())) {
delete vertexStencils;
vertexStencils = vertexStencilsWithLocalPoints;
}
}
s.Stop();
double timeAppendStencil = s.GetElapsed();
// ---------------------------------------------------------------------
double timeTotal = s.GetTotalElapsed();
printf("TopologyRefiner::Refine %f %5.2f%%\n",
timeRefine, timeRefine/timeTotal*100);
printf("StencilTableFactory::Create %f %5.2f%%\n",
timeCreateStencil, timeCreateStencil/timeTotal*100);
printf("PatchTableFactory::Create %f %5.2f%%\n",
timeCreatePatch, timeCreatePatch/timeTotal*100);
printf("StencilTableFactory::Append %f %5.2f%%\n",
timeAppendStencil, timeAppendStencil/timeTotal*100);
printf("Total %f\n", timeTotal);
}
//------------------------------------------------------------------------------
int main(int argc, char **argv)
{
using namespace OpenSubdiv;
int maxlevel = 8;
std::string str;
int endCapType = Far::PatchTableFactory::Options::ENDCAP_GREGORY_BASIS;
for (int i = 1; i < argc; ++i) {
if (strstr(argv[i], ".obj")) {
std::ifstream ifs(argv[i]);
if (ifs) {
std::stringstream ss;
ss << ifs.rdbuf();
ifs.close();
str = ss.str();
g_shapes.push_back(ShapeDesc(argv[i], str.c_str(), kCatmark));
}
}
else if (!strcmp(argv[i], "-l")) {
maxlevel = atoi(argv[++i]);
}
else if (!strcmp(argv[i], "-e")) {
const char *type = argv[++i];
if (!strcmp(type, "bspline")) {
endCapType = Far::PatchTableFactory::Options::ENDCAP_BSPLINE_BASIS;
} else if (!strcmp(type, "gregory")) {
endCapType = Far::PatchTableFactory::Options::ENDCAP_GREGORY_BASIS;
} else {
printf("Unknown endcap type %s\n", type);
return 1;
}
}
}
if (g_shapes.empty()) {
initShapes();
}
for (int i = 0; i < (int)g_shapes.size(); ++i) {
Shape const * shape = Shape::parseObj(
g_shapes[i].data.c_str(),
g_shapes[i].scheme,
g_shapes[i].isLeftHanded);
for (int lv = 1; lv <= maxlevel; ++lv) {
printf("---- %s, level %d ----\n", g_shapes[i].name.c_str(), lv);
doPerf(shape, lv, endCapType);
}
}
}
//------------------------------------------------------------------------------

View File

@ -0,0 +1,48 @@
//
// Copyright 2013 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#include "../common/shape_utils.h"
struct ShapeDesc {
ShapeDesc(char const * iname, std::string const & idata, Scheme ischeme,
bool iisLeftHanded=false) :
name(iname), data(idata), scheme(ischeme), isLeftHanded(iisLeftHanded) { }
std::string name,
data;
Scheme scheme;
bool isLeftHanded;
};
static std::vector<ShapeDesc> g_shapes;
#include "../shapes/all.h"
//------------------------------------------------------------------------------
static void initShapes() {
g_shapes.push_back( ShapeDesc("catmark_car", catmark_car, kCatmark ) );
g_shapes.push_back( ShapeDesc("catmark_pole64", catmark_pole64, kCatmark ) );
}
//------------------------------------------------------------------------------

View File

@ -585,13 +585,13 @@ usage(char ** argv) {
static void
parseArgs(int argc, char ** argv) {
for (int i=1; i<argc; ++i) {
if (not strcmp(argv[i],"-compute")) {
for (int argi=1; argi<argc; ++argi) {
if (not strcmp(argv[argi],"-compute")) {
const char * backend = NULL;
if (i<(argc-1))
backend = argv[++i];
if (argi<(argc-1))
backend = argv[++argi];
if (not strcmp(backend, "all")) {
g_Backend = -1;
@ -612,8 +612,8 @@ parseArgs(int argc, char ** argv) {
exit(0);
}
}
} else if ( (not strcmp(argv[i],"-help")) or
(not strcmp(argv[i],"-h")) ) {
} else if ( (not strcmp(argv[argi],"-help")) or
(not strcmp(argv[argi],"-h")) ) {
usage(argv);
exit(1);
} else {

View File

@ -70,9 +70,11 @@
#include "catmark_nonman_quadpole8.h"
#include "catmark_nonman_quadpole64.h"
#include "catmark_nonman_quadpole360.h"
#include "catmark_nonman_bareverts.h"
#include "catmark_pawn.h"
#include "catmark_pyramid_creases0.h"
#include "catmark_pyramid_creases1.h"
#include "catmark_pyramid_creases2.h"
#include "catmark_pyramid.h"
#include "catmark_rook.h"
#include "catmark_single_crease.h"
@ -82,11 +84,13 @@
#include "catmark_square_hedit1.h"
#include "catmark_square_hedit2.h"
#include "catmark_square_hedit3.h"
#include "catmark_square_hedit4.h"
#include "catmark_tent_creases0.h"
#include "catmark_tent_creases1.h"
#include "catmark_tent.h"
#include "catmark_torus.h"
#include "catmark_torus_creases0.h"
#include "catmark_torus_creases1.h"
#include "bilinear_cube.h"

View File

@ -0,0 +1,91 @@
//
// Copyright 2015 DreamWorks Animation LLC.
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
static const std::string catmark_nonman_bareverts =
"# 4x4 grid of quads -- 25 vertices:\n"
"v 0.00 0.00 0.00\n"
"v 0.25 0.00 0.00\n"
"v 0.50 0.00 0.00\n"
"v 0.75 0.00 0.00\n"
"v 1.00 0.00 0.00\n"
"v 0.00 0.25 0.00\n"
"v 0.25 0.25 0.20\n"
"v 0.50 0.25 0.20\n"
"v 0.75 0.25 0.20\n"
"v 1.00 0.25 0.00\n"
"v 0.00 0.50 0.00\n"
"v 0.25 0.50 0.20\n"
"v 0.50 0.50 0.30\n"
"v 0.75 0.50 0.20\n"
"v 1.00 0.50 0.00\n"
"v 0.00 0.75 0.00\n"
"v 0.25 0.75 0.20\n"
"v 0.50 0.75 0.20\n"
"v 0.75 0.75 0.20\n"
"v 1.00 0.75 0.00\n"
"v 0.00 1.00 0.00\n"
"v 0.25 1.00 0.00\n"
"v 0.50 1.00 0.00\n"
"v 0.75 1.00 0.00\n"
"v 1.00 1.00 0.00\n"
"vt 0.00 0.00\n"
"vt 0.25 0.00\n"
"vt 0.50 0.00\n"
"vt 0.75 0.00\n"
"vt 1.00 0.00\n"
"vt 0.00 0.20\n"
"vt 0.25 0.20\n"
"vt 0.50 0.10\n"
"vt 0.75 0.20\n"
"vt 1.00 0.20\n"
"vt 0.00 0.40\n"
"vt 0.25 0.40\n"
"vt 0.50 0.20\n"
"vt 0.75 0.40\n"
"vt 1.00 0.40\n"
"vt 0.00 0.60\n"
"vt 0.25 0.60\n"
"vt 0.50 0.40\n"
"vt 0.75 0.60\n"
"vt 1.00 0.60\n"
"vt 0.00 0.80\n"
"vt 0.25 0.80\n"
"vt 0.50 0.65\n"
"vt 0.75 0.80\n"
"vt 1.00 0.80\n"
"vt 0.00 1.00\n"
"vt 0.25 1.00\n"
"vt 0.40 1.00\n"
"vt 0.75 1.00\n"
"vt 1.00 1.00\n"
"vt 0.60 1.00\n"
"f 6/6 7/7 12/12 11/11\n"
"f 7/7 8/8 13/13 12/12\n"
"f 8/8 9/9 14/14 13/13\n"
"f 9/9 10/10 15/15 14/14\n"
"f 11/16 12/17 17/22 16/21\n"
"f 12/17 13/18 18/23 17/22\n"
"f 13/18 14/19 19/24 18/23\n"
"f 14/19 15/20 20/25 19/24\n"
;
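The faces above only reference the middle rows of the 5x5 vertex grid, so the first and last rows of vertices are listed but never used by a face; these unreferenced "bare" vertices are the non-manifold case this shape appears intended to exercise. A reduced sketch of the same idea, as a hypothetical shape string that is not part of the regression suite:

#include <string>

static const std::string tiny_bareverts =
    "v 0.0 0.0 0.0\n"
    "v 1.0 0.0 0.0\n"
    "v 1.0 1.0 0.0\n"
    "v 0.0 1.0 0.0\n"
    "v 2.0 2.0 0.0\n"   // bare vertex: referenced by no face below
    "f 1 2 3 4\n"
    ;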

View File

@ -31,6 +31,7 @@ set(TUTORIALS
tutorial_5
tutorial_6
tutorial_7
tutorial_8
)
foreach(tutorial ${TUTORIALS})

View File

@ -59,8 +59,6 @@ struct Vertex {
_position[2]+=weight*src._position[2];
}
void AddVaryingWithWeight(Vertex const &, float) { }
// Public interface ------------------------------------
void SetPosition(float x, float y, float z) {
_position[0]=x;

View File

@ -186,7 +186,7 @@ public:
int const * GetFaceVerts(int face) const { return g_faceverts+getCompOffset(g_facenverts, face); }
int const * GetFaceEdges(int edge) const { return g_faceedges+getCompOffset(g_facenverts, edge); }
int const * GetFaceEdges(int face) const { return g_faceedges+getCompOffset(g_facenverts, face); }
//
@ -403,8 +403,6 @@ struct Vertex {
_position[2]+=weight*src._position[2];
}
void AddVaryingWithWeight(Vertex const &, float) { }
// Public interface ------------------------------------
void SetPosition(float x, float y, float z) {
_position[0]=x;

View File

@ -61,8 +61,6 @@ struct Vertex {
_position[2]+=weight*src._position[2];
}
void AddVaryingWithWeight(Vertex const &, float) { }
// Public interface ------------------------------------
void SetPosition(float x, float y, float z) {
_position[0]=x;

View File

@ -96,8 +96,6 @@ struct Vertex {
point[2] += weight * src.point[2];
}
void AddVaryingWithWeight(Vertex const &, float) { }
float point[3];
};

View File

@ -72,8 +72,6 @@ struct Vertex {
_position[2]+=weight*src._position[2];
}
void AddVaryingWithWeight(Vertex const &, float) { }
// Public interface ------------------------------------
void SetPosition(float x, float y, float z) {
_position[0]=x;

View File

@ -0,0 +1,37 @@
#
# Copyright 2013 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
set(SOURCE_FILES
far_tutorial_8.cpp
)
_add_executable(far_tutorial_8
${SOURCE_FILES}
$<TARGET_OBJECTS:sdc_obj>
$<TARGET_OBJECTS:vtr_obj>
$<TARGET_OBJECTS:far_obj>
)
install(TARGETS far_tutorial_8 DESTINATION "${CMAKE_BINDIR_BASE}/tutorials")

View File

@ -0,0 +1,527 @@
//
// Copyright 2013 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
//------------------------------------------------------------------------------
// Tutorial description:
//
// NOTE: The following approaches are approximations for computing smooth normals;
//       for the highest fidelity, patches should be used to evaluate positions and
//       normals, which form the true limit surface.
//
// Building on tutorial 3, this example shows how to instantiate a simple mesh,
// refine it uniformly, interpolate both 'vertex' and 'face-varying'
// primvar data, and finally calculate approximated smooth normals.
// The resulting interpolated data is output in 'obj' format.
//
// Currently, this tutorial supports 3 methods to approximate smooth normals:
//
// CrossTriangle : Calculates smooth normals (accumulating per vertex) using
// 3 verts to generate 2 vectors. This approximation has
// trouble when working with quads (which can be non-planar)
// since it only takes into account half of each face.
//
// CrossQuad : Calculates smooth normals (accumulating per vertex)
// but this time, instead of taking into account only 3 verts
// it creates 2 vectors crossing the quad.
// This approximation builds upon CrossTriangle but takes
// into account the 4 verts of the face.
//
// Limit : Calculates the normals at the limit for each vert
// at the last level of subdivision.
// These are the true limit normals, however, in this example
// they are used with verts that are not at the limit.
// This can lead to new visual artifacts since the normals
// and the positions don't match. Additionally, this approach
// requires extra computation to calculate the limit normals.
// For this reason, we strongly suggest using
// limit positions with limit normals.
//
#include <opensubdiv/far/topologyDescriptor.h>
#include <opensubdiv/far/primvarRefiner.h>
#include <cmath>
#include <cstdio>
#include <cstring>
//------------------------------------------------------------------------------
// Math helpers.
//
//
// Normalizes the input vector in place
inline void
normalize(float *n) {
float rn = 1.0f/sqrtf(n[0]*n[0] + n[1]*n[1] + n[2]*n[2]);
n[0] *= rn;
n[1] *= rn;
n[2] *= rn;
}
// Computes the cross product of \p v1 and \p v2 and stores the result in \p vOut.
void cross(float const *v1, float const *v2, float* vOut)
{
vOut[0] = v1[1] * v2[2] - v1[2] * v2[1];
vOut[1] = v1[2] * v2[0] - v1[0] * v2[2];
vOut[2] = v1[0] * v2[1] - v1[1] * v2[0];
}
//------------------------------------------------------------------------------
// Vertex container implementation.
//
//
struct Vertex {
// Minimal required interface ----------------------
Vertex() {
Clear();
}
Vertex(Vertex const & src) {
position[0] = src.position[0];
position[1] = src.position[1];
position[2] = src.position[2];
}
void Clear() {
position[0]=position[1]=position[2]=0.0f;
}
void AddWithWeight(Vertex const & src, float weight) {
position[0]+=weight*src.position[0];
position[1]+=weight*src.position[1];
position[2]+=weight*src.position[2];
}
// Public interface ------------------------------------
void SetPosition(float x, float y, float z) {
position[0]=x;
position[1]=y;
position[2]=z;
}
const float * GetPosition() const {
return position;
}
float position[3];
};
//------------------------------------------------------------------------------
// Face-varying container implementation.
//
// We are using a uv texture layout as a 'face-varying' primitive variable
// attribute. Because face-varying data is specified 'per-face-per-vertex',
// we cannot use the same container that we use for 'vertex' or 'varying'
// data. We specify a new container, which only carries (u,v) coordinates.
// Similarly to our 'Vertex' container, we add a minimalistic interpolation
// interface with 'Clear()' and 'AddWithWeight()' methods.
//
struct FVarVertexUV {
// Minimal required interface ----------------------
void Clear() {
u=v=0.0f;
}
void AddWithWeight(FVarVertexUV const & src, float weight) {
u += weight * src.u;
v += weight * src.v;
}
// Basic 'uv' layout channel
float u,v;
};
struct FVarVertexColor {
// Minimal required interface ----------------------
void Clear() {
r=g=b=a=0.0f;
}
void AddWithWeight(FVarVertexColor const & src, float weight) {
r += weight * src.r;
g += weight * src.g;
b += weight * src.b;
a += weight * src.a;
}
// Basic 'color' layout channel
float r,g,b,a;
};
//------------------------------------------------------------------------------
// Cube geometry from catmark_cube.h
// 'vertex' primitive variable data & topology
static float g_verts[8][3] = {{ -0.5f, -0.5f, 0.5f },
{ 0.5f, -0.5f, 0.5f },
{ -0.5f, 0.5f, 0.5f },
{ 0.5f, 0.5f, 0.5f },
{ -0.5f, 0.5f, -0.5f },
{ 0.5f, 0.5f, -0.5f },
{ -0.5f, -0.5f, -0.5f },
{ 0.5f, -0.5f, -0.5f }};
static int g_nverts = 8,
g_nfaces = 6;
static int g_vertsperface[6] = { 4, 4, 4, 4, 4, 4 };
static int g_vertIndices[24] = { 0, 1, 3, 2,
2, 3, 5, 4,
4, 5, 7, 6,
6, 7, 1, 0,
1, 7, 5, 3,
6, 0, 2, 4 };
// 'face-varying' primitive variable data & topology for UVs
static float g_uvs[14][2] = {{ 0.375, 0.00 },
{ 0.625, 0.00 },
{ 0.375, 0.25 },
{ 0.625, 0.25 },
{ 0.375, 0.50 },
{ 0.625, 0.50 },
{ 0.375, 0.75 },
{ 0.625, 0.75 },
{ 0.375, 1.00 },
{ 0.625, 1.00 },
{ 0.875, 0.00 },
{ 0.875, 0.25 },
{ 0.125, 0.00 },
{ 0.125, 0.25 }};
static int g_nuvs = 14;
static int g_uvIndices[24] = { 0, 1, 3, 2,
2, 3, 5, 4,
4, 5, 7, 6,
6, 7, 9, 8,
1, 10, 11, 3,
12, 0, 2, 13 };
// 'face-varying' primitive variable data & topology for color
static float g_colors[24][4] = {{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 0.0, 0.0, 1.0},
{1.0, 0.0, 0.0, 1.0},
{1.0, 0.0, 0.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0}};
static int g_ncolors = 24;
static int g_colorIndices[24] = { 0, 3, 9, 6,
7, 10, 15, 12,
13, 16, 21, 18,
19, 22, 4, 1,
5, 23, 17, 11,
20, 2, 8, 14 };
using namespace OpenSubdiv;
// Approximation methods for smooth normal computations
enum NormalApproximation
{
CrossTriangle,
CrossQuad,
Limit
};
//------------------------------------------------------------------------------
int main(int argc, char ** argv) {
const int maxlevel = 2;
enum NormalApproximation normalApproximation = CrossTriangle;
// Parsing command line parameters to see if the user wants to use a
// specific method to calculate normals
for (int i = 1; i < argc; ++i) {
if (strstr(argv[i], "-limit")) {
normalApproximation = Limit;
} else if (!strcmp(argv[i], "-crossquad")) {
normalApproximation = CrossQuad;
} else if (!strcmp(argv[i], "-crosstriangle")) {
normalApproximation = CrossTriangle;
} else {
printf("Parameters : \n");
printf(" -crosstriangle : use the cross product of vectors\n");
printf(" generated from 3 verts (default).\n");
printf(" -crossquad : use the cross product of vectors\n");
printf(" generated from 4 verts.\n");
printf(" -limit : use normals calculated from the limit.\n");
return 0;
}
}
typedef Far::TopologyDescriptor Descriptor;
Sdc::SchemeType type = OpenSubdiv::Sdc::SCHEME_CATMARK;
Sdc::Options options;
options.SetVtxBoundaryInterpolation(Sdc::Options::VTX_BOUNDARY_EDGE_ONLY);
options.SetFVarLinearInterpolation(Sdc::Options::FVAR_LINEAR_NONE);
// Populate a topology descriptor with our raw data
Descriptor desc;
desc.numVertices = g_nverts;
desc.numFaces = g_nfaces;
desc.numVertsPerFace = g_vertsperface;
desc.vertIndicesPerFace = g_vertIndices;
// Create a face-varying channel descriptor
const int numChannels = 2;
const int channelUV = 0;
const int channelColor = 1;
Descriptor::FVarChannel channels[numChannels];
channels[channelUV].numValues = g_nuvs;
channels[channelUV].valueIndices = g_uvIndices;
channels[channelColor].numValues = g_ncolors;
channels[channelColor].valueIndices = g_colorIndices;
// Add the channel topology to the main descriptor
desc.numFVarChannels = numChannels;
desc.fvarChannels = channels;
// Instantiate a FarTopologyRefiner from the descriptor
Far::TopologyRefiner * refiner =
Far::TopologyRefinerFactory<Descriptor>::Create(desc,
Far::TopologyRefinerFactory<Descriptor>::Options(type, options));
// Uniformly refine the topology up to 'maxlevel'
// note: fullTopologyInLastLevel must be true to work with face-varying data
{
Far::TopologyRefiner::UniformOptions refineOptions(maxlevel);
refineOptions.fullTopologyInLastLevel = true;
refiner->RefineUniform(refineOptions);
}
// Allocate and initialize the 'vertex' primvar data (see tutorial 2 for
// more details).
std::vector<Vertex> vbuffer(refiner->GetNumVerticesTotal());
Vertex * verts = &vbuffer[0];
for (int i=0; i<g_nverts; ++i) {
verts[i].SetPosition(g_verts[i][0], g_verts[i][1], g_verts[i][2]);
}
// Allocate & initialize the first channel of 'face-varying' primvars (UVs)
std::vector<FVarVertexUV> fvBufferUV(refiner->GetNumFVarValuesTotal(channelUV));
FVarVertexUV * fvVertsUV = &fvBufferUV[0];
for (int i=0; i<g_nuvs; ++i) {
fvVertsUV[i].u = g_uvs[i][0];
fvVertsUV[i].v = g_uvs[i][1];
}
// Allocate & initialize the second channel of 'face-varying' primvars (colors)
std::vector<FVarVertexColor> fvBufferColor(refiner->GetNumFVarValuesTotal(channelColor));
FVarVertexColor * fvVertsColor = &fvBufferColor[0];
for (int i=0; i<g_ncolors; ++i) {
fvVertsColor[i].r = g_colors[i][0];
fvVertsColor[i].g = g_colors[i][1];
fvVertsColor[i].b = g_colors[i][2];
fvVertsColor[i].a = g_colors[i][3];
}
// Interpolate both vertex and face-varying primvar data
Far::PrimvarRefiner primvarRefiner(*refiner);
Vertex * srcVert = verts;
FVarVertexUV * srcFVarUV = fvVertsUV;
FVarVertexColor * srcFVarColor = fvVertsColor;
for (int level = 1; level <= maxlevel; ++level) {
Vertex * dstVert = srcVert + refiner->GetLevel(level-1).GetNumVertices();
FVarVertexUV * dstFVarUV = srcFVarUV + refiner->GetLevel(level-1).GetNumFVarValues(channelUV);
FVarVertexColor * dstFVarColor = srcFVarColor + refiner->GetLevel(level-1).GetNumFVarValues(channelColor);
primvarRefiner.Interpolate(level, srcVert, dstVert);
primvarRefiner.InterpolateFaceVarying(level, srcFVarUV, dstFVarUV, channelUV);
primvarRefiner.InterpolateFaceVarying(level, srcFVarColor, dstFVarColor, channelColor);
srcVert = dstVert;
srcFVarUV = dstFVarUV;
srcFVarColor = dstFVarColor;
}
// Approximate normals
Far::TopologyLevel const & refLastLevel = refiner->GetLevel(maxlevel);
int nverts = refLastLevel.GetNumVertices();
int nfaces = refLastLevel.GetNumFaces();
int firstOfLastVerts = refiner->GetNumVerticesTotal() - nverts;
std::vector<Vertex> normals(nverts);
// Different ways to approximate smooth normals
//
// For details check the description at the beginning of the file
if (normalApproximation == Limit) {
// Approximation using the normal at the limit with verts that are
// not at the limit
//
// For details check the description at the beginning of the file
std::vector<Vertex> fineLimitPos(nverts);
std::vector<Vertex> fineDu(nverts);
std::vector<Vertex> fineDv(nverts);
primvarRefiner.Limit(&verts[firstOfLastVerts], fineLimitPos, fineDu, fineDv);
for (int vert = 0; vert < nverts; ++vert) {
float const * du = fineDu[vert].GetPosition();
float const * dv = fineDv[vert].GetPosition();
float norm[3];
cross(du, dv, norm);
normals[vert].SetPosition(norm[0], norm[1], norm[2]);
}
} else if (normalApproximation == CrossQuad) {
// Approximate smooth normals by accumulating normal vectors computed as
// the cross product of two vectors generated by the 4 verts that
// form each quad
//
// For details check the description at the beginning of the file
for (int f = 0; f < nfaces; f++) {
Far::ConstIndexArray faceVertices = refLastLevel.GetFaceVertices(f);
// We will use all four verts of the quad to calculate the normal
const float * v0 = verts[ firstOfLastVerts + faceVertices[0] ].GetPosition();
const float * v1 = verts[ firstOfLastVerts + faceVertices[1] ].GetPosition();
const float * v2 = verts[ firstOfLastVerts + faceVertices[2] ].GetPosition();
const float * v3 = verts[ firstOfLastVerts + faceVertices[3] ].GetPosition();
// Calculate the cross product between the two diagonal vectors v2-v0 and
// v3-v1, and then normalize the result
float normalCalculated [] = {0.0,0.0,0.0};
float a[3] = { v2[0]-v0[0], v2[1]-v0[1], v2[2]-v0[2] };
float b[3] = { v3[0]-v1[0], v3[1]-v1[1], v3[2]-v1[2] };
cross(a, b, normalCalculated);
normalize(normalCalculated);
// Accumulate that normal on all verts that are part of that face
for(int vInFace = 0; vInFace < faceVertices.size() ; vInFace++ ) {
int vertexIndex = faceVertices[vInFace];
normals[vertexIndex].position[0] += normalCalculated[0];
normals[vertexIndex].position[1] += normalCalculated[1];
normals[vertexIndex].position[2] += normalCalculated[2];
}
}
} else if (normalApproximation == CrossTriangle) {
// Approximate smooth normals by accumulating normal vectors computed as
// the cross product of two vectors generated by 3 verts of the quad
//
// For details check the description at the beginning of the file
for (int f = 0; f < nfaces; f++) {
Far::ConstIndexArray faceVertices = refLastLevel.GetFaceVertices(f);
// We will use the first three verts to calculate a normal
const float * v0 = verts[ firstOfLastVerts + faceVertices[0] ].GetPosition();
const float * v1 = verts[ firstOfLastVerts + faceVertices[1] ].GetPosition();
const float * v2 = verts[ firstOfLastVerts + faceVertices[2] ].GetPosition();
// Calculate the cross product between the vectors formed by v1-v0 and
// v2-v0, and then normalize the result
float normalCalculated [] = {0.0,0.0,0.0};
float a[3] = { v1[0]-v0[0], v1[1]-v0[1], v1[2]-v0[2] };
float b[3] = { v2[0]-v0[0], v2[1]-v0[1], v2[2]-v0[2] };
cross(a, b, normalCalculated);
normalize(normalCalculated);
// Accumulate that normal on all verts that are part of that face
for(int vInFace = 0; vInFace < faceVertices.size() ; vInFace++ ) {
int vertexIndex = faceVertices[vInFace];
normals[vertexIndex].position[0] += normalCalculated[0];
normals[vertexIndex].position[1] += normalCalculated[1];
normals[vertexIndex].position[2] += normalCalculated[2];
}
}
}
// Finally we just need to normalize the accumulated normals
for (int vert = 0; vert < nverts; ++vert) {
normalize(&normals[vert].position[0]);
}
{ // Output OBJ of the highest level refined -----------
// Print vertex positions
for (int vert = 0; vert < nverts; ++vert) {
float const * pos = verts[firstOfLastVerts + vert].GetPosition();
printf("v %f %f %f\n", pos[0], pos[1], pos[2]);
}
// Print vertex normals
for (int vert = 0; vert < nverts; ++vert) {
float const * pos = normals[vert].GetPosition();
printf("vn %f %f %f\n", pos[0], pos[1], pos[2]);
}
// Print uvs
int nuvs = refLastLevel.GetNumFVarValues(channelUV);
int firstOfLastUvs = refiner->GetNumFVarValuesTotal(channelUV) - nuvs;
for (int fvvert = 0; fvvert < nuvs; ++fvvert) {
FVarVertexUV const & uv = fvVertsUV[firstOfLastUvs + fvvert];
printf("vt %f %f\n", uv.u, uv.v);
}
// Print faces
for (int face = 0; face < nfaces; ++face) {
Far::ConstIndexArray fverts = refLastLevel.GetFaceVertices(face);
Far::ConstIndexArray fuvs = refLastLevel.GetFaceFVarValues(face, channelUV);
// all refined Catmark faces should be quads
assert(fverts.size()==4 and fuvs.size()==4);
printf("f ");
for (int vert=0; vert<fverts.size(); ++vert) {
// OBJ uses 1-based arrays...
printf("%d/%d/%d ", fverts[vert]+1, fuvs[vert]+1, fverts[vert]+1);
}
printf("\n");
}
}
}
//------------------------------------------------------------------------------