//
//   Copyright 2013 Pixar
//
//   Licensed under the Apache License, Version 2.0 (the "Apache License")
//   with the following modification; you may not use this file except in
//   compliance with the Apache License and the following modification to it:
//   Section 6. Trademarks. is deleted and replaced with:
//
//   6. Trademarks. This License does not grant permission to use the trade
//      names, trademarks, service marks, or product names of the Licensor
//      and its affiliates, except as required to comply with Section 4(c) of
//      the License and to reproduce the content of the NOTICE file.
//
//   You may obtain a copy of the Apache License at
//
//       http://www.apache.org/licenses/LICENSE-2.0
//
//   Unless required by applicable law or agreed to in writing, software
//   distributed under the Apache License with the above modification is
//   distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
//   KIND, either express or implied. See the Apache License for the specific
//   language governing permissions and limitations under the Apache License.
//

#include <assert.h>

#define OSD_PATCH_BASIS_CUDA
#include "../osd/patchBasisCommon.h"
|
2012-06-09 21:22:57 +00:00
|
|
|
|
2014-09-05 22:07:46 +00:00
|
|
|
// -----------------------------------------------------------------------------
|
|
|
|
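// DeviceVertex<N> is a small fixed-size accumulator of N interleaved floats.
// It backs the packed (array-of-structures) computeStencils kernel below,
// which assumes both the source and destination primvar buffers are tightly
// packed with N floats per vertex.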
template<int N> struct DeviceVertex {

    float v[N];

    __device__ void addWithWeight(DeviceVertex<N> const & src, float weight) {
#pragma unroll
        for(int i = 0; i < N; ++i){
            v[i] += src.v[i] * weight;
        }
    }

    __device__ void clear() {
#pragma unroll
        for(int i = 0; i < N; ++i){
            v[i] = 0.0f;
        }
    }
};

// Specialize DeviceVertex for N=0 to avoid compile error:
// "flexible array member in otherwise empty struct"
template<> struct DeviceVertex<0> {
    __device__ void addWithWeight(DeviceVertex<0> &src, float weight) {}
    __device__ void clear() {}
};

// -----------------------------------------------------------------------------

__device__ void clear(float *dst, int count)
{
    for(int i = 0; i < count; ++i) dst[i] = 0;
}

__device__ void addWithWeight(float *dst, float const *src, float weight, int count)
{
    for(int i = 0; i < count; ++i) dst[i] += src[i] * weight;
}

// --------------------------------------------------------------------------------------------

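// Stencil evaluation: each destination vertex i is a weighted sum of source
// control vertices,
//
//     dst[i] = sum_{j=0..sizes[i]-1} weights[offsets[i]+j] * cvs[indices[offsets[i]+j]]
//
// with one thread handling one destination vertex per grid-stride iteration.
// The templated kernel below assumes tightly packed data of NUM_ELEMENTS
// floats per vertex; the untemplated overload that follows handles arbitrary
// length/stride layouts.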
template <int NUM_ELEMENTS> __global__ void
computeStencils(float const * cvs, float * vbuffer,
                int const * sizes,
                int const * offsets,
                int const * indices,
                float const * weights,
                int start, int end) {

    DeviceVertex<NUM_ELEMENTS> const * src =
        (DeviceVertex<NUM_ELEMENTS> const *)cvs;

    DeviceVertex<NUM_ELEMENTS> * verts =
        (DeviceVertex<NUM_ELEMENTS> *)vbuffer;

    int first = start + threadIdx.x + blockIdx.x*blockDim.x;

    for (int i=first; i<end; i += blockDim.x * gridDim.x) {

        int const * lindices = indices + offsets[i];
        float const * lweights = weights + offsets[i];

        DeviceVertex<NUM_ELEMENTS> dst;
        dst.clear();

        for (int j=0; j<sizes[i]; ++j) {
            dst.addWithWeight(src[lindices[j]], lweights[j]);
        }
        verts[i] = dst;
    }
}

__global__ void
computeStencils(float const * cvs, float * dst,
                int length,
                int srcStride,
                int dstStride,
                int const * sizes,
                int const * offsets,
                int const * indices,
                float const * weights,
                int start, int end) {

    int first = start + threadIdx.x + blockIdx.x*blockDim.x;

    for (int i=first; i<end; i += blockDim.x * gridDim.x) {

        int const * lindices = indices + offsets[i];
        float const * lweights = weights + offsets[i];

        float * dstVert = dst + i*dstStride;
        clear(dstVert, length);

        for (int j=0; j<sizes[i]; ++j) {

            float const * srcVert = cvs + lindices[j]*srcStride;

            addWithWeight(dstVert, srcVert, lweights[j], length);
        }
    }
}

// -----------------------------------------------------------------------------

#define USE_NVIDIA_OPTIMIZATION
#ifdef USE_NVIDIA_OPTIMIZATION

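// NVIDIA-optimized stencil kernel: NUM_ELEMENTS threads cooperate on a single
// output vertex (each thread owns one float element of it), and the stencil
// indices and weights are staged through shared memory so that the
// global-memory loads are coalesced across the warp.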
template< int NUM_ELEMENTS, int NUM_THREADS_PER_BLOCK >
__global__ void computeStencilsNv(float const *__restrict cvs,
                                  float * vbuffer,
                                  int const *__restrict sizes,
                                  int const *__restrict offsets,
                                  int const *__restrict indices,
                                  float const *__restrict weights,
                                  int start,
                                  int end)
{
    // Shared memory to stage indices/weights.
    __shared__ int smem_indices_buffer[NUM_THREADS_PER_BLOCK];
    __shared__ float smem_weights_buffer[NUM_THREADS_PER_BLOCK];

    // The size of a single warp.
    const int WARP_SIZE = 32;
    // The number of warps per block.
    const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / WARP_SIZE;
    // The number of outputs computed by a single warp.
    const int NUM_OUTPUTS_PER_WARP = WARP_SIZE / NUM_ELEMENTS;
    // The number of outputs computed by a block of threads.
    const int NUM_OUTPUTS_PER_BLOCK = NUM_OUTPUTS_PER_WARP*NUM_WARPS_PER_BLOCK;
    // The number of active threads in a warp.
    const int NUM_ACTIVE_THREADS_PER_WARP = NUM_OUTPUTS_PER_WARP * NUM_ELEMENTS;

    // The index of the warp inside the block and of the lane inside the warp.
    const int warpId = threadIdx.x / WARP_SIZE;
    const int laneId = threadIdx.x % WARP_SIZE;

    // We use NUM_ELEMENTS threads per output. Find which output/element a thread works on.
    int outputIdx = warpId*NUM_OUTPUTS_PER_WARP + laneId/NUM_ELEMENTS, elementIdx = laneId%NUM_ELEMENTS;

    // Each output corresponds to a section of shared memory.
    volatile int *smem_indices = &smem_indices_buffer[warpId*WARP_SIZE + (laneId/NUM_ELEMENTS)*NUM_ELEMENTS];
    volatile float *smem_weights = &smem_weights_buffer[warpId*WARP_SIZE + (laneId/NUM_ELEMENTS)*NUM_ELEMENTS];

    // Disable threads that have nothing to do inside the warp.
    int i = end;
    if( laneId < NUM_ACTIVE_THREADS_PER_WARP )
        i = start + blockIdx.x*NUM_OUTPUTS_PER_BLOCK + outputIdx;

    // Iterate over the vertices.
    for( ; i < end ; i += gridDim.x*NUM_OUTPUTS_PER_BLOCK )
    {
        // Each thread computes one element of the final vertex.
        float x = 0.f;

        // Load the offset and the size for each vertex. NUM_ELEMENTS threads load the same value.
        const int offset_i = offsets[i], size_i = sizes[i];

        // Iterate over the stencil.
        for( int j = offset_i, j_end = offset_i+size_i ; j < j_end ; )
        {
            int j_it = j + elementIdx;

            // Load some indices and some weights. The transaction is coalesced.
            smem_indices[elementIdx] = j_it < j_end ? indices[j_it] : 0;
            smem_weights[elementIdx] = j_it < j_end ? weights[j_it] : 0.f;

            // The threads now collaborate to load the vertices.
#pragma unroll
            for( int k = 0 ; k < NUM_ELEMENTS ; ++k, ++j )
                if( j < j_end )
                    x += smem_weights[k] * cvs[smem_indices[k]*NUM_ELEMENTS + elementIdx];
        }

        // Store the vertex.
        vbuffer[NUM_ELEMENTS*i + elementIdx] = x;
    }
}
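// Variant specialized for primvar data that is exactly 4 floats wide and
// tightly packed: each thread evaluates one whole vertex with vectorized
// float4 loads/stores, which requires cvs and vbuffer to be 16-byte aligned.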
template< int NUM_THREADS_PER_BLOCK >
__global__ void computeStencilsNv_v4(float const *__restrict cvs,
                                     float * vbuffer,
                                     int const *__restrict sizes,
                                     int const *__restrict offsets,
                                     int const *__restrict indices,
                                     float const *__restrict weights,
                                     int start,
                                     int end)
{
    // Iterate over the vertices.
    for( int i = start + blockIdx.x*NUM_THREADS_PER_BLOCK + threadIdx.x ; i < end ; i += gridDim.x*NUM_THREADS_PER_BLOCK )
    {
        // Each thread computes a full 4-component vertex.
        float4 x = make_float4(0.f, 0.f, 0.f, 0.f);

        // Iterate over the stencil.
        for( int j = offsets[i], j_end = offsets[i]+sizes[i] ; j < j_end ; ++j )
        {
            float w = weights[j];
            float4 tmp = reinterpret_cast<const float4 *>(cvs)[indices[j]];
            x.x += w*tmp.x;
            x.y += w*tmp.y;
            x.z += w*tmp.z;
            x.w += w*tmp.w;
        }

        // Store the vertex.
        reinterpret_cast<float4*>(vbuffer)[i] = x;
    }
}

#endif // USE_NVIDIA_OPTIMIZATION

// -----------------------------------------------------------------------------

// Osd::PatchCoord osd/types.h
struct PatchCoord {
    int arrayIndex;
    int patchIndex;
    int vertIndex;
    float s;
    float t;
};
struct PatchArray {
    int patchType;        // Far::PatchDescriptor::Type
    int numPatches;
    int indexBase;        // offset in the index buffer
    int primitiveIdBase;  // offset in the patch param buffer
};
struct PatchParam {
    unsigned int field0;
    unsigned int field1;
    float sharpness;
};

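// The helpers below decode the packed patch parameter bitfield
// (PatchParam::field1). As read by this file: bits 0-3 store the subdivision
// depth, bit 4 flags a non-quad root face, bit 5 flags a regular patch,
// bits 8-11 hold the boundary mask, and bits 12-21 / 22-31 hold the v / u
// origin of the sub-patch in fixed point.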
__device__
int getDepth(unsigned int patchBits) {
    return (patchBits & 0xf);
}

__device__
float getParamFraction(unsigned int patchBits) {
    bool nonQuadRoot = (patchBits >> 4) & 0x1;
    int depth = getDepth(patchBits);
    if (nonQuadRoot) {
        return 1.0f / float( 1 << (depth-1) );
    } else {
        return 1.0f / float( 1 << depth );
    }
}

__device__
void normalizePatchCoord(unsigned int patchBits, float *u, float *v) {
    float frac = getParamFraction(patchBits);

    int iu = (patchBits >> 22) & 0x3ff;
    int iv = (patchBits >> 12) & 0x3ff;

    // top left corner
    float pu = (float)iu*frac;
    float pv = (float)iv*frac;

    // normalize u,v coordinates
    *u = (*u - pu) / frac;
    *v = (*v - pv) / frac;
}

__device__
bool isRegular(unsigned int patchBits) {
    return ((patchBits >> 5) & 0x1) != 0;
}

__device__
int getNumControlVertices(int patchType) {
    return (patchType == 3) ? 4 :
           (patchType == 6) ? 16 :
           (patchType == 9) ? 20 : 0;
}

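// Patch evaluation: one thread per patch coordinate. Each thread looks up the
// patch array and patch param for its coordinate, normalizes (s,t) into the
// sub-patch domain, computes basis weights for the patch type (4 control
// vertices for bilinear, 16 for B-spline, 20 for Gregory), and accumulates
// the weighted control vertices into dst and, optionally, into the du/dv
// derivative buffers.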
__global__ void
computePatches(const float *src, float *dst, float *dstDu, float *dstDv,
               int length, int srcStride, int dstStride, int dstDuStride, int dstDvStride,
               int numPatchCoords, const PatchCoord *patchCoords,
               const PatchArray *patchArrayBuffer,
               const int *patchIndexBuffer,
               const PatchParam *patchParamBuffer) {

    int first = threadIdx.x + blockIdx.x * blockDim.x;

    // PERFORMANCE: not yet optimized

    float wP[20], wDs[20], wDt[20], wDss[20], wDst[20], wDtt[20];

    for (int i = first; i < numPatchCoords; i += blockDim.x * gridDim.x) {

        PatchCoord const &coord = patchCoords[i];
        PatchArray const &array = patchArrayBuffer[coord.arrayIndex];

        unsigned int patchBits = patchParamBuffer[coord.patchIndex].field1;
        int patchType = isRegular(patchBits) ? 6 : array.patchType;

        // normalize
        float s = coord.s;
        float t = coord.t;
        normalizePatchCoord(patchBits, &s, &t);
        float dScale = (float)(1 << getDepth(patchBits));
        int boundary = int((patchBits >> 8) & 0xfU);

        int numControlVertices = 0;
        if (patchType == 3) {
            OsdGetBilinearPatchWeights(s, t, dScale,
                                       wP, wDs, wDt, wDss, wDst, wDtt);
            numControlVertices = 4;
        } else if (patchType == 6) {
            OsdGetBSplinePatchWeights(s, t, dScale, boundary,
                                      wP, wDs, wDt, wDss, wDst, wDtt);
            numControlVertices = 16;
        } else if (patchType == 9) {
            OsdGetGregoryPatchWeights(s, t, dScale,
                                      wP, wDs, wDt, wDss, wDst, wDtt);
            numControlVertices = 20;
        }

        int indexStride = getNumControlVertices(array.patchType);
        int indexBase = array.indexBase + indexStride *
            (coord.patchIndex - array.primitiveIdBase);

        const int *cvs = patchIndexBuffer + indexBase;

        float * dstVert = dst + i * dstStride;
        clear(dstVert, length);
        for (int j = 0; j < numControlVertices; ++j) {
            const float * srcVert = src + cvs[j] * srcStride;
            addWithWeight(dstVert, srcVert, wP[j], length);
        }
        if (dstDu) {
            float *d = dstDu + i * dstDuStride;
            clear(d, length);
            for (int j = 0; j < numControlVertices; ++j) {
                const float * srcVert = src + cvs[j] * srcStride;
                addWithWeight(d, srcVert, wDs[j], length);
            }
        }
        if (dstDv) {
            float *d = dstDv + i * dstDvStride;
            clear(d, length);
            for (int j = 0; j < numControlVertices; ++j) {
                const float * srcVert = src + cvs[j] * srcStride;
                addWithWeight(d, srcVert, wDt[j], length);
            }
        }
    }
}

// -----------------------------------------------------------------------------

#include "../version.h"

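// Kernel launch helpers: when the primvar width matches a common case and the
// data is tightly packed (stride == length), dispatch to the specialized
// kernel and return; otherwise fall through to the generic strided kernel.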
#define OPT_KERNEL(NUM_ELEMENTS, KERNEL, X, Y, ARG) \
    if (length==NUM_ELEMENTS && srcStride==length && dstStride==length) { \
        KERNEL<NUM_ELEMENTS><<<X,Y>>>ARG; \
        return; \
    }

#ifdef USE_NVIDIA_OPTIMIZATION
#define OPT_KERNEL_NVIDIA(NUM_ELEMENTS, KERNEL, X, Y, ARG) \
    if (length==NUM_ELEMENTS && srcStride==length && dstStride==length) { \
        int gridDim = min(X, (end-start+Y-1)/Y); \
        KERNEL<NUM_ELEMENTS, Y><<<gridDim, Y>>>ARG; \
        return; \
    }
#endif

extern "C" {
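// Illustrative host-side call (a sketch only; the d_* names are hypothetical
// device pointers that the caller has filled with the stencil table and
// primvar data):
//
//     CudaEvalStencils(d_srcVerts, d_dstVerts,
//                      3 /*length*/, 3 /*srcStride*/, 3 /*dstStride*/,
//                      d_sizes, d_offsets, d_indices, d_weights,
//                      0 /*start*/, numStencils /*end*/);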
void CudaEvalStencils(
    const float *src, float *dst,
    int length, int srcStride, int dstStride,
    const int * sizes, const int * offsets, const int * indices,
    const float * weights,
    int start, int end) {
    if (length == 0 || srcStride == 0 || dstStride == 0 || (end <= start)) {
        return;
    }

#ifdef USE_NVIDIA_OPTIMIZATION
    OPT_KERNEL_NVIDIA(3, computeStencilsNv, 2048, 256,
                      (src, dst, sizes, offsets, indices, weights, start, end));
    //OPT_KERNEL_NVIDIA(4, computeStencilsNv, 2048, 256,
    //                  (cvs, dst, sizes, offsets, indices, weights, start, end));
    if (length == 4 && srcStride == length && dstStride == length) {
        int gridDim = min(2048, (end-start+256-1)/256);
        computeStencilsNv_v4<256><<<gridDim, 256>>>(
            src, dst, sizes, offsets, indices, weights, start, end);
        return;
    }
#else
    OPT_KERNEL(3, computeStencils, 512, 32,
               (src, dst, sizes, offsets, indices, weights, start, end));
    OPT_KERNEL(4, computeStencils, 512, 32,
               (src, dst, sizes, offsets, indices, weights, start, end));
#endif

    // generic case (slow)
    computeStencils <<<512, 32>>>(
        src, dst, length, srcStride, dstStride,
        sizes, offsets, indices, weights, start, end);
}

// -----------------------------------------------------------------------------

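// Patch evaluation entry points: CudaEvalPatches evaluates positions only
// (the derivative outputs are passed as NULL), while
// CudaEvalPatchesWithDerivatives also writes the du/dv buffers.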
void CudaEvalPatches(
    const float *src, float *dst,
    int length, int srcStride, int dstStride,
    int numPatchCoords, const PatchCoord *patchCoords,
    const PatchArray *patchArrayBuffer,
    const int *patchIndexBuffer,
    const PatchParam *patchParamBuffer) {

    // PERFORMANCE: not optimized at all

    computePatches <<<512, 32>>>(
        src, dst, NULL, NULL, length, srcStride, dstStride, 0, 0,
        numPatchCoords, patchCoords,
        patchArrayBuffer, patchIndexBuffer, patchParamBuffer);
}

void CudaEvalPatchesWithDerivatives(
    const float *src, float *dst, float *dstDu, float *dstDv,
    int length, int srcStride, int dstStride, int dstDuStride, int dstDvStride,
    int numPatchCoords, const PatchCoord *patchCoords,
    const PatchArray *patchArrayBuffer,
    const int *patchIndexBuffer,
    const PatchParam *patchParamBuffer) {

    // PERFORMANCE: not optimized at all

    computePatches <<<512, 32>>>(
        src, dst, dstDu, dstDv, length, srcStride, dstStride, dstDuStride, dstDvStride,
        numPatchCoords, patchCoords,
        patchArrayBuffer, patchIndexBuffer, patchParamBuffer);
}

} /* extern "C" */