Add support for Geom shaders in Vulkan backend.
Still requires SkSL support before it will work. The main changes here involve support for uniforms in the geometry shader. We use the same buffer for the vertex and geometry shader stages. These uniforms are not expected to be updated as often as fragment data, so we keep them separate to avoid larger buffer uploads to the GPU.

BUG=skia:
Change-Id: I10b631c24071b6ffa258907a02a009ec6c8accd0
Reviewed-on: https://skia-review.googlesource.com/8413
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Jim Van Verth <jvanverth@google.com>
This commit is contained in:
parent b7a330ff00
commit 18f9602094
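For orientation, here is a minimal standalone sketch (not part of the change itself) of the uniform descriptor set layout this patch sets up: binding 0 holds the uniform buffer shared by the vertex and geometry stages, binding 1 the fragment uniform buffer. The helper name make_uniform_ds_layout is hypothetical, and the sketch assumes a device with geometry shader support, in which case the patch also adds the geometry stage flag to binding 0.

// Sketch only: mirrors the layout built in GrVkDescriptorSetManager below.
#include <vulkan/vulkan.h>

VkDescriptorSetLayout make_uniform_ds_layout(VkDevice device) {
    VkDescriptorSetLayoutBinding bindings[2] = {};

    // Binding 0 (GrVkUniformHandler::kGeometryBinding): uniforms shared by the
    // vertex and geometry stages, stored in one buffer.
    bindings[0].binding         = 0;
    bindings[0].descriptorType  = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    bindings[0].descriptorCount = 1;
    bindings[0].stageFlags      = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT;

    // Binding 1 (GrVkUniformHandler::kFragBinding): fragment uniforms, kept in a
    // separate buffer so frequent fragment updates do not re-upload the geometry data.
    bindings[1].binding         = 1;
    bindings[1].descriptorType  = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    bindings[1].descriptorCount = 1;
    bindings[1].stageFlags      = VK_SHADER_STAGE_FRAGMENT_BIT;

    VkDescriptorSetLayoutCreateInfo info = {};
    info.sType        = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    info.bindingCount = 2;
    info.pBindings    = bindings;

    VkDescriptorSetLayout layout = VK_NULL_HANDLE;
    vkCreateDescriptorSetLayout(device, &info, nullptr, &layout);
    return layout;
}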
@@ -228,7 +228,7 @@ bool GrVkCopyManager::copySurfaceAsDraw(GrVkGpu* gpu,
descriptorWrites.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites.pNext = nullptr;
descriptorWrites.dstSet = uniformDS->descriptorSet();
descriptorWrites.dstBinding = GrVkUniformHandler::kVertexBinding;
descriptorWrites.dstBinding = GrVkUniformHandler::kGeometryBinding;
descriptorWrites.dstArrayElement = 0;
descriptorWrites.descriptorCount = 1;
descriptorWrites.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
@@ -12,47 +12,47 @@
#include "GrVkGpu.h"
#include "GrVkUniformHandler.h"

GrVkDescriptorSetManager::GrVkDescriptorSetManager(GrVkGpu* gpu,
VkDescriptorType type,
const GrVkUniformHandler* uniformHandler)
: fPoolManager(type, gpu, uniformHandler) {
GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateUniformManager(GrVkGpu* gpu) {
SkSTArray<2, uint32_t> visibilities;
// We set the visibility of the first binding to all supported geometry processing shader
// stages (vertex, tesselation, geometry, etc.) and the second binding to the fragment
// shader.
uint32_t geomStages = kVertex_GrShaderFlag;
if (gpu->vkCaps().shaderCaps()->geometryShaderSupport()) {
geomStages |= kGeometry_GrShaderFlag;
}
visibilities.push_back(geomStages);
visibilities.push_back(kFragment_GrShaderFlag);
return new GrVkDescriptorSetManager(gpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, visibilities);
}

GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
GrVkGpu* gpu, VkDescriptorType type, const GrVkUniformHandler& uniformHandler) {
SkSTArray<4, uint32_t> visibilities;
if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type) {
SkASSERT(uniformHandler);
for (int i = 0; i < uniformHandler->numSamplers(); ++i) {
fBindingVisibilities.push_back(uniformHandler->samplerVisibility(i));
}
} else if (VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
SkASSERT(uniformHandler);
for (int i = 0; i < uniformHandler->numTexelBuffers(); ++i) {
fBindingVisibilities.push_back(uniformHandler->texelBufferVisibility(i));
for (int i = 0 ; i < uniformHandler.numSamplers(); ++i) {
visibilities.push_back(uniformHandler.samplerVisibility(i));
}
} else {
SkASSERT(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type);
// We set the visibility of the first binding to the vertex shader and the second to the
// fragment shader.
fBindingVisibilities.push_back(kVertex_GrShaderFlag);
fBindingVisibilities.push_back(kFragment_GrShaderFlag);
SkASSERT(type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
for (int i = 0 ; i < uniformHandler.numTexelBuffers(); ++i) {
visibilities.push_back(uniformHandler.texelBufferVisibility(i));
}
}
return CreateSamplerManager(gpu, type, visibilities);
}
GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
GrVkGpu* gpu, VkDescriptorType type, const SkTArray<uint32_t>& visibilities) {
return new GrVkDescriptorSetManager(gpu, type, visibilities);
}

GrVkDescriptorSetManager::GrVkDescriptorSetManager(GrVkGpu* gpu,
VkDescriptorType type,
const SkTArray<uint32_t>& visibilities)
: fPoolManager(type, gpu, visibilities) {
if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
for (int i = 0; i < visibilities.count(); ++i) {
fBindingVisibilities.push_back(visibilities[i]);
}
} else {
SkASSERT(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type);
SkASSERT(2 == visibilities.count() &&
kVertex_GrShaderFlag == visibilities[0] &&
kFragment_GrShaderFlag == visibilities[1]);
// We set the visibility of the first binding to the vertex shader and the second to the
// fragment shader.
fBindingVisibilities.push_back(kVertex_GrShaderFlag);
fBindingVisibilities.push_back(kFragment_GrShaderFlag);
for (int i = 0; i < visibilities.count(); ++i) {
fBindingVisibilities.push_back(visibilities[i]);
}
}
@@ -162,16 +162,6 @@ VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
return flags;
}

GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
VkDescriptorType type,
GrVkGpu* gpu,
const GrVkUniformHandler* uniformHandler)
: fDescType(type)
, fCurrentDescriptorCount(0)
, fPool(nullptr) {
this->init(gpu, type, uniformHandler, nullptr);
}

GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
VkDescriptorType type,
GrVkGpu* gpu,
@@ -179,90 +169,55 @@ GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
: fDescType(type)
, fCurrentDescriptorCount(0)
, fPool(nullptr) {
this->init(gpu, type, nullptr, &visibilities);
}

void GrVkDescriptorSetManager::DescriptorPoolManager::setupDescriptorLayout(
GrVkGpu* gpu,
VkDescriptorType type,
const GrVkUniformHandler* uniformHandler,
const SkTArray<uint32_t>* visibilities,
uint32_t numSamplers) {
SkASSERT(SkToBool(uniformHandler) != SkToBool(visibilities));
std::unique_ptr<VkDescriptorSetLayoutBinding[]> dsSamplerBindings(
new VkDescriptorSetLayoutBinding[numSamplers]);
for (uint32_t i = 0; i < numSamplers; ++i) {
uint32_t visibility;
if (uniformHandler) {
if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
visibility = uniformHandler->samplerVisibility(i);
} else {
SkASSERT(type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
visibility = uniformHandler->texelBufferVisibility(i);
}
} else {
visibility = (*visibilities)[i];
}
dsSamplerBindings[i].binding = i;
dsSamplerBindings[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dsSamplerBindings[i].descriptorCount = 1;
dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
dsSamplerBindings[i].pImmutableSamplers = nullptr;
}

VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
dsSamplerLayoutCreateInfo.pNext = nullptr;
dsSamplerLayoutCreateInfo.flags = 0;
dsSamplerLayoutCreateInfo.bindingCount = numSamplers;
// Setting to nullptr fixes an error in the param checker validation layer. Even though
// bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is
// null.
dsSamplerLayoutCreateInfo.pBindings = numSamplers ? dsSamplerBindings.get() : nullptr;

GR_VK_CALL_ERRCHECK(gpu->vkInterface(),
CreateDescriptorSetLayout(gpu->device(),
&dsSamplerLayoutCreateInfo,
nullptr,
&fDescLayout));
}
void GrVkDescriptorSetManager::DescriptorPoolManager::init(GrVkGpu* gpu,
VkDescriptorType type,
const GrVkUniformHandler* uniformHandler,
const SkTArray<uint32_t>* visibilities) {
if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
uint32_t numSamplers;
if (uniformHandler) {
if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type) {
numSamplers = (uint32_t)uniformHandler->numSamplers();
} else {
numSamplers = (uint32_t)uniformHandler->numTexelBuffers();
}
} else {
numSamplers = (uint32_t)visibilities->count();
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
uint32_t numBindings = visibilities.count();
std::unique_ptr<VkDescriptorSetLayoutBinding[]> dsSamplerBindings(
new VkDescriptorSetLayoutBinding[numBindings]);
for (uint32_t i = 0; i < numBindings; ++i) {
uint32_t visibility = visibilities[i];
dsSamplerBindings[i].binding = i;
dsSamplerBindings[i].descriptorType = type;
dsSamplerBindings[i].descriptorCount = 1;
dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
dsSamplerBindings[i].pImmutableSamplers = nullptr;
}
this->setupDescriptorLayout(gpu, type, uniformHandler, visibilities, numSamplers);

fDescCountPerSet = numSamplers;
VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
dsSamplerLayoutCreateInfo.pNext = nullptr;
dsSamplerLayoutCreateInfo.flags = 0;
dsSamplerLayoutCreateInfo.bindingCount = numBindings;
// Setting to nullptr fixes an error in the param checker validation layer. Even though
// bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is
// null.
dsSamplerLayoutCreateInfo.pBindings = numBindings ? dsSamplerBindings.get() : nullptr;
GR_VK_CALL_ERRCHECK(gpu->vkInterface(),
CreateDescriptorSetLayout(gpu->device(),
&dsSamplerLayoutCreateInfo,
nullptr,
&fDescLayout));
fDescCountPerSet = visibilities.count();
} else {
SkASSERT(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type);
GR_STATIC_ASSERT(2 == kUniformDescPerSet);
SkASSERT(kUniformDescPerSet == visibilities.count());
// Create Uniform Buffer Descriptor
// The vertex uniform buffer will have binding 0 and the fragment binding 1.
static const uint32_t bindings[kUniformDescPerSet] =
{ GrVkUniformHandler::kGeometryBinding, GrVkUniformHandler::kFragBinding };
VkDescriptorSetLayoutBinding dsUniBindings[kUniformDescPerSet];
memset(&dsUniBindings, 0, 2 * sizeof(VkDescriptorSetLayoutBinding));
dsUniBindings[0].binding = GrVkUniformHandler::kVertexBinding;
dsUniBindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsUniBindings[0].descriptorCount = 1;
dsUniBindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
dsUniBindings[0].pImmutableSamplers = nullptr;
dsUniBindings[1].binding = GrVkUniformHandler::kFragBinding;
dsUniBindings[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsUniBindings[1].descriptorCount = 1;
dsUniBindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dsUniBindings[1].pImmutableSamplers = nullptr;
memset(&dsUniBindings, 0, kUniformDescPerSet * sizeof(VkDescriptorSetLayoutBinding));
for (int i = 0; i < kUniformDescPerSet; ++i) {
dsUniBindings[i].binding = bindings[i];
dsUniBindings[i].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsUniBindings[i].descriptorCount = 1;
dsUniBindings[i].stageFlags = visibility_to_vk_stage_flags(visibilities[i]);
dsUniBindings[i].pImmutableSamplers = nullptr;
}

VkDescriptorSetLayoutCreateInfo uniformLayoutCreateInfo;
memset(&uniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
@@ -26,13 +26,11 @@ class GrVkDescriptorSetManager {
public:
GR_DEFINE_RESOURCE_HANDLE_CLASS(Handle);

GrVkDescriptorSetManager(GrVkGpu* gpu,
VkDescriptorType,
const GrVkUniformHandler* handler = nullptr);

GrVkDescriptorSetManager(GrVkGpu* gpu,
VkDescriptorType,
const SkTArray<uint32_t>& visibilities);
static GrVkDescriptorSetManager* CreateUniformManager(GrVkGpu* gpu);
static GrVkDescriptorSetManager* CreateSamplerManager(GrVkGpu* gpu, VkDescriptorType type,
const GrVkUniformHandler&);
static GrVkDescriptorSetManager* CreateSamplerManager(GrVkGpu* gpu, VkDescriptorType type,
const SkTArray<uint32_t>& visibilities);

~GrVkDescriptorSetManager() {}

@@ -51,8 +49,6 @@ public:

private:
struct DescriptorPoolManager {
DescriptorPoolManager(VkDescriptorType type, GrVkGpu* gpu,
const GrVkUniformHandler* handler = nullptr);
DescriptorPoolManager(VkDescriptorType type, GrVkGpu* gpu,
const SkTArray<uint32_t>& visibilities);

@@ -81,18 +77,14 @@ private:
kStartNumDescriptors = 16, // must be less than kMaxUniformDescriptors
};

void init(GrVkGpu* gpu, VkDescriptorType type, const GrVkUniformHandler* uniformHandler,
const SkTArray<uint32_t>* visibilities);

void getNewPool(GrVkGpu* gpu);

void setupDescriptorLayout(GrVkGpu* gpu,
VkDescriptorType type,
const GrVkUniformHandler* uniformHandler,
const SkTArray<uint32_t>* visibilities,
uint32_t numSamplers);
};

GrVkDescriptorSetManager(GrVkGpu* gpu,
VkDescriptorType,
const SkTArray<uint32_t>& visibilities);

DescriptorPoolManager fPoolManager;
SkTArray<const GrVkDescriptorSet*, true> fFreeSets;
SkSTArray<4, uint32_t> fBindingVisibilities;
@@ -32,7 +32,7 @@ GrVkPipelineState::GrVkPipelineState(GrVkGpu* gpu,
const GrVkDescriptorSetManager::Handle& samplerDSHandle,
const BuiltinUniformHandles& builtinUniformHandles,
const UniformInfoArray& uniforms,
uint32_t vertexUniformSize,
uint32_t geometryUniformSize,
uint32_t fragmentUniformSize,
uint32_t numSamplers,
GrGLSLPrimitiveProcessor* geometryProcessor,
@@ -50,7 +50,7 @@ GrVkPipelineState::GrVkPipelineState(GrVkGpu* gpu,
, fXferProcessor(xferProcessor)
, fFragmentProcessors(fragmentProcessors)
, fDesc(desc)
, fDataManager(uniforms, vertexUniformSize, fragmentUniformSize) {
, fDataManager(uniforms, geometryUniformSize, fragmentUniformSize) {
fSamplers.setReserve(numSamplers);
fTextureViews.setReserve(numSamplers);
fTextures.setReserve(numSamplers);
@@ -59,7 +59,7 @@ GrVkPipelineState::GrVkPipelineState(GrVkGpu* gpu,
fDescriptorSets[1] = VK_NULL_HANDLE;

// Currently we are always binding a descriptor set for uniform buffers.
if (vertexUniformSize || fragmentUniformSize) {
if (geometryUniformSize || fragmentUniformSize) {
fDSCount++;
fStartDS = GrVkUniformHandler::kUniformBufferDescSet;
}
@@ -68,7 +68,7 @@ GrVkPipelineState::GrVkPipelineState(GrVkGpu* gpu,
fStartDS = SkTMin(fStartDS, (int)GrVkUniformHandler::kSamplerDescSet);
}

fVertexUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, vertexUniformSize));
fGeometryUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, geometryUniformSize));
fFragmentUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, fragmentUniformSize));

fNumSamplers = numSamplers;
@@ -116,8 +116,8 @@ void GrVkPipelineState::freeGPUResources(const GrVkGpu* gpu) {
fPipelineLayout = VK_NULL_HANDLE;
}

if (fVertexUniformBuffer) {
fVertexUniformBuffer->release(gpu);
if (fGeometryUniformBuffer) {
fGeometryUniformBuffer->release(gpu);
}

if (fFragmentUniformBuffer) {
@@ -143,7 +143,7 @@ void GrVkPipelineState::abandonGPUResources() {

fPipelineLayout = VK_NULL_HANDLE;

fVertexUniformBuffer->abandon();
fGeometryUniformBuffer->abandon();
fFragmentUniformBuffer->abandon();

for (int i = 0; i < fSamplers.count(); ++i) {
@@ -236,9 +236,9 @@ void GrVkPipelineState::setData(GrVkGpu* gpu,
this->writeSamplers(gpu, textureBindings, pipeline.getAllowSRGBInputs());
}

if (fVertexUniformBuffer.get() || fFragmentUniformBuffer.get()) {
if (fGeometryUniformBuffer || fFragmentUniformBuffer) {
if (fDataManager.uploadUniformBuffers(gpu,
fVertexUniformBuffer.get(),
fGeometryUniformBuffer.get(),
fFragmentUniformBuffer.get())
|| !fUniformDescriptorSet)
{
@@ -253,62 +253,60 @@ void GrVkPipelineState::setData(GrVkGpu* gpu,
}
}

void set_uniform_descriptor_writes(VkWriteDescriptorSet* descriptorWrite,
VkDescriptorBufferInfo* bufferInfo,
const GrVkUniformBuffer* buffer,
VkDescriptorSet descriptorSet,
uint32_t binding) {

memset(bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
bufferInfo->buffer = buffer->buffer();
bufferInfo->offset = buffer->offset();
bufferInfo->range = buffer->size();

memset(descriptorWrite, 0, sizeof(VkWriteDescriptorSet));
descriptorWrite->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrite->pNext = nullptr;
descriptorWrite->dstSet = descriptorSet,
descriptorWrite->dstBinding = binding;
descriptorWrite->dstArrayElement = 0;
descriptorWrite->descriptorCount = 1;
descriptorWrite->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrite->pImageInfo = nullptr;
descriptorWrite->pBufferInfo = bufferInfo;
descriptorWrite->pTexelBufferView = nullptr;
}
void GrVkPipelineState::writeUniformBuffers(const GrVkGpu* gpu) {
VkWriteDescriptorSet descriptorWrites[2];
memset(descriptorWrites, 0, 2 * sizeof(VkWriteDescriptorSet));
VkWriteDescriptorSet descriptorWrites[3];
VkDescriptorBufferInfo bufferInfos[3];

uint32_t firstUniformWrite = 0;
uint32_t uniformBindingUpdateCount = 0;
uint32_t writeCount = 0;

VkDescriptorBufferInfo vertBufferInfo;
// Vertex Uniform Buffer
if (fVertexUniformBuffer.get()) {
++uniformBindingUpdateCount;
memset(&vertBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
vertBufferInfo.buffer = fVertexUniformBuffer->buffer();
vertBufferInfo.offset = fVertexUniformBuffer->offset();
vertBufferInfo.range = fVertexUniformBuffer->size();

descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[0].pNext = nullptr;
descriptorWrites[0].dstSet = fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet];
descriptorWrites[0].dstBinding = GrVkUniformHandler::kVertexBinding;
descriptorWrites[0].dstArrayElement = 0;
descriptorWrites[0].descriptorCount = 1;
descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrites[0].pImageInfo = nullptr;
descriptorWrites[0].pBufferInfo = &vertBufferInfo;
descriptorWrites[0].pTexelBufferView = nullptr;
// Geometry Uniform Buffer
if (fGeometryUniformBuffer.get()) {
set_uniform_descriptor_writes(&descriptorWrites[writeCount],
&bufferInfos[writeCount],
fGeometryUniformBuffer.get(),
fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet],
GrVkUniformHandler::kGeometryBinding);
++writeCount;
}
VkDescriptorBufferInfo fragBufferInfo;
// Fragment Uniform Buffer
if (fFragmentUniformBuffer.get()) {
if (0 == uniformBindingUpdateCount) {
firstUniformWrite = 1;
}
++uniformBindingUpdateCount;
memset(&fragBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
fragBufferInfo.buffer = fFragmentUniformBuffer->buffer();
fragBufferInfo.offset = fFragmentUniformBuffer->offset();
fragBufferInfo.range = fFragmentUniformBuffer->size();

descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[1].pNext = nullptr;
descriptorWrites[1].dstSet = fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet];
descriptorWrites[1].dstBinding = GrVkUniformHandler::kFragBinding;;
descriptorWrites[1].dstArrayElement = 0;
descriptorWrites[1].descriptorCount = 1;
descriptorWrites[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrites[1].pImageInfo = nullptr;
descriptorWrites[1].pBufferInfo = &fragBufferInfo;
descriptorWrites[1].pTexelBufferView = nullptr;
set_uniform_descriptor_writes(&descriptorWrites[writeCount],
&bufferInfos[writeCount],
fFragmentUniformBuffer.get(),
fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet],
GrVkUniformHandler::kFragBinding);
++writeCount;
}

if (uniformBindingUpdateCount) {
if (writeCount) {
GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
uniformBindingUpdateCount,
&descriptorWrites[firstUniformWrite],
writeCount,
descriptorWrites,
0, nullptr));
}
}
@@ -401,8 +399,8 @@ void GrVkPipelineState::addUniformResources(GrVkCommandBuffer& commandBuffer) {
commandBuffer.addRecycledResource(fSamplerDescriptorSet);
}

if (fVertexUniformBuffer.get()) {
commandBuffer.addRecycledResource(fVertexUniformBuffer->resource());
if (fGeometryUniformBuffer.get()) {
commandBuffer.addRecycledResource(fGeometryUniformBuffer->resource());
}
if (fFragmentUniformBuffer.get()) {
commandBuffer.addRecycledResource(fFragmentUniformBuffer->resource());
@@ -94,7 +94,7 @@ private:
const GrVkDescriptorSetManager::Handle& samplerDSHandle,
const BuiltinUniformHandles& builtinUniformHandles,
const UniformInfoArray& uniforms,
uint32_t vertexUniformSize,
uint32_t geometryUniformSize,
uint32_t fragmentUniformSize,
uint32_t numSamplers,
GrGLSLPrimitiveProcessor* geometryProcessor,
@@ -210,7 +210,7 @@ private:
int fStartDS;
int fDSCount;

std::unique_ptr<GrVkUniformBuffer> fVertexUniformBuffer;
std::unique_ptr<GrVkUniformBuffer> fGeometryUniformBuffer;
std::unique_ptr<GrVkUniformBuffer> fFragmentUniformBuffer;

// GrVkResources used for sampling textures
@@ -180,7 +180,7 @@ GrVkPipelineState* GrVkPipelineStateBuilder::finalize(const GrStencilSettings& s
samplerDSHandle,
fUniformHandles,
fUniformHandler.fUniforms,
fUniformHandler.fCurrentVertexUBOOffset,
fUniformHandler.fCurrentGeometryUBOOffset,
fUniformHandler.fCurrentFragmentUBOOffset,
(uint32_t)fUniformHandler.numSamplers(),
fGeometryProcessor,
@@ -11,13 +11,13 @@
#include "GrVkUniformBuffer.h"

GrVkPipelineStateDataManager::GrVkPipelineStateDataManager(const UniformInfoArray& uniforms,
uint32_t vertexUniformSize,
uint32_t geometryUniformSize,
uint32_t fragmentUniformSize)
: fVertexUniformSize(vertexUniformSize)
: fGeometryUniformSize(geometryUniformSize)
, fFragmentUniformSize(fragmentUniformSize)
, fVertexUniformsDirty(false)
, fGeometryUniformsDirty(false)
, fFragmentUniformsDirty(false) {
fVertexUniformData.reset(vertexUniformSize);
fGeometryUniformData.reset(geometryUniformSize);
fFragmentUniformData.reset(fragmentUniformSize);
int count = uniforms.count();
fUniforms.push_back_n(count);
@@ -32,20 +32,24 @@ GrVkPipelineStateDataManager::GrVkPipelineStateDataManager(const UniformInfoArra
uniform.fArrayCount = uniformInfo.fVariable.getArrayCount();
uniform.fType = uniformInfo.fVariable.getType();
);
uniform.fBinding =
(kVertex_GrShaderFlag == uniformInfo.fVisibility) ? GrVkUniformHandler::kVertexBinding
: GrVkUniformHandler::kFragBinding;

if (kVertex_GrShaderFlag == uniformInfo.fVisibility ||
kGeometry_GrShaderFlag == uniformInfo.fVisibility) {
uniform.fBinding = GrVkUniformHandler::kGeometryBinding;
} else {
SkASSERT(kFragment_GrShaderFlag == uniformInfo.fVisibility);
uniform.fBinding = GrVkUniformHandler::kFragBinding;
}
uniform.fOffset = uniformInfo.fUBOffset;
}
}

void* GrVkPipelineStateDataManager::getBufferPtrAndMarkDirty(const Uniform& uni) const {
void* buffer;
if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
buffer = fVertexUniformData.get();
fVertexUniformsDirty = true;
}
else {
if (GrVkUniformHandler::kGeometryBinding == uni.fBinding) {
buffer = fGeometryUniformData.get();
fGeometryUniformsDirty = true;
} else {
SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
buffer = fFragmentUniformData.get();
fFragmentUniformsDirty = true;
@@ -233,9 +237,9 @@ template<int N> inline void GrVkPipelineStateDataManager::setMatrices(UniformHan
(1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));

void* buffer;
if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
buffer = fVertexUniformData.get();
fVertexUniformsDirty = true;
if (GrVkUniformHandler::kGeometryBinding == uni.fBinding) {
buffer = fGeometryUniformData.get();
fGeometryUniformsDirty = true;
} else {
SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
buffer = fFragmentUniformData.get();
@@ -268,19 +272,19 @@ template<> struct set_uniform_matrix<4> {
};

bool GrVkPipelineStateDataManager::uploadUniformBuffers(GrVkGpu* gpu,
GrVkUniformBuffer* vertexBuffer,
GrVkUniformBuffer* geometryBuffer,
GrVkUniformBuffer* fragmentBuffer) const {
bool updatedBuffer = false;
if (vertexBuffer && fVertexUniformsDirty) {
SkAssertResult(vertexBuffer->updateData(gpu, fVertexUniformData.get(), fVertexUniformSize,
&updatedBuffer));
fVertexUniformsDirty = false;
if (geometryBuffer && fGeometryUniformsDirty) {
SkAssertResult(geometryBuffer->updateData(gpu, fGeometryUniformData.get(),
fGeometryUniformSize, &updatedBuffer));
fGeometryUniformsDirty = false;
}

if (fragmentBuffer && fFragmentUniformsDirty) {
SkAssertResult(fragmentBuffer->updateData(gpu, fFragmentUniformData.get(),
fFragmentUniformSize, &updatedBuffer));
fFragmentUniformsDirty = false;
}

return updatedBuffer;
}
@@ -21,7 +21,7 @@ public:
typedef GrVkUniformHandler::UniformInfoArray UniformInfoArray;

GrVkPipelineStateDataManager(const UniformInfoArray&,
uint32_t vertexUniformSize,
uint32_t geometryUniformSize,
uint32_t fragmentUniformSize);

void set1i(UniformHandle, int32_t) const override;
@@ -49,11 +49,11 @@ public:
SkFAIL("Only supported in NVPR, which is not in vulkan");
}

// Returns true if either the vertex or fragment buffer needed to generate a new underlying
// Returns true if either the geometry or fragment buffers needed to generate a new underlying
// VkBuffer object in order upload data. If true is returned, this is a signal to the caller
// that they will need to update the descriptor set that is using these buffers.
bool uploadUniformBuffers(GrVkGpu* gpu,
GrVkUniformBuffer* vertexBuffer,
GrVkUniformBuffer* geometryBuffer,
GrVkUniformBuffer* fragmentBuffer) const;
private:
struct Uniform {
@@ -70,14 +70,14 @@ private:

void* getBufferPtrAndMarkDirty(const Uniform& uni) const;

uint32_t fVertexUniformSize;
uint32_t fGeometryUniformSize;
uint32_t fFragmentUniformSize;

SkTArray<Uniform, true> fUniforms;

mutable SkAutoMalloc fVertexUniformData;
mutable SkAutoMalloc fGeometryUniformData;
mutable SkAutoMalloc fFragmentUniformData;
mutable bool fVertexUniformsDirty;
mutable bool fGeometryUniformsDirty;
mutable bool fFragmentUniformsDirty;
};
@@ -50,7 +50,8 @@ void GrVkResourceProvider::init() {
}

// Init uniform descriptor objects
fDescriptorSetManagers.emplace_back(fGpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
fDescriptorSetManagers.emplace_back(dsm);
SkASSERT(1 == fDescriptorSetManagers.count());
fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
}
@@ -190,13 +191,15 @@ void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
if (fDescriptorSetManagers[i].isCompatible(type, &uniformHandler)) {
if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
*handle = GrVkDescriptorSetManager::Handle(i);
return;
}
}

fDescriptorSetManagers.emplace_back(fGpu, type, &uniformHandler);
GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
uniformHandler);
fDescriptorSetManagers.emplace_back(dsm);
*handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}
@@ -207,37 +210,39 @@ void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
if (fDescriptorSetManagers[i].isCompatible(type, visibilities)) {
if (fDescriptorSetManagers[i]->isCompatible(type, visibilities)) {
*handle = GrVkDescriptorSetManager::Handle(i);
return;
}
}

fDescriptorSetManagers.emplace_back(fGpu, type, visibilities);
GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
visibilities);
fDescriptorSetManagers.emplace_back(dsm);
*handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
SkASSERT(fUniformDSHandle.isValid());
return fDescriptorSetManagers[fUniformDSHandle.toIndex()].layout();
return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
const GrVkDescriptorSetManager::Handle& handle) const {
SkASSERT(handle.isValid());
return fDescriptorSetManagers[handle.toIndex()].layout();
return fDescriptorSetManagers[handle.toIndex()]->layout();
}

const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
SkASSERT(fUniformDSHandle.isValid());
return fDescriptorSetManagers[fUniformDSHandle.toIndex()].getDescriptorSet(fGpu,
fUniformDSHandle);
return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
fUniformDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
const GrVkDescriptorSetManager::Handle& handle) {
SkASSERT(handle.isValid());
return fDescriptorSetManagers[handle.toIndex()].getDescriptorSet(fGpu, handle);
return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
}

void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
@@ -246,7 +251,7 @@ void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet
SkASSERT(handle.isValid());
int managerIdx = handle.toIndex();
SkASSERT(managerIdx < fDescriptorSetManagers.count());
fDescriptorSetManagers[managerIdx].recycleDescriptorSet(descSet);
fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
}

GrVkPrimaryCommandBuffer* GrVkResourceProvider::findOrCreatePrimaryCommandBuffer() {
@@ -358,7 +363,7 @@ void GrVkResourceProvider::destroyResources(bool deviceLost) {
// We must release/destroy all command buffers and pipeline states before releasing the
// GrVkDescriptorSetManagers
for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
fDescriptorSetManagers[i].release(fGpu);
fDescriptorSetManagers[i]->release(fGpu);
}
fDescriptorSetManagers.reset();

@@ -418,7 +423,7 @@ void GrVkResourceProvider::abandonResources() {
// We must abandon all command buffers and pipeline states before abandoning the
// GrVkDescriptorSetManagers
for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
fDescriptorSetManagers[i].abandon();
fDescriptorSetManagers[i]->abandon();
}
fDescriptorSetManagers.reset();
@@ -257,7 +257,7 @@ private:
// Cache of GrVkPipelineStates
PipelineStateCache* fPipelineStateCache;

SkSTArray<4, GrVkDescriptorSetManager, true> fDescriptorSetManagers;
SkSTArray<4, std::unique_ptr<GrVkDescriptorSetManager>> fDescriptorSetManagers;

GrVkDescriptorSetManager::Handle fUniformDSHandle;
};
@@ -145,9 +145,12 @@ GrGLSLUniformHandler::UniformHandle GrVkUniformHandler::internalAddUniformArray(
int arrayCount,
const char** outName) {
SkASSERT(name && strlen(name));
SkDEBUGCODE(static const uint32_t kVisibilityMask = kVertex_GrShaderFlag|kFragment_GrShaderFlag);
SkASSERT(0 == (~kVisibilityMask & visibility));
SkASSERT(0 != visibility);
// For now asserting the the visibility is either geometry types (vertex, tesselation, geometry,
// etc.) or only fragment.
SkASSERT(kVertex_GrShaderFlag == visibility ||
kGeometry_GrShaderFlag == visibility ||
(kVertex_GrShaderFlag | kGeometry_GrShaderFlag) == visibility ||
kFragment_GrShaderFlag == visibility);
SkASSERT(kDefault_GrSLPrecision == precision || GrSLTypeIsFloatType(type));
GrSLTypeIsFloatType(type);
@@ -165,16 +168,20 @@ GrGLSLUniformHandler::UniformHandle GrVkUniformHandler::internalAddUniformArray(
}
fProgramBuilder->nameVariable(uni.fVariable.accessName(), prefix, name, mangleName);
uni.fVariable.setArrayCount(arrayCount);
// For now asserting the the visibility is either only vertex or only fragment
SkASSERT(kVertex_GrShaderFlag == visibility || kFragment_GrShaderFlag == visibility);
uni.fVisibility = visibility;
uni.fVariable.setPrecision(precision);
// When outputing the GLSL, only the outer uniform block will get the Uniform modifier. Thus
// we set the modifier to none for all uniforms declared inside the block.
uni.fVariable.setTypeModifier(GrShaderVar::kNone_TypeModifier);

uint32_t* currentOffset = kVertex_GrShaderFlag == visibility ? &fCurrentVertexUBOOffset
: &fCurrentFragmentUBOOffset;
uint32_t* currentOffset;
uint32_t geomStages = kVertex_GrShaderFlag | kGeometry_GrShaderFlag;
if (geomStages & visibility) {
currentOffset = &fCurrentGeometryUBOOffset;
} else {
SkASSERT(kFragment_GrShaderFlag == visibility);
currentOffset = &fCurrentFragmentUBOOffset;
}
get_ubo_aligned_offset(&uni.fUBOffset, currentOffset, type, arrayCount);

SkString layoutQualifier;
@@ -194,9 +201,10 @@ GrGLSLUniformHandler::SamplerHandle GrVkUniformHandler::addSampler(uint32_t visi
GrSLPrecision precision,
const char* name) {
SkASSERT(name && strlen(name));
SkDEBUGCODE(static const uint32_t kVisMask = kVertex_GrShaderFlag | kFragment_GrShaderFlag);
SkASSERT(0 == (~kVisMask & visibility));
SkASSERT(0 != visibility);
// For now asserting the the visibility is either only vertex, geometry, or fragment
SkASSERT(kVertex_GrShaderFlag == visibility ||
kFragment_GrShaderFlag == visibility ||
kGeometry_GrShaderFlag == visibility);
SkString mangleName;
char prefix = 'u';
fProgramBuilder->nameVariable(&mangleName, prefix, name, true);
@@ -218,7 +226,9 @@ GrGLSLUniformHandler::SamplerHandle GrVkUniformHandler::addSampler(uint32_t visi
}

void GrVkUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
SkASSERT(kVertex_GrShaderFlag == visibility || kFragment_GrShaderFlag == visibility);
SkASSERT(kVertex_GrShaderFlag == visibility ||
kGeometry_GrShaderFlag == visibility ||
kFragment_GrShaderFlag == visibility);

for (int i = 0; i < fSamplers.count(); ++i) {
const UniformInfo& sampler = fSamplers[i];
@@ -233,7 +243,7 @@ void GrVkUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString*
SkString uniformsString;
for (int i = 0; i < fUniforms.count(); ++i) {
const UniformInfo& localUniform = fUniforms[i];
if (visibility == localUniform.fVisibility) {
if (visibility & localUniform.fVisibility) {
if (GrSLTypeIsFloatType(localUniform.fVariable.getType())) {
#ifdef SK_DEBUG
if (!firstOffsetCheck) {
@@ -249,9 +259,19 @@ void GrVkUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString*
}
}
if (!uniformsString.isEmpty()) {
uint32_t uniformBinding = (visibility == kVertex_GrShaderFlag) ? kVertexBinding
: kFragBinding;
const char* stage = (visibility == kVertex_GrShaderFlag) ? "vertex" : "fragment";
uint32_t uniformBinding;
const char* stage;
if (kVertex_GrShaderFlag == visibility) {
uniformBinding = kGeometryBinding;
stage = "vertex";
} else if (kGeometry_GrShaderFlag == visibility) {
uniformBinding = kGeometryBinding;
stage = "geometry";
} else {
SkASSERT(kFragment_GrShaderFlag == visibility);
uniformBinding = kFragBinding;
stage = "fragment";
}
out->appendf("layout (set=%d, binding=%d) uniform %sUniformBuffer\n{\n",
kUniformBufferDescSet, uniformBinding, stage);
out->appendf("%s\n};\n", uniformsString.c_str());
@@ -22,7 +22,7 @@ public:
kTexelBufferDescSet = 2,
};
enum {
kVertexBinding = 0,
kGeometryBinding = 0,
kFragBinding = 1,
};

@@ -48,7 +48,7 @@ private:
, fUniforms(kUniformsPerBlock)
, fSamplers(kUniformsPerBlock)
, fTexelBuffers(kUniformsPerBlock)
, fCurrentVertexUBOOffset(0)
, fCurrentGeometryUBOOffset(0)
, fCurrentFragmentUBOOffset(0)
, fCurrentSamplerBinding(0) {
}
@@ -98,7 +98,7 @@ private:

void appendUniformDecls(GrShaderFlags, SkString*) const override;

bool hasVertexUniforms() const { return fCurrentVertexUBOOffset > 0; }
bool hasGeometryUniforms() const { return fCurrentGeometryUBOOffset > 0; }
bool hasFragmentUniforms() const { return fCurrentFragmentUBOOffset > 0; }

@@ -112,7 +112,7 @@ private:
SkTArray<GrSwizzle> fSamplerSwizzles;
UniformInfoArray fTexelBuffers;

uint32_t fCurrentVertexUBOOffset;
uint32_t fCurrentGeometryUBOOffset;
uint32_t fCurrentFragmentUBOOffset;
uint32_t fCurrentSamplerBinding;