Add support for Vulkan non-coherent advanced blends.

There are three parts to this change:
1) Implement the GrVkGpu::xferBarrier virtual.
2) Plumb the need for an xfer barrier from the ProgramInfo when getting a render pass.
3) Track the need for an xfer barrier on GrOpsTask via GrProcessorSet::Analysis.

Bug: skia:10409
Change-Id: I6ab8f36719b3a4db576535eb6ed1c579ae34b7a4
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/310439
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Parent: 13ec544d3f
Commit: 9a18b08981
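Background for the diff below: with VK_EXT_blend_operation_advanced in non-coherent mode, a draw that blends against pixels written by an earlier draw must be separated from it by a by-region color-attachment barrier issued inside the render pass, and such a barrier is only legal if the render pass declares a matching subpass self-dependency. That is why the new usesXferBarriers flag has to be known at render pass creation time. The following is a minimal sketch of such a barrier at the raw Vulkan level; it is not code from this CL, the helper name and the cmdBuf/colorImage handles are hypothetical, and the Skia-side equivalent is GrVkGpu::xferBarrier in the diff. A companion sketch of the corresponding subpass self-dependency follows the last hunk.

#include <vulkan/vulkan.h>

// Sketch only (not from this CL): issue a blend barrier between two overlapping draws
// recorded in the same subpass. Assumes VK_EXT_blend_operation_advanced is enabled and
// the render pass declares a by-region self-dependency on this subpass.
void recordAdvancedBlendBarrier(VkCommandBuffer cmdBuf, VkImage colorImage) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask       = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    barrier.dstAccessMask       = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
    // The layout must not change across a barrier that lives inside a subpass.
    barrier.oldLayout           = VK_IMAGE_LAYOUT_GENERAL;
    barrier.newLayout           = VK_IMAGE_LAYOUT_GENERAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image               = colorImage;
    barrier.subresourceRange    = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};

    // Both stages are color-attachment output and the dependency is by-region, matching
    // the restrictions the new SK_DEBUG check in GrVkCommandBuffer::pipelineBarrier allows.
    vkCmdPipelineBarrier(cmdBuf,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_DEPENDENCY_BY_REGION_BIT,
                         0, nullptr, 0, nullptr, 1, &barrier);
}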
@@ -359,7 +359,8 @@ public:
             const SkIRect& bounds,
             const GrOpsRenderPass::LoadAndStoreInfo&,
             const GrOpsRenderPass::StencilLoadAndStoreInfo&,
-            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) = 0;
+            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+            bool usesXferBarriers) = 0;
 
     // Called by GrDrawingManager when flushing.
     // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
@@ -466,7 +466,8 @@ static GrOpsRenderPass* create_render_pass(
         GrGpu* gpu, GrRenderTarget* rt, GrStencilAttachment* stencil, GrSurfaceOrigin origin,
         const SkIRect& bounds, GrLoadOp colorLoadOp, const SkPMColor4f& loadClearColor,
         GrLoadOp stencilLoadOp, GrStoreOp stencilStoreOp,
-        const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
+        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+        bool usesXferBarriers) {
     const GrOpsRenderPass::LoadAndStoreInfo kColorLoadStoreInfo {
         colorLoadOp,
         GrStoreOp::kStore,
@@ -484,7 +485,8 @@ static GrOpsRenderPass* create_render_pass(
     };
 
     return gpu->getOpsRenderPass(rt, stencil, origin, bounds,
-                                 kColorLoadStoreInfo, stencilLoadAndStoreInfo, sampledProxies);
+                                 kColorLoadStoreInfo, stencilLoadAndStoreInfo, sampledProxies,
+                                 usesXferBarriers);
 }
 
 // TODO: this is where GrOp::renderTarget is used (which is fine since it
@@ -569,7 +571,7 @@ bool GrOpsTask::onExecute(GrOpFlushState* flushState) {
     GrOpsRenderPass* renderPass = create_render_pass(
             flushState->gpu(), proxy->peekRenderTarget(), stencil, this->target(0).origin(),
             fClippedContentBounds, fColorLoadOp, fLoadClearColor, stencilLoadOp, stencilStoreOp,
-            fSampledProxies);
+            fSampledProxies, fUsesXferBarriers);
     if (!renderPass) {
         return false;
     }
@@ -115,8 +115,15 @@ public:
         if (dstProxyView.proxy()) {
            this->addSampledTexture(dstProxyView.proxy());
            addDependency(dstProxyView.proxy(), GrMipmapped::kNo);
+           if (this->target(0).asTextureProxy() == dstProxyView.proxy()) {
+               // Since we are sampling and drawing to the same surface we will need to use
+               // texture barriers.
+               fUsesXferBarriers |= true;
+           }
        }
 
+       fUsesXferBarriers |= processorAnalysis.usesNonCoherentHWBlending();
+
        this->recordOp(std::move(op), processorAnalysis, clip.doesClip() ? &clip : nullptr,
                       &dstProxyView, caps);
    }
@@ -313,6 +320,8 @@ private:
     SkIRect fLastDevClipBounds;
     int fLastClipNumAnalyticElements;
 
+    bool fUsesXferBarriers = false;
+
     // For ops/opsTask we have mean: 5 stdDev: 28
     SkSTArray<25, OpChain> fOpChains;
 
@@ -160,6 +160,8 @@ GrProcessorSet::Analysis GrProcessorSet::finalize(
             SkToBool(props & GrXPFactory::AnalysisProperties::kCompatibleWithCoverageAsAlpha);
     analysis.fRequiresNonOverlappingDraws =
             SkToBool(props & GrXPFactory::AnalysisProperties::kRequiresNonOverlappingDraws);
+    analysis.fUsesNonCoherentHWBlending =
+            SkToBool(props & GrXPFactory::AnalysisProperties::kUsesNonCoherentHWBlending);
     if (props & GrXPFactory::AnalysisProperties::kIgnoresInputColor) {
         colorFPsToEliminate = this->hasColorFragmentProcessor() ? 1 : 0;
         analysis.fInputColorType =
@@ -85,6 +85,7 @@ public:
         bool inputColorIsOverridden() const {
             return fInputColorType == kOverridden_InputColorType;
         }
+        bool usesNonCoherentHWBlending() const { return fUsesNonCoherentHWBlending; }
 
     private:
         constexpr Analysis(Empty)
@@ -94,6 +95,7 @@ public:
                 , fRequiresNonOverlappingDraws(false)
                 , fHasColorFragmentProcessor(false)
                 , fIsInitialized(true)
+                , fUsesNonCoherentHWBlending(false)
                 , fInputColorType(kOriginal_InputColorType) {}
         enum InputColorType : uint32_t {
             kOriginal_InputColorType,
@@ -111,6 +113,7 @@ public:
         PackedBool fRequiresNonOverlappingDraws : 1;
         PackedBool fHasColorFragmentProcessor : 1;
         PackedBool fIsInitialized : 1;
+        PackedBool fUsesNonCoherentHWBlending : 1;
         PackedInputColorType fInputColorType : 2;
 
         friend class GrProcessorSet;
@@ -277,6 +277,10 @@ public:
          * texture or because we need an xfer barrier).
          */
        kRequiresNonOverlappingDraws = 0x20,
+       /**
+        * If set the draw will use fixed function non coherent advanced blends.
+        */
+       kUsesNonCoherentHWBlending = 0x40,
    };
    GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(AnalysisProperties);
 
@@ -102,11 +102,12 @@ void GrD3DGpu::destroyResources() {
 }
 
 GrOpsRenderPass* GrD3DGpu::getOpsRenderPass(
         GrRenderTarget* rt, GrStencilAttachment*,
         GrSurfaceOrigin origin, const SkIRect& bounds,
         const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
         const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
-        const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
+        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+        bool usesXferBarriers) {
     if (!fCachedOpsRenderPass) {
         fCachedOpsRenderPass.reset(new GrD3DOpsRenderPass(this));
     }
@@ -84,7 +84,8 @@ public:
             GrSurfaceOrigin, const SkIRect&,
             const GrOpsRenderPass::LoadAndStoreInfo&,
             const GrOpsRenderPass::StencilLoadAndStoreInfo&,
-            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) override;
+            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+            bool usesXferBarriers) override;
 
     void addResourceBarriers(sk_sp<GrManagedResource> resource,
                              int numBarriers,
@@ -140,7 +140,8 @@ GrOpsRenderPass* GrDawnGpu::getOpsRenderPass(
         GrSurfaceOrigin origin, const SkIRect& bounds,
         const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
         const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
-        const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
+        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+        bool usesXferBarriers) {
     fOpsRenderPass.reset(new GrDawnOpsRenderPass(this, rt, origin, colorInfo, stencilInfo));
     return fOpsRenderPass.get();
 }
@@ -68,7 +68,8 @@ public:
             GrSurfaceOrigin, const SkIRect& bounds,
             const GrOpsRenderPass::LoadAndStoreInfo&,
             const GrOpsRenderPass::StencilLoadAndStoreInfo&,
-            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) override;
+            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+            bool usesXferBarriers) override;
 
     SkSL::Compiler* shaderCompiler() const {
         return fCompiler.get();
@@ -357,7 +357,8 @@ GrXPFactory::AnalysisProperties CustomXPFactory::analysisProperties(
             return AnalysisProperties::kCompatibleWithCoverageAsAlpha;
         } else {
             return AnalysisProperties::kCompatibleWithCoverageAsAlpha |
-                   AnalysisProperties::kRequiresNonOverlappingDraws;
+                   AnalysisProperties::kRequiresNonOverlappingDraws |
+                   AnalysisProperties::kUsesNonCoherentHWBlending;
         }
     }
     return AnalysisProperties::kCompatibleWithCoverageAsAlpha |
@@ -2203,7 +2203,8 @@ GrOpsRenderPass* GrGLGpu::getOpsRenderPass(
         GrSurfaceOrigin origin, const SkIRect& bounds,
         const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
         const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
-        const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
+        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+        bool usesXferBarriers) {
     if (!fCachedOpsRenderPass) {
         fCachedOpsRenderPass = std::make_unique<GrGLOpsRenderPass>(this);
     }
@@ -124,7 +124,8 @@ public:
             GrSurfaceOrigin, const SkIRect&,
             const GrOpsRenderPass::LoadAndStoreInfo&,
             const GrOpsRenderPass::StencilLoadAndStoreInfo&,
-            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) override;
+            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+            bool usesXferBarriers) override;
 
     void invalidateBoundRenderTarget() {
         fHWBoundRenderTargetUniqueID.makeInvalid();
@@ -57,7 +57,8 @@ GrOpsRenderPass* GrMockGpu::getOpsRenderPass(
         GrSurfaceOrigin origin, const SkIRect& bounds,
         const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
         const GrOpsRenderPass::StencilLoadAndStoreInfo&,
-        const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
+        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+        bool usesXferBarriers) {
     return new GrMockOpsRenderPass(this, rt, origin, colorInfo);
 }
 
@@ -31,7 +31,8 @@ public:
             const SkIRect&,
             const GrOpsRenderPass::LoadAndStoreInfo&,
             const GrOpsRenderPass::StencilLoadAndStoreInfo&,
-            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) override;
+            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+            bool usesXferBarriers) override;
 
     GrFence SK_WARN_UNUSED_RESULT insertFence() override { return 0; }
     bool waitFence(GrFence) override { return true; }
@@ -88,7 +88,8 @@ public:
             GrSurfaceOrigin, const SkIRect& bounds,
             const GrOpsRenderPass::LoadAndStoreInfo&,
             const GrOpsRenderPass::StencilLoadAndStoreInfo&,
-            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) override;
+            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+            bool usesXferBarriers) override;
 
     SkSL::Compiler* shaderCompiler() const { return fCompiler.get(); }
 
@@ -175,7 +175,8 @@ GrOpsRenderPass* GrMtlGpu::getOpsRenderPass(
         GrSurfaceOrigin origin, const SkIRect& bounds,
         const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
         const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
-        const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
+        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+        bool usesXferBarriers) {
     return new GrMtlOpsRenderPass(this, renderTarget, origin, colorInfo, stencilInfo);
 }
 
@@ -592,9 +592,7 @@ void GrVkCaps::initGrCaps(const GrVkInterface* vkInterface,
         if (blendFeatures && blendFeatures->advancedBlendCoherentOperations == VK_TRUE) {
             fBlendEquationSupport = kAdvancedCoherent_BlendEquationSupport;
         } else {
-            // TODO: Currently non coherent blends are not supported in our vulkan backend. They
-            // require us to support self dependencies in our render passes.
-            // fBlendEquationSupport = kAdvanced_BlendEquationSupport;
+            fBlendEquationSupport = kAdvanced_BlendEquationSupport;
         }
     }
 }
@@ -1709,14 +1707,17 @@ GrProgramDesc GrVkCaps::makeDesc(GrRenderTarget* rt, const GrProgramInfo& progra
     // GrVkPipelineStateBuilder.cpp).
     b.add32(GrVkGpu::kShader_PersistentCacheKeyType);
 
-    bool usesXferBarrier = false; // TODO: get this from GrProgramInfo
+    // Currently we only support blend barriers with the advanced blend function. Thus we pass in
+    // nullptr for the texture.
+    auto barrierType = programInfo.pipeline().xferBarrierType(nullptr, *this);
+    bool usesXferBarriers = barrierType == kBlend_GrXferBarrierType;
 
     if (rt) {
         GrVkRenderTarget* vkRT = (GrVkRenderTarget*) rt;
 
         bool needsStencil = programInfo.numStencilSamples() || programInfo.isStencilEnabled();
         // TODO: support failure in getSimpleRenderPass
-        const GrVkRenderPass* rp = vkRT->getSimpleRenderPass(needsStencil, usesXferBarrier);
+        const GrVkRenderPass* rp = vkRT->getSimpleRenderPass(needsStencil, usesXferBarriers);
         SkASSERT(rp);
         rp->genKey(&b);
 
@@ -1729,7 +1730,7 @@ GrProgramDesc GrVkCaps::makeDesc(GrRenderTarget* rt, const GrProgramInfo& progra
             GrVkRenderTarget::ReconstructAttachmentsDescriptor(*this, programInfo,
                                                                &attachmentsDescriptor,
                                                                &attachmentFlags);
-            SkASSERT(rp->isCompatible(attachmentsDescriptor, attachmentFlags, usesXferBarrier));
+            SkASSERT(rp->isCompatible(attachmentsDescriptor, attachmentFlags, usesXferBarriers));
         }
 #endif
     } else {
@@ -1742,7 +1743,7 @@ GrProgramDesc GrVkCaps::makeDesc(GrRenderTarget* rt, const GrProgramInfo& progra
         // kExternal_AttachmentFlag is only set for wrapped secondary command buffers - which
         // will always go through the above 'rt' path (i.e., we can always pass 0 as the final
         // parameter to GenKey).
-        GrVkRenderPass::GenKey(&b, attachmentFlags, attachmentsDescriptor, usesXferBarrier, 0);
+        GrVkRenderPass::GenKey(&b, attachmentFlags, attachmentsDescriptor, usesXferBarriers, 0);
     }
 
     GrStencilSettings stencil = programInfo.nonGLStencilSettings();
@@ -97,18 +97,28 @@ void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                         void* barrier) {
     SkASSERT(!this->isWrapped());
     SkASSERT(fIsActive);
+#ifdef SK_DEBUG
     // For images we can have barriers inside of render passes but they require us to add more
     // support in subpasses which need self dependencies to have barriers inside them. Also, we can
     // never have buffer barriers inside of a render pass. For now we will just assert that we are
     // not in a render pass.
-    SkASSERT(!fActiveRenderPass);
+    bool isValidSubpassBarrier = false;
+    if (barrierType == kImageMemory_BarrierType) {
+        VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
+        isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
+                                (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
+                                (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
+                                byRegion;
+    }
+    SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
+#endif
 
     if (barrierType == kBufferMemory_BarrierType) {
-        const VkBufferMemoryBarrier* barrierPtr = reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
+        const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
-        const VkImageMemoryBarrier* barrierPtr = reinterpret_cast<VkImageMemoryBarrier*>(barrier);
+        const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check if we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier that is already in current batch. If it does, then we must
        // submit the first batch because the vulkan spec does not define a specific ordering for
@@ -136,7 +146,6 @@ void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
         fImageBarriers.push_back(*barrierPtr);
     }
     fBarriersByRegion |= byRegion;
-
     fSrcStageMask = fSrcStageMask | srcStageMask;
     fDstStageMask = fDstStageMask | dstStageMask;
 
@@ -144,9 +153,12 @@ void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
     if (resource) {
         this->addResource(resource);
     }
+    if (fActiveRenderPass) {
+        this->submitPipelineBarriers(gpu, true);
+    }
 }
 
-void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu) {
+void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency) {
     SkASSERT(fIsActive);
 
     // Currently we never submit a pipeline barrier without at least one memory barrier.
@@ -155,7 +167,7 @@ void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu) {
         // support in subpasses which need self dependencies to have barriers inside them. Also, we
         // can never have buffer barriers inside of a render pass. For now we will just assert that
         // we are not in a render pass.
-        SkASSERT(!fActiveRenderPass);
+        SkASSERT(!fActiveRenderPass || forSelfDependency);
         SkASSERT(!this->isWrapped());
         SkASSERT(fSrcStageMask && fDstStageMask);
 
@@ -145,7 +145,7 @@ protected:
 
     void addingWork(const GrVkGpu* gpu);
 
-    void submitPipelineBarriers(const GrVkGpu* gpu);
+    void submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency = false);
 
     SkTDArray<const GrManagedResource*> fTrackedResources;
     SkTDArray<const GrRecycledResource*> fTrackedRecycledResources;
@@ -317,15 +317,14 @@ GrOpsRenderPass* GrVkGpu::getOpsRenderPass(
         GrSurfaceOrigin origin, const SkIRect& bounds,
         const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
         const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
-        const SkTArray<GrSurfaceProxy*, true>& sampledProxies) {
+        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+        bool usesXferBarriers) {
     if (!fCachedOpsRenderPass) {
         fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
     }
 
-    bool usesXferBarrier = false; // TODO: we should be passing this value
-
     if (!fCachedOpsRenderPass->set(rt, stencil, origin, bounds, colorInfo, stencilInfo,
-                                   sampledProxies, usesXferBarrier)) {
+                                   sampledProxies, usesXferBarriers)) {
         return nullptr;
     }
     return fCachedOpsRenderPass.get();
@@ -1877,6 +1876,25 @@ void GrVkGpu::querySampleLocations(GrRenderTarget* renderTarget,
     }
 }
 
+void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
+    SkASSERT(barrierType == kBlend_GrXferBarrierType);
+    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
+    VkImageMemoryBarrier barrier;
+    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+    barrier.pNext = nullptr;
+    barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+                            VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
+    barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
+    barrier.oldLayout = vkRT->currentLayout();
+    barrier.newLayout = vkRT->currentLayout();
+    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+    barrier.image = vkRT->image();
+    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, vkRT->mipLevels(), 0, 1};
+    this->addImageMemoryBarrier(vkRT->resource(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+                                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, true, &barrier);
+}
+
 void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
     SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
 
@@ -1894,7 +1912,10 @@ bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInf
     GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
                                                        &attachmentsDescriptor, &attachmentFlags);
 
-    bool willReadDst = false; // TODO: get this from GrProgramInfo
+    // Currently we only support blend barriers with the advanced blend function. Thus we pass in
+    // nullptr for the texture.
+    auto barrierType = programInfo.pipeline().xferBarrierType(nullptr, *this->caps());
+    bool willReadDst = barrierType == kBlend_GrXferBarrierType;
     sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
             &attachmentsDescriptor,
             attachmentFlags,
@@ -75,7 +75,7 @@ public:
 
     void querySampleLocations(GrRenderTarget*, SkTArray<SkPoint>*) override;
 
-    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
+    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;
 
     bool setBackendTextureState(const GrBackendTexture&,
                                 const GrBackendSurfaceMutableState&,
@@ -110,7 +110,8 @@ public:
             GrSurfaceOrigin, const SkIRect&,
             const GrOpsRenderPass::LoadAndStoreInfo&,
            const GrOpsRenderPass::StencilLoadAndStoreInfo&,
-            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) override;
+            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
+            bool usesXferBarriers) override;
 
     void addBufferMemoryBarrier(const GrManagedResource*,
                                 VkPipelineStageFlags srcStageMask,
@@ -194,7 +194,7 @@ void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
         fInfo.fImage,                                // image
         { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }  // subresourceRange
     };
-
+    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
     gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                                &imageMemoryBarrier);
 
@@ -212,7 +212,7 @@ bool GrVkOpsRenderPass::set(GrRenderTarget* rt, GrStencilAttachment* stencil,
                             const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                             const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
-                            bool usesXferBarrier) {
+                            bool usesXferBarriers) {
     SkASSERT(!fRenderTarget);
     SkASSERT(fGpu == rt->getContext()->priv().getGpu());
 
@@ -243,7 +243,7 @@ bool GrVkOpsRenderPass::set(GrRenderTarget* rt, GrStencilAttachment* stencil,
     SkASSERT(bounds.isEmpty() || SkIRect::MakeWH(rt->width(), rt->height()).contains(bounds));
     fBounds = bounds;
 
-    fUsesXferBarriers = usesXferBarrier;
+    fUsesXferBarriers = usesXferBarriers;
 
     if (this->wrapsSecondaryCommandBuffer()) {
         return this->initWrapped();
@@ -36,7 +36,7 @@ public:
              const GrOpsRenderPass::LoadAndStoreInfo&,
              const GrOpsRenderPass::StencilLoadAndStoreInfo&,
              const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
-             bool usesXferBarrier);
+             bool usesXferBarriers);
     void reset();
 
     void submit();
@@ -23,6 +23,7 @@ void setup_vk_attachment_description(VkAttachmentDescription* attachment,
     SkAssertResult(GrSampleCountToVkSampleCount(desc.fSamples, &attachment->samples));
     switch (layout) {
         case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+        case VK_IMAGE_LAYOUT_GENERAL:
             attachment->loadOp = desc.fLoadStoreOps.fLoadOp;
             attachment->storeOp = desc.fLoadStoreOps.fStoreOp;
             attachment->stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
@@ -75,7 +76,7 @@ GrVkRenderPass* GrVkRenderPass::Create(GrVkGpu* gpu,
     // need to create a self dependency for that subpass so that we can use barriers. Finally, the
     // color attachment will need to be set to the GENERAL layout since it will be used for reading
     // and writing here.
-    SkASSERT(!needsSelfDependency);
+    SkASSERT(!needsSelfDependency || gpu->caps()->advancedBlendEquationSupport());
 
     uint32_t numAttachments = attachmentsDescriptor->fAttachmentCount;
     // Attachment descriptions to be set on the render pass
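Note on the final hunk: the relaxed SkASSERT only permits needsSelfDependency when the caps report advanced blend support; creating the subpass self-dependency itself is left as the TODO the comment describes. For reference, a sketch of what that dependency could look like when building the render pass is below. This is an assumption based on the Vulkan API, not code from this CL; the helper name is hypothetical and the VkRenderPassCreateInfo wiring is omitted.

#include <vulkan/vulkan.h>

// Sketch only (not from this CL): a self-dependency on subpass 0 that would make a
// by-region blend barrier legal inside the render pass.
VkSubpassDependency makeAdvancedBlendSelfDependency() {
    VkSubpassDependency dep = {};
    dep.srcSubpass      = 0;   // same subpass on both sides: a self-dependency
    dep.dstSubpass      = 0;
    dep.srcStageMask    = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dep.dstStageMask    = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dep.srcAccessMask   = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    dep.dstAccessMask   = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
                          VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    dep.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
    return dep;  // would be passed via VkRenderPassCreateInfo::pDependencies
}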