Remove AMD workaround for new command buffer on pipeline changes.
Change-Id: Iab4e53513b0d42eed60b2910455eb15ef7e3d359
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/233158
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
commit 228518f0f8
parent 1dac7bc29f
@@ -436,12 +436,6 @@ void GrVkCaps::applyDriverCorrectnessWorkarounds(const VkPhysicalDevicePropertie
     }
 #endif
 
-    // AMD seems to have issues binding new VkPipelines inside a secondary command buffer.
-    // Current workaround is to use a different secondary command buffer for each new VkPipeline.
-    if (kAMD_VkVendor == properties.vendorID) {
-        fNewCBOnPipelineChange = true;
-    }
-
     // On Mali galaxy s7 we see lots of rendering issues when we suballocate VkImages.
     if (kARM_VkVendor == properties.vendorID) {
         fShouldAlwaysUseDedicatedImageMemory = true;
@@ -90,13 +90,6 @@ public:
         return fMustSleepOnTearDown;
     }
 
-    // Returns true if while adding commands to command buffers, we must make a new command buffer
-    // everytime we want to bind a new VkPipeline. This is true for both primary and secondary
-    // command buffers. This is to work around a driver bug specifically on AMD.
-    bool newCBOnPipelineChange() const {
-        return fNewCBOnPipelineChange;
-    }
-
     // Returns true if we should always make dedicated allocations for VkImages.
     bool shouldAlwaysUseDedicatedImageMemory() const {
         return fShouldAlwaysUseDedicatedImageMemory;
@@ -274,7 +267,6 @@ private:
 
     bool fMustDoCopiesFromOrigin = false;
     bool fMustSleepOnTearDown = false;
-    bool fNewCBOnPipelineChange = false;
    bool fShouldAlwaysUseDedicatedImageMemory = false;
 
     bool fAvoidUpdateBuffers = false;
@@ -2476,7 +2476,7 @@ void adjust_bounds_to_granularity(SkIRect* dstBounds, const SkIRect& srcBounds,
 }
 
 void GrVkGpu::submitSecondaryCommandBuffer(
-        SkTArray<std::unique_ptr<GrVkSecondaryCommandBuffer>>& buffers,
+        std::unique_ptr<GrVkSecondaryCommandBuffer> buffer,
         const GrVkRenderPass* renderPass,
         const VkClearValue* colorClear,
         GrVkRenderTarget* target, GrSurfaceOrigin origin,
@@ -2518,10 +2518,7 @@ void GrVkGpu::submitSecondaryCommandBuffer(
     clears[1].depthStencil.stencil = 0;
 
     fCurrentCmdBuffer->beginRenderPass(this, renderPass, clears, *target, *pBounds, true);
-    for (int i = 0; i < buffers.count(); ++i) {
-        std::unique_ptr<GrVkSecondaryCommandBuffer> scb = std::move(buffers[i]);
-        fCurrentCmdBuffer->executeCommands(this, std::move(scb));
-    }
+    fCurrentCmdBuffer->executeCommands(this, std::move(buffer));
     fCurrentCmdBuffer->endRenderPass(this);
 
     this->didWriteToSurface(target, origin, &bounds);
@@ -138,7 +138,7 @@ public:
         this->internalResolveRenderTarget(target, true);
     }
 
-    void submitSecondaryCommandBuffer(SkTArray<std::unique_ptr<GrVkSecondaryCommandBuffer>>&,
+    void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>,
                                       const GrVkRenderPass*,
                                       const VkClearValue* colorClear,
                                       GrVkRenderTarget*, GrSurfaceOrigin,
@@ -190,7 +190,7 @@ void GrVkGpuRTCommandBuffer::init() {
         cbInfo.fLoadStoreState = LoadStoreState::kStartsWithDiscard;
     }
 
-    cbInfo.fCommandBuffers.push_back(fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu));
+    cbInfo.fCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
     cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
 }
 
@@ -205,9 +205,8 @@ void GrVkGpuRTCommandBuffer::initWrapped() {
     cbInfo.fRenderPass->ref();
 
     cbInfo.fBounds.setEmpty();
-    std::unique_ptr<GrVkSecondaryCommandBuffer> scb(
+    cbInfo.fCommandBuffer.reset(
             GrVkSecondaryCommandBuffer::Create(vkRT->getExternalSecondaryCommandBuffer()));
-    cbInfo.fCommandBuffers.push_back(std::move(scb));
     cbInfo.currentCmdBuf()->begin(fGpu, nullptr, cbInfo.fRenderPass);
 }
 
@@ -317,7 +316,7 @@ void GrVkGpuRTCommandBuffer::submit() {
         SkIRect iBounds;
         cbInfo.fBounds.roundOut(&iBounds);
 
-        fGpu->submitSecondaryCommandBuffer(cbInfo.fCommandBuffers, cbInfo.fRenderPass,
+        fGpu->submitSecondaryCommandBuffer(std::move(cbInfo.fCommandBuffer), cbInfo.fRenderPass,
                                            &cbInfo.fColorClearValue, vkRT, fOrigin, iBounds);
     }
 }
@@ -358,10 +357,8 @@ void GrVkGpuRTCommandBuffer::set(GrRenderTarget* rt, GrSurfaceOrigin origin,
 void GrVkGpuRTCommandBuffer::reset() {
     for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
         CommandBufferInfo& cbInfo = fCommandBufferInfos[i];
-        for (int j = 0; j < cbInfo.fCommandBuffers.count(); ++j) {
-            if (cbInfo.fCommandBuffers[j]) {
-                cbInfo.fCommandBuffers[j].release()->recycle(fGpu);
-            }
+        if (cbInfo.fCommandBuffer) {
+            cbInfo.fCommandBuffer.release()->recycle(fGpu);
         }
         cbInfo.fRenderPass->unref(fGpu);
     }
@@ -530,15 +527,6 @@ void GrVkGpuRTCommandBuffer::onClear(const GrFixedClip& clip, const SkPMColor4f&
 
 ////////////////////////////////////////////////////////////////////////////////
 
-void GrVkGpuRTCommandBuffer::addAdditionalCommandBuffer() {
-    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
-
-    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
-    cbInfo.currentCmdBuf()->end(fGpu);
-    cbInfo.fCommandBuffers.push_back(fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu));
-    cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
-}
-
 void GrVkGpuRTCommandBuffer::addAdditionalRenderPass() {
     GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
 
@@ -565,7 +553,7 @@ void GrVkGpuRTCommandBuffer::addAdditionalRenderPass() {
     }
     cbInfo.fLoadStoreState = LoadStoreState::kLoadAndStore;
 
-    cbInfo.fCommandBuffers.push_back(fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu));
+    cbInfo.fCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
     // It shouldn't matter what we set the clear color to here since we will assume loading of the
     // attachment.
     memset(&cbInfo.fColorClearValue, 0, sizeof(VkClearValue));
@@ -706,11 +694,6 @@ GrVkPipelineState* GrVkGpuRTCommandBuffer::prepareDrawState(
         return pipelineState;
     }
 
-    if (!cbInfo.fIsEmpty &&
-        fLastPipelineState && fLastPipelineState != pipelineState &&
-        fGpu->vkCaps().newCBOnPipelineChange()) {
-        this->addAdditionalCommandBuffer();
-    }
     fLastPipelineState = pipelineState;
 
     pipelineState->bindPipeline(fGpu, cbInfo.currentCmdBuf());
@@ -182,7 +182,6 @@ private:
 
     void onClearStencilClip(const GrFixedClip&, bool insideStencilMask) override;
 
-    void addAdditionalCommandBuffer();
     void addAdditionalRenderPass();
 
     enum class LoadStoreState {
@@ -195,7 +194,7 @@ private:
     struct CommandBufferInfo {
         using SampledTexture = GrPendingIOResource<GrVkTexture, kRead_GrIOType>;
        const GrVkRenderPass* fRenderPass;
-        SkTArray<std::unique_ptr<GrVkSecondaryCommandBuffer>> fCommandBuffers;
+        std::unique_ptr<GrVkSecondaryCommandBuffer> fCommandBuffer;
         int fNumPreCmds = 0;
         VkClearValue fColorClearValue;
         SkRect fBounds;
@@ -207,7 +206,7 @@ private:
         SkTArray<SampledTexture> fSampledTextures;
 
         GrVkSecondaryCommandBuffer* currentCmdBuf() {
-            return fCommandBuffers.back().get();
+            return fCommandBuffer.get();
         }
     };
 
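For context, the sketch below is not part of the patch: it uses simplified stand-in types and a free function in place of GrVkSecondaryCommandBuffer, CommandBufferInfo, and GrVkGpu::submitSecondaryCommandBuffer. It only illustrates the ownership model this change moves to: with the AMD newCBOnPipelineChange workaround gone, each render pass records exactly one secondary command buffer, so a single std::unique_ptr replaces the SkTArray and the submit path executes just that one buffer between beginRenderPass and endRenderPass.

// Sketch only: simplified stand-ins for the Skia types touched by this change.
#include <cstdio>
#include <memory>
#include <utility>

struct SecondaryCommandBuffer {
    void execute() const { std::printf("execute secondary command buffer\n"); }
};

// Mirrors the new CommandBufferInfo shape: one owned secondary command
// buffer instead of an SkTArray of them.
struct CommandBufferInfo {
    std::unique_ptr<SecondaryCommandBuffer> fCommandBuffer;

    SecondaryCommandBuffer* currentCmdBuf() { return fCommandBuffer.get(); }
};

// Mirrors the new submitSecondaryCommandBuffer signature: ownership of the
// single buffer is moved in and it is executed inside the render pass.
void submitSecondaryCommandBuffer(std::unique_ptr<SecondaryCommandBuffer> buffer) {
    // beginRenderPass(...) would happen here in the real code.
    buffer->execute();
    // endRenderPass(...) would happen here in the real code.
}

int main() {
    CommandBufferInfo cbInfo;
    cbInfo.fCommandBuffer = std::make_unique<SecondaryCommandBuffer>();

    // Commands are recorded through currentCmdBuf(); submission moves ownership.
    submitSecondaryCommandBuffer(std::move(cbInfo.fCommandBuffer));
    return 0;
}

Before this change, prepareDrawState could call addAdditionalCommandBuffer() on AMD whenever the bound VkPipeline changed, which is why the array of secondary command buffers and the per-buffer loop in submitSecondaryCommandBuffer existed in the first place.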