Revert "Revert "Support creation/use of mipped proxy in GrBackendTextureImageGenerator""
This reverts commit 7477d96938.

Reason for revert: Putting in fixes in tests

Original change's description:
> Revert "Support creation/use of mipped proxy in GrBackendTextureImageGenerator"
>
> This reverts commit b8ad00b5a6.
>
> Reason for revert: Some various test failures
>
> Original change's description:
> > Support creation/use of mipped proxy in GrBackendTextureImageGenerator
> >
> > Bug: skia:
> > Change-Id: I9d06780ccb2db0865100b67041c03408f2445c62
> > Reviewed-on: https://skia-review.googlesource.com/61241
> > Reviewed-by: Brian Salomon <bsalomon@google.com>
> > Commit-Queue: Greg Daniel <egdaniel@google.com>
>
> TBR=egdaniel@google.com,bsalomon@google.com,brianosman@google.com
>
> Change-Id: I28e625776352ee6f9f27e66cd5d4b149ef50a22a
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: skia:
> Reviewed-on: https://skia-review.googlesource.com/61941
> Reviewed-by: Greg Daniel <egdaniel@google.com>
> Commit-Queue: Greg Daniel <egdaniel@google.com>

TBR=egdaniel@google.com,bsalomon@google.com,brianosman@google.com

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: skia:
Change-Id: Ibfbca5101b06d9ff8f8a5d33bc6f2114806db552
Reviewed-on: https://skia-review.googlesource.com/62561
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>

This commit is contained in:
parent 1cb41717bc
commit 261b8aa1de
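For orientation, the substance of the re-landed change is that a GrBackendTexture built from a wrapped GrTexture now records whether the texture already has mip levels, and the generator only copies when mips are requested but absent. A minimal sketch of that flow, using only calls that appear in the diff below (illustrative, not the exact call sites):

    // Derive the mip status of the wrapped texture and carry it in the
    // GrBackendTexture (GL case shown; mirrors the diff below).
    GrMipMapped mipMapped = texture->texturePriv().hasMipMaps() ? GrMipMapped::kYes
                                                                : GrMipMapped::kNo;
    const GrGLTextureInfo* glInfo = (const GrGLTextureInfo*)texture->getTextureHandle();
    GrBackendTexture backendTex(texture->width(), texture->height(), texture->config(),
                                mipMapped, *glInfo);
    // Later, onGenerateTexture() can return the wrapped proxy directly when the
    // caller either does not need mips or the proxy already has them:
    //   (!willNeedMipMaps || proxy->isMipMapped())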
@@ -10,10 +10,12 @@
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrGpu.h"
#include "GrRenderTargetContext.h"
#include "GrResourceCache.h"
#include "GrResourceProvider.h"
#include "GrSemaphore.h"
#include "GrTexture.h"
#include "GrTexturePriv.h"

#include "SkGr.h"
#include "SkMessageBus.h"
@@ -32,11 +34,12 @@ GrBackendTextureImageGenerator::RefHelper::~RefHelper() {
static GrBackendTexture make_backend_texture_from_handle(GrBackend backend,
int width, int height,
GrPixelConfig config,
GrMipMapped mipMapped,
GrBackendObject handle) {
switch (backend) {
case kOpenGL_GrBackend: {
const GrGLTextureInfo* glInfo = (const GrGLTextureInfo*)(handle);
return GrBackendTexture(width, height, config, *glInfo);
return GrBackendTexture(width, height, config, mipMapped, *glInfo);
}
#ifdef SK_VULKAN
case kVulkan_GrBackend: {
@@ -46,7 +49,7 @@ static GrBackendTexture make_backend_texture_from_handle(GrBackend backend,
#endif
case kMock_GrBackend: {
const GrMockTextureInfo* mockInfo = (const GrMockTextureInfo*)(handle);
return GrBackendTexture(width, height, config, *mockInfo);
return GrBackendTexture(width, height, config, mipMapped, *mockInfo);
}
default:
return GrBackendTexture();
@@ -74,10 +77,13 @@ GrBackendTextureImageGenerator::Make(sk_sp<GrTexture> texture, GrSurfaceOrigin o
context->getResourceCache()->insertCrossContextGpuResource(texture.get());

GrBackend backend = context->contextPriv().getBackend();
GrMipMapped mipMapped = texture->texturePriv().hasMipMaps() ? GrMipMapped::kYes
: GrMipMapped::kNo;
GrBackendTexture backendTexture = make_backend_texture_from_handle(backend,
texture->width(),
texture->height(),
texture->config(),
mipMapped,
texture->getTextureHandle());

SkImageInfo info = SkImageInfo::Make(texture->width(), texture->height(), colorType, alphaType,
@@ -170,35 +176,28 @@ sk_sp<GrTextureProxy> GrBackendTextureImageGenerator::onGenerateTexture(
sk_sp<GrTextureProxy> proxy = GrSurfaceProxy::MakeWrapped(std::move(tex), fSurfaceOrigin);

if (0 == origin.fX && 0 == origin.fY &&
info.width() == fBackendTexture.width() && info.height() == fBackendTexture.height()) {
// If the caller wants the entire texture, we're done
info.width() == fBackendTexture.width() && info.height() == fBackendTexture.height() &&
(!willNeedMipMaps || proxy->isMipMapped())) {
// If the caller wants the entire texture and we have the correct mip support, we're done
return proxy;
} else {
// Otherwise, make a copy of the requested subset. Make sure our temporary is renderable,
// because Vulkan will want to do the copy as a draw.
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fOrigin = proxy->origin();
desc.fWidth = info.width();
desc.fHeight = info.height();
desc.fConfig = proxy->config();
// TODO: We should support the case where we can allocate the mips ahead of time then copy
// the subregion into the base layer and then let the GPU generate the rest of the mip
// levels.
SkASSERT(!proxy->isMipMapped());
// because Vulkan will want to do the copy as a draw. All other copies would require a
// layout change in Vulkan and we do not change the layout of borrowed images.
sk_sp<GrRenderTargetContext> rtContext(context->makeDeferredRenderTargetContext(
SkBackingFit::kExact, info.width(), info.height(), proxy->config(), nullptr,
0, willNeedMipMaps, proxy->origin(), nullptr, SkBudgeted::kYes));

sk_sp<GrSurfaceContext> sContext(context->contextPriv().makeDeferredSurfaceContext(
desc, SkBackingFit::kExact, SkBudgeted::kYes));
if (!sContext) {
if (!rtContext) {
return nullptr;
}

SkIRect subset = SkIRect::MakeXYWH(origin.fX, origin.fY, info.width(), info.height());
if (!sContext->copy(proxy.get(), subset, SkIPoint::Make(0, 0))) {
if (!rtContext->copy(proxy.get(), subset, SkIPoint::Make(0, 0))) {
return nullptr;
}

return sContext->asTextureProxyRef();
return rtContext->asTextureProxyRef();
}
}
#endif
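The decision in onGenerateTexture() above amounts to: a copy (through a render target context, since Vulkan performs the copy as a draw) is only needed when the caller asks for a subset of the backend texture, or asks for mip maps that the wrapped proxy does not have. A hedged restatement of that predicate, using a hypothetical helper name that is not in the Skia sources:

    // Illustrative only: when must the generator make a copy?
    static bool generator_needs_copy(const SkIPoint& origin, const SkImageInfo& info,
                                     const GrBackendTexture& backendTex,
                                     const GrTextureProxy* proxy, bool willNeedMipMaps) {
        bool wantsSubset = origin.fX != 0 || origin.fY != 0 ||
                           info.width() != backendTex.width() ||
                           info.height() != backendTex.height();
        bool missingMips = willNeedMipMaps && !proxy->isMipMapped();
        return wantsSubset || missingMips;
    }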
@@ -4403,6 +4403,15 @@ GrBackendObject GrGLGpu::createTestingOnlyBackendTexture(void* pixels, int w, in
mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
}

size_t bpp = GrBytesPerPixel(config);
size_t baseLayerSize = bpp * w * h;
SkAutoMalloc defaultStorage(baseLayerSize);
if (!pixels) {
// Fill in the texture with all zeros so we don't have random garbage
pixels = defaultStorage.get();
memset(pixels, 0, baseLayerSize);
}

int width = w;
int height = h;
for (int i = 0; i < mipLevels; ++i) {
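As a worked example of the level count above (editorial arithmetic, not code from the CL), assuming SkMipMap::ComputeLevelCount() returns the number of levels below the base, as its use here implies:

    // w = h = 8  ->  SkMipMap::ComputeLevelCount(8, 8) == 3  ->  mipLevels == 4
    // i = 0: 8x8,  i = 1: 4x4,  i = 2: 2x2,  i = 3: 1x1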
@@ -1138,12 +1138,12 @@ GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRen

////////////////////////////////////////////////////////////////////////////////

bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc,
bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc, size_t bufferOffset,
size_t srcRowBytes, size_t dstRowBytes, int h) {
void* mapPtr;
VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(),
alloc.fMemory,
alloc.fOffset,
alloc.fOffset + bufferOffset,
dstRowBytes * h,
0,
&mapPtr));
@@ -1255,6 +1255,37 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
return 0;
}

// We need to declare these early so that we can delete them at the end outside of the if block.
GrVkAlloc bufferAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
VkBuffer buffer = VK_NULL_HANDLE;

VkResult err;
const VkCommandBufferAllocateInfo cmdInfo = {
VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
nullptr, // pNext
fCmdPool, // commandPool
VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
1 // bufferCount
};

VkCommandBuffer cmdBuffer;
err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
if (err) {
GrVkMemory::FreeImageMemory(this, false, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
return 0;
}

VkCommandBufferBeginInfo cmdBufferBeginInfo;
memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmdBufferBeginInfo.pNext = nullptr;
cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
cmdBufferBeginInfo.pInheritanceInfo = nullptr;

err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
SkASSERT(!err);

size_t bpp = GrBytesPerPixel(config);
size_t rowCopyBytes = bpp * w;
if (linearTiling) {
@@ -1267,79 +1298,89 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i

VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

if (!copy_testing_data(this, srcData, alloc, rowCopyBytes,
if (!copy_testing_data(this, srcData, alloc, 0, rowCopyBytes,
static_cast<size_t>(layout.rowPitch), h)) {
GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
GrVkMemory::FreeImageMemory(this, true, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
VK_CALL(EndCommandBuffer(cmdBuffer));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
return 0;
}
} else {
SkASSERT(w && h);

VkBuffer buffer;
SkTArray<size_t> individualMipOffsets(mipLevels);
individualMipOffsets.push_back(0);
size_t combinedBufferSize = w * bpp * h;
int currentWidth = w;
int currentHeight = h;
// The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
// config. This works with the assumption that the bytes in pixel config is always a power
// of 2.
SkASSERT((bpp & (bpp - 1)) == 0);
const size_t alignmentMask = 0x3 | (bpp - 1);
for (uint32_t currentMipLevel = 1; currentMipLevel < mipLevels; currentMipLevel++) {
currentWidth = SkTMax(1, currentWidth/2);
currentHeight = SkTMax(1, currentHeight/2);

const size_t trimmedSize = currentWidth * bpp * currentHeight;
const size_t alignmentDiff = combinedBufferSize & alignmentMask;
if (alignmentDiff != 0) {
combinedBufferSize += alignmentMask - alignmentDiff + 1;
}
individualMipOffsets.push_back(combinedBufferSize);
combinedBufferSize += trimmedSize;
}

VkBufferCreateInfo bufInfo;
memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufInfo.flags = 0;
bufInfo.size = rowCopyBytes * h;
bufInfo.size = combinedBufferSize;
bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
bufInfo.queueFamilyIndexCount = 0;
bufInfo.pQueueFamilyIndices = nullptr;
VkResult err;
err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));

if (err) {
GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
GrVkMemory::FreeImageMemory(this, false, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
VK_CALL(EndCommandBuffer(cmdBuffer));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
return 0;
}

GrVkAlloc bufferAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type,
true, &bufferAlloc)) {
GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
GrVkMemory::FreeImageMemory(this, false, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
VK_CALL(EndCommandBuffer(cmdBuffer));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
return 0;
}

if (!copy_testing_data(this, srcData, bufferAlloc, rowCopyBytes, rowCopyBytes, h)) {
GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
return 0;
currentWidth = w;
currentHeight = h;
for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
SkASSERT(0 == currentMipLevel || !srcData);
size_t currentRowBytes = bpp * currentWidth;
size_t bufferOffset = individualMipOffsets[currentMipLevel];
if (!copy_testing_data(this, srcData, bufferAlloc, bufferOffset,
currentRowBytes, currentRowBytes, currentHeight)) {
GrVkMemory::FreeImageMemory(this, false, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
VK_CALL(EndCommandBuffer(cmdBuffer));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
return 0;
}
currentWidth = SkTMax(1, currentWidth/2);
currentHeight = SkTMax(1, currentHeight/2);
}

const VkCommandBufferAllocateInfo cmdInfo = {
VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
nullptr, // pNext
fCmdPool, // commandPool
VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
1 // bufferCount
};

VkCommandBuffer cmdBuffer;
err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
if (err) {
GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
return 0;
}

VkCommandBufferBeginInfo cmdBufferBeginInfo;
memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmdBufferBeginInfo.pNext = nullptr;
cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
cmdBufferBeginInfo.pInheritanceInfo = nullptr;

err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
SkASSERT(!err);

// Set image layout and add barrier
VkImageMemoryBarrier barrier;
memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
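To make the offset bookkeeping above concrete, here is the arithmetic for an 8x8 kRGBA_8888 texture (bpp = 4, so alignmentMask = 0x3 | 0x3 = 0x3). This is an editorial worked example, not additional code in the CL:

    // mipLevels == 4 for an 8x8 texture.
    // level 0: offset   0, size 8*4*8 == 256 bytes
    // level 1: offset 256, size 4*4*4 ==  64 bytes  (256 is already 4-byte aligned)
    // level 2: offset 320, size 2*4*2 ==  16 bytes
    // level 3: offset 336, size 1*4*1 ==   4 bytes
    // combinedBufferSize == 340, individualMipOffsets == {0, 256, 320, 336}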
@@ -1347,11 +1388,12 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
barrier.pNext = nullptr;
barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.oldLayout = initialLayout;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0 , 1};
barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1};

VK_CALL(CmdPipelineBarrier(cmdBuffer,
GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
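The functional change in the hunk above is the subresourceRange levelCount going from 1 to mipLevels, so the transfer-destination barrier now covers every level. Annotated for reference (field names per the standard VkImageSubresourceRange layout; the annotation is editorial, not part of the CL):

    barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT,  // aspectMask
                                 0,                          // baseMipLevel
                                 mipLevels,                  // levelCount: all levels, not just 1
                                 0,                          // baseArrayLayer
                                 1 };                        // layerCount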
@@ -1362,70 +1404,102 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
1, &barrier));
initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;

// Submit copy command
VkBufferImageCopy region;
memset(&region, 0, sizeof(VkBufferImageCopy));
region.bufferOffset = 0;
region.bufferRowLength = w;
region.bufferImageHeight = h;
region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
region.imageOffset = { 0, 0, 0 };
region.imageExtent = { (uint32_t)w, (uint32_t)h, 1 };
SkTArray<VkBufferImageCopy> regions(mipLevels);

VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, 1, &region));

// End CommandBuffer
err = VK_CALL(EndCommandBuffer(cmdBuffer));
SkASSERT(!err);

// Create Fence for queue
VkFence fence;
VkFenceCreateInfo fenceInfo;
memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;

err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
SkASSERT(!err);

VkSubmitInfo submitInfo;
memset(&submitInfo, 0, sizeof(VkSubmitInfo));
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.pNext = nullptr;
submitInfo.waitSemaphoreCount = 0;
submitInfo.pWaitSemaphores = nullptr;
submitInfo.pWaitDstStageMask = 0;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &cmdBuffer;
submitInfo.signalSemaphoreCount = 0;
submitInfo.pSignalSemaphores = nullptr;
err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
SkASSERT(!err);

err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX));
if (VK_TIMEOUT == err) {
GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
VK_CALL(DestroyFence(fDevice, fence, nullptr));
SkDebugf("Fence failed to signal: %d\n", err);
SK_ABORT("failing");
currentWidth = w;
currentHeight = h;
for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
// Submit copy command
VkBufferImageCopy& region = regions.push_back();
memset(&region, 0, sizeof(VkBufferImageCopy));
region.bufferOffset = individualMipOffsets[currentMipLevel];
region.bufferRowLength = currentWidth;
region.bufferImageHeight = currentHeight;
region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
region.imageOffset = { 0, 0, 0 };
region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
currentWidth = SkTMax(1, currentWidth/2);
currentHeight = SkTMax(1, currentHeight/2);
}
SkASSERT(!err);

// Clean up transfer resources
VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, regions.count(),
regions.begin()));
}
// Change Image layout to shader read since if we use this texture as a borrowed textures within
// Ganesh we require that its layout be set to that
VkImageMemoryBarrier barrier;
memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
barrier.oldLayout = initialLayout;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1};

VK_CALL(CmdPipelineBarrier(cmdBuffer,
GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
0,
0, nullptr,
0, nullptr,
1, &barrier));

// End CommandBuffer
err = VK_CALL(EndCommandBuffer(cmdBuffer));
SkASSERT(!err);

// Create Fence for queue
VkFence fence;
VkFenceCreateInfo fenceInfo;
memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;

err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
SkASSERT(!err);

VkSubmitInfo submitInfo;
memset(&submitInfo, 0, sizeof(VkSubmitInfo));
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.pNext = nullptr;
submitInfo.waitSemaphoreCount = 0;
submitInfo.pWaitSemaphores = nullptr;
submitInfo.pWaitDstStageMask = 0;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &cmdBuffer;
submitInfo.signalSemaphoreCount = 0;
submitInfo.pSignalSemaphores = nullptr;
err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
SkASSERT(!err);

err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX));
if (VK_TIMEOUT == err) {
GrVkMemory::FreeImageMemory(this, false, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
VK_CALL(DestroyFence(fDevice, fence, nullptr));
SkDebugf("Fence failed to signal: %d\n", err);
SK_ABORT("failing");
}
SkASSERT(!err);

// Clean up transfer resources
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
VK_CALL(DestroyFence(fDevice, fence, nullptr));

GrVkImageInfo* info = new GrVkImageInfo;
info->fImage = image;
info->fAlloc = alloc;
info->fImageTiling = imageTiling;
info->fImageLayout = initialLayout;
info->fImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
info->fFormat = pixelFormat;
info->fLevelCount = mipLevels;
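Continuing the 8x8 RGBA worked example from the offset table earlier, the per-level VkBufferImageCopy regions built in the copy loop of this hunk would be (editorial illustration, not code from the CL):

    // level 0: bufferOffset   0, bufferRowLength 8, bufferImageHeight 8, extent 8x8x1
    // level 1: bufferOffset 256, bufferRowLength 4, bufferImageHeight 4, extent 4x4x1
    // level 2: bufferOffset 320, bufferRowLength 2, bufferImageHeight 2, extent 2x2x1
    // level 3: bufferOffset 336, bufferRowLength 1, bufferImageHeight 1, extent 1x1x1
    // CmdCopyBufferToImage then submits all four regions in a single call.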
@@ -107,6 +107,12 @@ DEF_GPUTEST_FOR_GL_RENDERING_CONTEXTS(EGLImageTest, reporter, ctxInfo) {
return;
}

// Since we are dealing with two different GL contexts here, we need to call finish so that the
// clearing of the texture that happens in createTextingOnlyBackendTexture occurs before we call
// TexSubImage below on the other context. Otherwise, it is possible the calls get reordered and
// the clearing overwrites the TexSubImage writes.
GR_GL_CALL(glCtx1->gl(), Finish());

// Populate the texture using GL context 1. Important to use TexSubImage as TexImage orphans
// the EGL image. Also, this must be done after creating the EGLImage as the texture
// contents may not be preserved when the image is created.
@@ -10,10 +10,12 @@
#if SK_SUPPORT_GPU

#include "GrBackendSurface.h"
#include "GrBackendTextureImageGenerator.h"
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrGpu.h"
#include "GrRenderTargetContext.h"
#include "GrSemaphore.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTest.h"
#include "GrTexturePriv.h"
@@ -21,6 +23,7 @@
#include "SkCanvas.h"
#include "SkImage_Base.h"
#include "SkGpuDevice.h"
#include "SkPoint.h"
#include "SkSurface.h"
#include "SkSurface_Gpu.h"
#include "Test.h"
@@ -29,6 +32,9 @@
// SkImages and SkSurfaces
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrWrappedMipMappedTest, reporter, ctxInfo) {
GrContext* context = ctxInfo.grContext();
if (!context->caps()->mipMapSupport()) {
return;
}
for (auto mipMapped : {GrMipMapped::kNo, GrMipMapped::kYes}) {
for (auto isRT : {false, true}) {
// CreateTestingOnlyBackendTexture currently doesn't support uploading data to mip maps
@@ -94,4 +100,123 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrWrappedMipMappedTest, reporter, ctxInfo) {
}
}

// Test that we correctly copy or don't copy GrBackendTextures in the GrBackendTextureImageGenerator
// based on if we will use mips in the draw and the mip status of the GrBackendTexture.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrBackendTextureImageMipMappedTest, reporter, ctxInfo) {
static const int kSize = 8;

GrContext* context = ctxInfo.grContext();
if (!context->caps()->mipMapSupport()) {
return;
}
for (auto mipMapped : {GrMipMapped::kNo, GrMipMapped::kYes}) {
for (auto willUseMips : {false, true}) {
GrBackendObject backendHandle = context->getGpu()->createTestingOnlyBackendTexture(
nullptr, kSize, kSize, kRGBA_8888_GrPixelConfig, false, mipMapped);

GrBackend backend = context->contextPriv().getBackend();
GrBackendTexture backendTex = GrTest::CreateBackendTexture(backend,
kSize,
kSize,
kRGBA_8888_GrPixelConfig,
mipMapped,
backendHandle);

sk_sp<SkImage> image = SkImage::MakeFromTexture(context, backendTex,
kTopLeft_GrSurfaceOrigin,
kPremul_SkAlphaType, nullptr);

GrTextureProxy* proxy = as_IB(image)->peekProxy();
REPORTER_ASSERT(reporter, proxy);
if (!proxy) {
context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
return;
}

REPORTER_ASSERT(reporter, proxy->priv().isInstantiated());

sk_sp<GrTexture> texture = sk_ref_sp(proxy->priv().peekTexture());
REPORTER_ASSERT(reporter, texture);
if (!texture) {
context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
return;
}

std::unique_ptr<SkImageGenerator> imageGen = GrBackendTextureImageGenerator::Make(
texture, kTopLeft_GrSurfaceOrigin, nullptr, kPremul_SkAlphaType, nullptr);
REPORTER_ASSERT(reporter, imageGen);
if (!imageGen) {
context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
return;
}

SkIPoint origin = SkIPoint::Make(0,0);
// The transfer function behavior isn't used in the generator so set we set it
// arbitrarily here.
SkTransferFunctionBehavior behavior = SkTransferFunctionBehavior::kIgnore;
SkImageInfo imageInfo = SkImageInfo::Make(kSize, kSize, kRGBA_8888_SkColorType,
kPremul_SkAlphaType);
sk_sp<GrTextureProxy> genProxy = imageGen->generateTexture(context, imageInfo,
origin, behavior,
willUseMips);

REPORTER_ASSERT(reporter, genProxy);
if (!genProxy) {
context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
return;
}

REPORTER_ASSERT(reporter, genProxy->priv().isInstantiated());

GrTexture* genTexture = genProxy->priv().peekTexture();
REPORTER_ASSERT(reporter, genTexture);
if (!genTexture) {
context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
return;
}

GrBackendObject genBackendObject = genTexture->getTextureHandle();

if (kOpenGL_GrBackend == backend) {
const GrGLTextureInfo* origTexInfo = backendTex.getGLTextureInfo();
GrGLTextureInfo* genTexInfo = (GrGLTextureInfo*)genBackendObject;
if (willUseMips && GrMipMapped::kNo == mipMapped) {
// We did a copy so the texture IDs should be different
REPORTER_ASSERT(reporter, origTexInfo->fID != genTexInfo->fID);
} else {
REPORTER_ASSERT(reporter, origTexInfo->fID == genTexInfo->fID);
}
} else if (kVulkan_GrBackend == backend) {
#ifdef SK_VULKAN
const GrVkImageInfo* origImageInfo = backendTex.getVkImageInfo();
GrVkImageInfo* genImageInfo = (GrVkImageInfo*)genBackendObject;
if (willUseMips && GrMipMapped::kNo == mipMapped) {
// We did a copy so the texture IDs should be different
REPORTER_ASSERT(reporter, origImageInfo->fImage != genImageInfo->fImage);
} else {
REPORTER_ASSERT(reporter, origImageInfo->fImage == genImageInfo->fImage);
}
#endif
} else if (kMetal_GrBackend == backend) {
REPORTER_ASSERT(reporter, false);
} else {
REPORTER_ASSERT(reporter, false);
}

// Must make sure the uses of the backend texture have finished (we possibly have a
// queued up copy) before we delete the backend texture. Thus we use readPixels here
// just to force the synchronization.
sk_sp<GrSurfaceContext> surfContext =
context->contextPriv().makeWrappedSurfaceContext(genProxy, nullptr);

SkBitmap bitmap;
bitmap.allocPixels(imageInfo);
surfContext->readPixels(imageInfo, bitmap.getPixels(), 0, 0, 0, 0);

context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
}
}
}

#endif