Revert "Support creation/use of mipped proxy in GrBackendTextureImageGenerator"
This reverts commit b8ad00b5a6.
Reason for revert: Various test failures
Original change's description:
> Support creation/use of mipped proxy in GrBackendTextureImageGenerator
>
> Bug: skia:
> Change-Id: I9d06780ccb2db0865100b67041c03408f2445c62
> Reviewed-on: https://skia-review.googlesource.com/61241
> Reviewed-by: Brian Salomon <bsalomon@google.com>
> Commit-Queue: Greg Daniel <egdaniel@google.com>
TBR=egdaniel@google.com,bsalomon@google.com,brianosman@google.com
Change-Id: I28e625776352ee6f9f27e66cd5d4b149ef50a22a
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: skia:
Reviewed-on: https://skia-review.googlesource.com/61941
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
commit 7477d96938
parent b8ad00b5a6
@@ -10,12 +10,10 @@
 #include "GrContext.h"
 #include "GrContextPriv.h"
 #include "GrGpu.h"
-#include "GrRenderTargetContext.h"
 #include "GrResourceCache.h"
 #include "GrResourceProvider.h"
 #include "GrSemaphore.h"
 #include "GrTexture.h"
-#include "GrTexturePriv.h"
 
 #include "SkGr.h"
 #include "SkMessageBus.h"
@@ -34,12 +32,11 @@ GrBackendTextureImageGenerator::RefHelper::~RefHelper() {
 static GrBackendTexture make_backend_texture_from_handle(GrBackend backend,
                                                          int width, int height,
                                                          GrPixelConfig config,
-                                                         GrMipMapped mipMapped,
                                                          GrBackendObject handle) {
     switch (backend) {
         case kOpenGL_GrBackend: {
             const GrGLTextureInfo* glInfo = (const GrGLTextureInfo*)(handle);
-            return GrBackendTexture(width, height, config, mipMapped, *glInfo);
+            return GrBackendTexture(width, height, config, *glInfo);
         }
 #ifdef SK_VULKAN
         case kVulkan_GrBackend: {
@@ -49,7 +46,7 @@ static GrBackendTexture make_backend_texture_from_handle(GrBackend backend,
 #endif
         case kMock_GrBackend: {
             const GrMockTextureInfo* mockInfo = (const GrMockTextureInfo*)(handle);
-            return GrBackendTexture(width, height, config, mipMapped, *mockInfo);
+            return GrBackendTexture(width, height, config, *mockInfo);
         }
         default:
             return GrBackendTexture();
@@ -77,13 +74,10 @@ GrBackendTextureImageGenerator::Make(sk_sp<GrTexture> texture, GrSurfaceOrigin o
     context->getResourceCache()->insertCrossContextGpuResource(texture.get());
 
     GrBackend backend = context->contextPriv().getBackend();
-    GrMipMapped mipMapped = texture->texturePriv().hasMipMaps() ? GrMipMapped::kYes
-                                                                : GrMipMapped::kNo;
     GrBackendTexture backendTexture = make_backend_texture_from_handle(backend,
                                                                        texture->width(),
                                                                        texture->height(),
                                                                        texture->config(),
-                                                                       mipMapped,
                                                                        texture->getTextureHandle());
 
     SkImageInfo info = SkImageInfo::Make(texture->width(), texture->height(), colorType, alphaType,
@@ -176,28 +170,35 @@ sk_sp<GrTextureProxy> GrBackendTextureImageGenerator::onGenerateTexture(
     sk_sp<GrTextureProxy> proxy = GrSurfaceProxy::MakeWrapped(std::move(tex), fSurfaceOrigin);
 
     if (0 == origin.fX && 0 == origin.fY &&
-        info.width() == fBackendTexture.width() && info.height() == fBackendTexture.height() &&
-        (!willNeedMipMaps || proxy->isMipMapped())) {
-        // If the caller wants the entire texture and we have the correct mip support, we're done
+        info.width() == fBackendTexture.width() && info.height() == fBackendTexture.height()) {
+        // If the caller wants the entire texture, we're done
         return proxy;
     } else {
         // Otherwise, make a copy of the requested subset. Make sure our temporary is renderable,
-        // because Vulkan will want to do the copy as a draw. All other copies would require a
-        // layout change in Vulkan and we do not change the layout of borrowed images.
-        sk_sp<GrRenderTargetContext> rtContext(context->makeDeferredRenderTargetContext(
-                SkBackingFit::kExact, info.width(), info.height(), proxy->config(), nullptr,
-                0, willNeedMipMaps, proxy->origin(), nullptr, SkBudgeted::kYes));
+        // because Vulkan will want to do the copy as a draw.
+        GrSurfaceDesc desc;
+        desc.fFlags = kRenderTarget_GrSurfaceFlag;
+        desc.fOrigin = proxy->origin();
+        desc.fWidth = info.width();
+        desc.fHeight = info.height();
+        desc.fConfig = proxy->config();
+        // TODO: We should support the case where we can allocate the mips ahead of time then copy
+        // the subregion into the base layer and then let the GPU generate the rest of the mip
+        // levels.
+        SkASSERT(!proxy->isMipMapped());
 
-        if (!rtContext) {
+        sk_sp<GrSurfaceContext> sContext(context->contextPriv().makeDeferredSurfaceContext(
+                desc, SkBackingFit::kExact, SkBudgeted::kYes));
+        if (!sContext) {
             return nullptr;
         }
 
         SkIRect subset = SkIRect::MakeXYWH(origin.fX, origin.fY, info.width(), info.height());
-        if (!rtContext->copy(proxy.get(), subset, SkIPoint::Make(0, 0))) {
+        if (!sContext->copy(proxy.get(), subset, SkIPoint::Make(0, 0))) {
             return nullptr;
         }
 
-        return rtContext->asTextureProxyRef();
+        return sContext->asTextureProxyRef();
     }
 }
 #endif
@@ -4403,15 +4403,6 @@ GrBackendObject GrGLGpu::createTestingOnlyBackendTexture(void* pixels, int w, in
         mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
     }
 
-    size_t bpp = GrBytesPerPixel(config);
-    size_t baseLayerSize = bpp * w * h;
-    SkAutoMalloc defaultStorage(baseLayerSize);
-    if (!pixels) {
-        // Fill in the texture with all zeros so we don't have random garbage
-        pixels = defaultStorage.get();
-        memset(pixels, 0, baseLayerSize);
-    }
-
     int width = w;
     int height = h;
     for (int i = 0; i < mipLevels; ++i) {
@@ -1138,12 +1138,12 @@ GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRen
 
 ////////////////////////////////////////////////////////////////////////////////
 
-bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc, size_t bufferOffset,
+bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc,
                        size_t srcRowBytes, size_t dstRowBytes, int h) {
     void* mapPtr;
     VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(),
                                                             alloc.fMemory,
-                                                            alloc.fOffset + bufferOffset,
+                                                            alloc.fOffset,
                                                             dstRowBytes * h,
                                                             0,
                                                             &mapPtr));
@@ -1255,37 +1255,6 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
         return 0;
     }
 
-    // We need to declare these early so that we can delete them at the end outside of the if block.
-    GrVkAlloc bufferAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
-    VkBuffer buffer = VK_NULL_HANDLE;
-
-    VkResult err;
-    const VkCommandBufferAllocateInfo cmdInfo = {
-        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
-        nullptr,                                          // pNext
-        fCmdPool,                                         // commandPool
-        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
-        1                                                 // bufferCount
-    };
-
-    VkCommandBuffer cmdBuffer;
-    err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
-    if (err) {
-        GrVkMemory::FreeImageMemory(this, false, alloc);
-        VK_CALL(DestroyImage(fDevice, image, nullptr));
-        return 0;
-    }
-
-    VkCommandBufferBeginInfo cmdBufferBeginInfo;
-    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
-    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
-    cmdBufferBeginInfo.pNext = nullptr;
-    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
-    cmdBufferBeginInfo.pInheritanceInfo = nullptr;
-
-    err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
-    SkASSERT(!err);
-
     size_t bpp = GrBytesPerPixel(config);
     size_t rowCopyBytes = bpp * w;
     if (linearTiling) {
@@ -1298,89 +1267,79 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
 
         VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));
 
-        if (!copy_testing_data(this, srcData, alloc, 0, rowCopyBytes,
+        if (!copy_testing_data(this, srcData, alloc, rowCopyBytes,
                                static_cast<size_t>(layout.rowPitch), h)) {
-            GrVkMemory::FreeImageMemory(this, true, alloc);
+            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
             VK_CALL(DestroyImage(fDevice, image, nullptr));
-            VK_CALL(EndCommandBuffer(cmdBuffer));
-            VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
             return 0;
         }
     } else {
         SkASSERT(w && h);
 
-        SkTArray<size_t> individualMipOffsets(mipLevels);
-        individualMipOffsets.push_back(0);
-        size_t combinedBufferSize = w * bpp * h;
-        int currentWidth = w;
-        int currentHeight = h;
-        // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
-        // config. This works with the assumption that the bytes in pixel config is always a power
-        // of 2.
-        SkASSERT((bpp & (bpp - 1)) == 0);
-        const size_t alignmentMask = 0x3 | (bpp - 1);
-        for (uint32_t currentMipLevel = 1; currentMipLevel < mipLevels; currentMipLevel++) {
-            currentWidth = SkTMax(1, currentWidth/2);
-            currentHeight = SkTMax(1, currentHeight/2);
-
-            const size_t trimmedSize = currentWidth * bpp * currentHeight;
-            const size_t alignmentDiff = combinedBufferSize & alignmentMask;
-            if (alignmentDiff != 0) {
-                combinedBufferSize += alignmentMask - alignmentDiff + 1;
-            }
-            individualMipOffsets.push_back(combinedBufferSize);
-            combinedBufferSize += trimmedSize;
-        }
-
+        VkBuffer buffer;
         VkBufferCreateInfo bufInfo;
         memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
         bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
         bufInfo.flags = 0;
-        bufInfo.size = combinedBufferSize;
+        bufInfo.size = rowCopyBytes * h;
         bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
         bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
         bufInfo.queueFamilyIndexCount = 0;
         bufInfo.pQueueFamilyIndices = nullptr;
+        VkResult err;
         err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));
 
         if (err) {
-            GrVkMemory::FreeImageMemory(this, false, alloc);
+            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
             VK_CALL(DestroyImage(fDevice, image, nullptr));
-            VK_CALL(EndCommandBuffer(cmdBuffer));
-            VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
             return 0;
         }
 
+        GrVkAlloc bufferAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
        if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type,
                                                   true, &bufferAlloc)) {
-            GrVkMemory::FreeImageMemory(this, false, alloc);
+            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
             VK_CALL(DestroyImage(fDevice, image, nullptr));
             VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
-            VK_CALL(EndCommandBuffer(cmdBuffer));
-            VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
             return 0;
         }
 
-        currentWidth = w;
-        currentHeight = h;
-        for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
-            SkASSERT(0 == currentMipLevel || !srcData);
-            size_t currentRowBytes = bpp * currentWidth;
-            size_t bufferOffset = individualMipOffsets[currentMipLevel];
-            if (!copy_testing_data(this, srcData, bufferAlloc, bufferOffset,
-                                   currentRowBytes, currentRowBytes, currentHeight)) {
-                GrVkMemory::FreeImageMemory(this, false, alloc);
-                VK_CALL(DestroyImage(fDevice, image, nullptr));
-                GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
-                VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
-                VK_CALL(EndCommandBuffer(cmdBuffer));
-                VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
-                return 0;
-            }
-            currentWidth = SkTMax(1, currentWidth/2);
-            currentHeight = SkTMax(1, currentHeight/2);
+        if (!copy_testing_data(this, srcData, bufferAlloc, rowCopyBytes, rowCopyBytes, h)) {
+            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
+            VK_CALL(DestroyImage(fDevice, image, nullptr));
+            GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+            return 0;
         }
 
+        const VkCommandBufferAllocateInfo cmdInfo = {
+            VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
+            nullptr,                                          // pNext
+            fCmdPool,                                         // commandPool
+            VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
+            1                                                 // bufferCount
+        };
+
+        VkCommandBuffer cmdBuffer;
+        err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
+        if (err) {
+            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
+            VK_CALL(DestroyImage(fDevice, image, nullptr));
+            GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+            return 0;
+        }
+
+        VkCommandBufferBeginInfo cmdBufferBeginInfo;
+        memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+        cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+        cmdBufferBeginInfo.pNext = nullptr;
+        cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+        cmdBufferBeginInfo.pInheritanceInfo = nullptr;
+
+        err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
+        SkASSERT(!err);
+
         // Set image layout and add barrier
         VkImageMemoryBarrier barrier;
         memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
@@ -1388,12 +1347,11 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
         barrier.pNext = nullptr;
         barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
         barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
-        barrier.oldLayout = initialLayout;
         barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
         barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
         barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
         barrier.image = image;
-        barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1};
+        barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0 , 1};
 
         VK_CALL(CmdPipelineBarrier(cmdBuffer,
                                    GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
@@ -1404,102 +1362,70 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
                                    1, &barrier));
         initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
 
-        SkTArray<VkBufferImageCopy> regions(mipLevels);
+        // Submit copy command
+        VkBufferImageCopy region;
+        memset(&region, 0, sizeof(VkBufferImageCopy));
+        region.bufferOffset = 0;
+        region.bufferRowLength = w;
+        region.bufferImageHeight = h;
+        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+        region.imageOffset = { 0, 0, 0 };
+        region.imageExtent = { (uint32_t)w, (uint32_t)h, 1 };
 
-        currentWidth = w;
-        currentHeight = h;
-        for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
-            // Submit copy command
-            VkBufferImageCopy& region = regions.push_back();
-            memset(&region, 0, sizeof(VkBufferImageCopy));
-            region.bufferOffset = individualMipOffsets[currentMipLevel];
-            region.bufferRowLength = currentWidth;
-            region.bufferImageHeight = currentHeight;
-            region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
-            region.imageOffset = { 0, 0, 0 };
-            region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
-            currentWidth = SkTMax(1, currentWidth/2);
-            currentHeight = SkTMax(1, currentHeight/2);
+        VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, 1, &region));
+
+        // End CommandBuffer
+        err = VK_CALL(EndCommandBuffer(cmdBuffer));
+        SkASSERT(!err);
+
+        // Create Fence for queue
+        VkFence fence;
+        VkFenceCreateInfo fenceInfo;
+        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
+        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+
+        err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
+        SkASSERT(!err);
+
+        VkSubmitInfo submitInfo;
+        memset(&submitInfo, 0, sizeof(VkSubmitInfo));
+        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+        submitInfo.pNext = nullptr;
+        submitInfo.waitSemaphoreCount = 0;
+        submitInfo.pWaitSemaphores = nullptr;
+        submitInfo.pWaitDstStageMask = 0;
+        submitInfo.commandBufferCount = 1;
+        submitInfo.pCommandBuffers = &cmdBuffer;
+        submitInfo.signalSemaphoreCount = 0;
+        submitInfo.pSignalSemaphores = nullptr;
+        err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
+        SkASSERT(!err);
+
+        err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX));
+        if (VK_TIMEOUT == err) {
+            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
+            VK_CALL(DestroyImage(fDevice, image, nullptr));
+            GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
+            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
+            VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
+            VK_CALL(DestroyFence(fDevice, fence, nullptr));
+            SkDebugf("Fence failed to signal: %d\n", err);
+            SK_ABORT("failing");
         }
+        SkASSERT(!err);
 
-        VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, regions.count(),
-                                     regions.begin()));
-    }
-    // Change Image layout to shader read since if we use this texture as a borrowed textures within
-    // Ganesh we require that its layout be set to that
-    VkImageMemoryBarrier barrier;
-    memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
-    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
-    barrier.pNext = nullptr;
-    barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
-    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
-    barrier.oldLayout = initialLayout;
-    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
-    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-    barrier.image = image;
-    barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1};
-
-    VK_CALL(CmdPipelineBarrier(cmdBuffer,
-                               GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
-                               VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
-                               0,
-                               0, nullptr,
-                               0, nullptr,
-                               1, &barrier));
-
-    // End CommandBuffer
-    err = VK_CALL(EndCommandBuffer(cmdBuffer));
-    SkASSERT(!err);
-
-    // Create Fence for queue
-    VkFence fence;
-    VkFenceCreateInfo fenceInfo;
-    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
-    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
-
-    err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
-    SkASSERT(!err);
-
-    VkSubmitInfo submitInfo;
-    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
-    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
-    submitInfo.pNext = nullptr;
-    submitInfo.waitSemaphoreCount = 0;
-    submitInfo.pWaitSemaphores = nullptr;
-    submitInfo.pWaitDstStageMask = 0;
-    submitInfo.commandBufferCount = 1;
-    submitInfo.pCommandBuffers = &cmdBuffer;
-    submitInfo.signalSemaphoreCount = 0;
-    submitInfo.pSignalSemaphores = nullptr;
-    err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
-    SkASSERT(!err);
-
-    err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX));
-    if (VK_TIMEOUT == err) {
-        GrVkMemory::FreeImageMemory(this, false, alloc);
-        VK_CALL(DestroyImage(fDevice, image, nullptr));
+        // Clean up transfer resources
         GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
         VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
         VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
         VK_CALL(DestroyFence(fDevice, fence, nullptr));
-        SkDebugf("Fence failed to signal: %d\n", err);
-        SK_ABORT("failing");
     }
-    SkASSERT(!err);
-
-    // Clean up transfer resources
-    GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
-    VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
-    VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
-    VK_CALL(DestroyFence(fDevice, fence, nullptr));
-
 
     GrVkImageInfo* info = new GrVkImageInfo;
     info->fImage = image;
     info->fAlloc = alloc;
     info->fImageTiling = imageTiling;
-    info->fImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+    info->fImageLayout = initialLayout;
     info->fFormat = pixelFormat;
     info->fLevelCount = mipLevels;
 
@@ -10,12 +10,10 @@
 #if SK_SUPPORT_GPU
 
 #include "GrBackendSurface.h"
-#include "GrBackendTextureImageGenerator.h"
 #include "GrContext.h"
 #include "GrContextPriv.h"
 #include "GrGpu.h"
 #include "GrRenderTargetContext.h"
-#include "GrSemaphore.h"
 #include "GrSurfaceProxyPriv.h"
 #include "GrTest.h"
 #include "GrTexturePriv.h"
@@ -23,7 +21,6 @@
 #include "SkCanvas.h"
 #include "SkImage_Base.h"
 #include "SkGpuDevice.h"
-#include "SkPoint.h"
 #include "SkSurface.h"
 #include "SkSurface_Gpu.h"
 #include "Test.h"
@@ -97,120 +94,4 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrWrappedMipMappedTest, reporter, ctxInfo) {
     }
 }
 
-// Test that we correctly copy or don't copy GrBackendTextures in the GrBackendTextureImageGenerator
-// based on if we will use mips in the draw and the mip status of the GrBackendTexture.
-DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrBackendTextureImageMipMappedTest, reporter, ctxInfo) {
-    static const int kSize = 8;
-
-    GrContext* context = ctxInfo.grContext();
-    for (auto mipMapped : {GrMipMapped::kNo, GrMipMapped::kYes}) {
-        for (auto willUseMips : {false, true}) {
-            GrBackendObject backendHandle = context->getGpu()->createTestingOnlyBackendTexture(
-                    nullptr, kSize, kSize, kRGBA_8888_GrPixelConfig, false, mipMapped);
-
-            GrBackend backend = context->contextPriv().getBackend();
-            GrBackendTexture backendTex = GrTest::CreateBackendTexture(backend,
-                                                                       kSize,
-                                                                       kSize,
-                                                                       kRGBA_8888_GrPixelConfig,
-                                                                       mipMapped,
-                                                                       backendHandle);
-
-            sk_sp<SkImage> image = SkImage::MakeFromTexture(context, backendTex,
-                                                            kTopLeft_GrSurfaceOrigin,
-                                                            kPremul_SkAlphaType, nullptr);
-
-            GrTextureProxy* proxy = as_IB(image)->peekProxy();
-            REPORTER_ASSERT(reporter, proxy);
-            if (!proxy) {
-                context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
-                return;
-            }
-
-            REPORTER_ASSERT(reporter, proxy->priv().isInstantiated());
-
-            sk_sp<GrTexture> texture = sk_ref_sp(proxy->priv().peekTexture());
-            REPORTER_ASSERT(reporter, texture);
-            if (!texture) {
-                context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
-                return;
-            }
-
-            std::unique_ptr<SkImageGenerator> imageGen = GrBackendTextureImageGenerator::Make(
-                    texture, kTopLeft_GrSurfaceOrigin, nullptr, kPremul_SkAlphaType, nullptr);
-            REPORTER_ASSERT(reporter, imageGen);
-            if (!imageGen) {
-                context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
-                return;
-            }
-
-            SkIPoint origin = SkIPoint::Make(0,0);
-            // The transfer function behavior isn't used in the generator so set we set it
-            // arbitrarily here.
-            SkTransferFunctionBehavior behavior = SkTransferFunctionBehavior::kIgnore;
-            SkImageInfo imageInfo = SkImageInfo::Make(kSize, kSize, kRGBA_8888_SkColorType,
-                                                      kPremul_SkAlphaType);
-            sk_sp<GrTextureProxy> genProxy = imageGen->generateTexture(context, imageInfo,
-                                                                       origin, behavior,
-                                                                       willUseMips);
-
-            REPORTER_ASSERT(reporter, genProxy);
-            if (!genProxy) {
-                context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
-                return;
-            }
-
-            REPORTER_ASSERT(reporter, genProxy->priv().isInstantiated());
-
-            GrTexture* genTexture = genProxy->priv().peekTexture();
-            REPORTER_ASSERT(reporter, genTexture);
-            if (!genTexture) {
-                context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
-                return;
-            }
-
-            GrBackendObject genBackendObject = genTexture->getTextureHandle();
-
-            if (kOpenGL_GrBackend == backend) {
-                const GrGLTextureInfo* origTexInfo = backendTex.getGLTextureInfo();
-                GrGLTextureInfo* genTexInfo = (GrGLTextureInfo*)genBackendObject;
-                if (willUseMips && GrMipMapped::kNo == mipMapped) {
-                    // We did a copy so the texture IDs should be different
-                    REPORTER_ASSERT(reporter, origTexInfo->fID != genTexInfo->fID);
-                } else {
-                    REPORTER_ASSERT(reporter, origTexInfo->fID == genTexInfo->fID);
-                }
-            } else if (kVulkan_GrBackend == backend) {
-#ifdef SK_VULKAN
-                const GrVkImageInfo* origImageInfo = backendTex.getVkImageInfo();
-                GrVkImageInfo* genImageInfo = (GrVkImageInfo*)genBackendObject;
-                if (willUseMips && GrMipMapped::kNo == mipMapped) {
-                    // We did a copy so the texture IDs should be different
-                    REPORTER_ASSERT(reporter, origImageInfo->fImage != genImageInfo->fImage);
-                } else {
-                    REPORTER_ASSERT(reporter, origImageInfo->fImage == genImageInfo->fImage);
-                }
-#endif
-            } else if (kMetal_GrBackend == backend) {
-                REPORTER_ASSERT(reporter, false);
-            } else {
-                REPORTER_ASSERT(reporter, false);
-            }
-
-            // Must make sure the uses of the backend texture have finished (we possibly have a
-            // queued up copy) before we delete the backend texture. Thus we use readPixels here
-            // just to force the synchronization.
-            sk_sp<GrSurfaceContext> surfContext =
-                    context->contextPriv().makeWrappedSurfaceContext(genProxy, nullptr);
-
-            SkBitmap bitmap;
-            bitmap.allocPixels(imageInfo);
-            surfContext->readPixels(imageInfo, bitmap.getPixels(), 0, 0, 0, 0);
-
-            context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle);
-        }
-    }
-}
-
-
 #endif