Update SkImage::MakeFromYUVTexturesCopy to GrBackendTexture

Change-Id: I7ba030c5d7856309709e892a2b1b625cf74c70b8
Reviewed-on: https://skia-review.googlesource.com/82823
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
Robert Phillips 2017-12-13 09:22:45 -05:00 committed by Skia Commit-Bot
parent 2a3009931d
commit c25db63753
14 changed files with 126 additions and 518 deletions
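
The change is mechanical at the call sites: the opaque GrBackendObject handles that the YUV entry
points used to take become typed GrBackendTexture values, and the per-backend
createTestingOnlyBackendObject()/deleteTestingOnlyBackendObject() test hooks are removed in favor
of the existing GrBackendTexture-based ones. A condensed before/after sketch of a caller (the
GrContext, the plane sizes, and the texture setup are assumed placeholders, not taken from this
diff):

    // Before this CL: opaque backend handles.
    GrBackendObject yuvHandles[3];        // filled in by backend-specific code (assumed)
    sk_sp<SkImage> oldStyle = SkImage::MakeFromYUVTexturesCopy(context, kJPEG_SkYUVColorSpace,
                                                               yuvHandles, sizes,
                                                               kTopLeft_GrSurfaceOrigin);

    // After this CL: typed backend textures; the GrBackendObject overload remains for now and
    // forwards through a bridging helper (see SkImage_Gpu.cpp below).
    GrBackendTexture yuvTextures[3];      // filled in by backend-specific code (assumed)
    sk_sp<SkImage> newStyle = SkImage::MakeFromYUVTexturesCopy(context, kJPEG_SkYUVColorSpace,
                                                               yuvTextures, sizes,
                                                               kTopLeft_GrSurfaceOrigin);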


@@ -93,7 +93,7 @@ protected:
         fRGBImage = SkImage::MakeRasterCopy(SkPixmap(rgbBmp.info(), rgbColors, rgbBmp.rowBytes()));
     }

-    void createYUVTextures(GrContext* context, GrBackendObject yuvHandles[3]) {
+    void createYUVTextures(GrContext* context, GrBackendTexture yuvTextures[3]) {
         GrGpu* gpu = context->getGpu();
         if (!gpu) {
             return;
@@ -101,15 +101,16 @@ protected:
         for (int i = 0; i < 3; ++i) {
             SkASSERT(fYUVBmps[i].width() == SkToInt(fYUVBmps[i].rowBytes()));
-            yuvHandles[i] = gpu->createTestingOnlyBackendObject(fYUVBmps[i].getPixels(),
-                                                                fYUVBmps[i].width(),
-                                                                fYUVBmps[i].height(),
-                                                                kAlpha_8_GrPixelConfig);
+            yuvTextures[i] = gpu->createTestingOnlyBackendTexture(fYUVBmps[i].getPixels(),
+                                                                  fYUVBmps[i].width(),
+                                                                  fYUVBmps[i].height(),
+                                                                  kAlpha_8_GrPixelConfig,
+                                                                  false, GrMipMapped::kNo);
         }
         context->resetContext();
     }

-    void deleteYUVTextures(GrContext* context, const GrBackendObject yuvHandles[3]) {
+    void deleteYUVTextures(GrContext* context, GrBackendTexture yuvTextures[3]) {
         GrGpu* gpu = context->getGpu();
         if (!gpu) {
@@ -117,7 +118,7 @@ protected:
         }

         for (int i = 0; i < 3; ++i) {
-            gpu->deleteTestingOnlyBackendObject(yuvHandles[i]);
+            gpu->deleteTestingOnlyBackendTexture(&yuvTextures[i]);
         }

         context->resetContext();
@@ -141,13 +142,13 @@ protected:
         SkTArray<sk_sp<SkImage>> images;
         images.push_back(fRGBImage);
         for (int space = kJPEG_SkYUVColorSpace; space <= kLastEnum_SkYUVColorSpace; ++space) {
-            GrBackendObject yuvHandles[3];
-            this->createYUVTextures(context, yuvHandles);
+            GrBackendTexture yuvTextures[3];
+            this->createYUVTextures(context, yuvTextures);
             images.push_back(SkImage::MakeFromYUVTexturesCopy(context,
                                                               static_cast<SkYUVColorSpace>(space),
-                                                              yuvHandles, sizes,
+                                                              yuvTextures, sizes,
                                                               kTopLeft_GrSurfaceOrigin));
-            this->deleteYUVTextures(context, yuvHandles);
+            this->deleteYUVTextures(context, yuvTextures);
         }
         for (int i = 0; i < images.count(); ++ i) {
             SkScalar y = (i + 1) * kPad + i * fYUVBmps[0].height();


@@ -189,6 +189,29 @@ public:
                                                   GrSurfaceOrigin surfaceOrigin,
                                                   sk_sp<SkColorSpace> colorSpace = nullptr);

+    /**
+     *  Create a new image by copying the pixels from the specified y, u, v textures. The data
+     *  from the textures is immediately ingested into the image and the textures can be modified or
+     *  deleted after the function returns. The image will have the dimensions of the y texture.
+     */
+    static sk_sp<SkImage> MakeFromYUVTexturesCopy(GrContext* context, SkYUVColorSpace yuvColorSpace,
+                                                  const GrBackendTexture yuvTextureHandles[3],
+                                                  const SkISize yuvSizes[3],
+                                                  GrSurfaceOrigin surfaceOrigin,
+                                                  sk_sp<SkColorSpace> colorSpace = nullptr);
+
+    /**
+     *  Create a new image by copying the pixels from the specified y and uv textures. The data
+     *  from the textures is immediately ingested into the image and the textures can be modified or
+     *  deleted after the function returns. The image will have the dimensions of the y texture.
+     */
+    static sk_sp<SkImage> MakeFromNV12TexturesCopy(GrContext* context,
+                                                   SkYUVColorSpace yuvColorSpace,
+                                                   const GrBackendTexture nv12TextureHandles[2],
+                                                   const SkISize nv12Sizes[2],
+                                                   GrSurfaceOrigin surfaceOrigin,
+                                                   sk_sp<SkColorSpace> colorSpace = nullptr);
+
     enum class BitDepth {
         kU8,
         kF16,
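
These overloads let a client wrap textures it owns in GrBackendTexture objects itself instead of
passing raw handles. A minimal GL-flavored usage sketch; the texture IDs, plane sizes, and
GrContext are assumed, and the GrBackendTexture constructor taking a GrGLTextureInfo is the form
this era of the API provides:

    // Sketch only: wrap three client-created GL textures (one per Y/U/V plane) and copy them
    // into an SkImage. kAlpha_8_GrPixelConfig matches the single-channel planes used elsewhere
    // in this CL.
    GrGLTextureInfo planeInfo[3];
    for (int i = 0; i < 3; ++i) {
        planeInfo[i].fTarget = GR_GL_TEXTURE_2D;
        planeInfo[i].fID = planeTexIDs[i];          // hypothetical GL texture IDs owned by the client
    }

    SkISize yuvSizes[3] = { ySize, uSize, vSize };  // per-plane dimensions (assumed)
    GrBackendTexture yuvTextures[3] = {
        GrBackendTexture(yuvSizes[0].width(), yuvSizes[0].height(), kAlpha_8_GrPixelConfig, planeInfo[0]),
        GrBackendTexture(yuvSizes[1].width(), yuvSizes[1].height(), kAlpha_8_GrPixelConfig, planeInfo[1]),
        GrBackendTexture(yuvSizes[2].width(), yuvSizes[2].height(), kAlpha_8_GrPixelConfig, planeInfo[2]),
    };

    sk_sp<SkImage> image = SkImage::MakeFromYUVTexturesCopy(context, kJPEG_SkYUVColorSpace,
                                                            yuvTextures, yuvSizes,
                                                            kTopLeft_GrSurfaceOrigin);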


@@ -217,7 +217,8 @@ public:
                                              const GrSurfaceDesc&, SkBudgeted,
                                              const void* srcData, size_t rowBytes);

-    static sk_sp<GrTextureProxy> MakeWrappedBackend(GrContext*, GrBackendTexture&, GrSurfaceOrigin);
+    static sk_sp<GrTextureProxy> MakeWrappedBackend(GrContext*, const GrBackendTexture&,
+                                                    GrSurfaceOrigin);

     using LazyInstantiateCallback = std::function<sk_sp<GrTexture>(GrResourceProvider*,
                                                                    GrSurfaceOrigin* outOrigin)>;


@@ -450,20 +450,6 @@ public:
     Stats* stats() { return &fStats; }
     void dumpJSON(SkJSONWriter*) const;

-    /** Creates a texture directly in the backend API without wrapping it in a GrTexture. This is
-        only to be used for testing (particularly for testing the methods that import an externally
-        created texture into Skia. Must be matched with a call to deleteTestingOnlyTexture(). */
-    virtual GrBackendObject createTestingOnlyBackendObject(
-            void* pixels, int w, int h,
-            GrPixelConfig config,
-            bool isRenderTarget = false,
-            GrMipMapped mipMapped = GrMipMapped::kNo) = 0;
-    /** If ownership of the backend texture has been transferred pass true for abandonTexture. This
-        will do any necessary cleanup of the handle without freeing the texture in the backend
-        API. */
-    virtual void deleteTestingOnlyBackendObject(GrBackendObject,
-                                                bool abandonTexture = false) = 0;
-
     /** Creates a texture directly in the backend API without wrapping it in a GrTexture. This is
         only to be used for testing (particularly for testing the methods that import an externally
         created texture into Skia. Must be matched with a call to deleteTestingOnlyTexture(). */


@@ -392,7 +392,7 @@ sk_sp<GrTextureProxy> GrSurfaceProxy::MakeDeferredMipMap(
 }

 sk_sp<GrTextureProxy> GrSurfaceProxy::MakeWrappedBackend(GrContext* context,
-                                                         GrBackendTexture& backendTex,
+                                                         const GrBackendTexture& backendTex,
                                                          GrSurfaceOrigin origin) {
     sk_sp<GrTexture> tex(context->resourceProvider()->wrapBackendTexture(backendTex));
     return GrSurfaceProxy::MakeWrapped(std::move(tex), origin);


@@ -4387,78 +4387,6 @@ void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
    }
}
GrBackendObject GrGLGpu::createTestingOnlyBackendObject(void* pixels, int w, int h,
GrPixelConfig config, bool /*isRT*/,
GrMipMapped mipMapped) {
if (!this->caps()->isConfigTexturable(config)) {
return reinterpret_cast<GrBackendObject>(nullptr);
}
// Currently we don't support uploading pixel data when mipped.
if (pixels && GrMipMapped::kYes == mipMapped) {
return reinterpret_cast<GrBackendObject>(nullptr);
}
std::unique_ptr<GrGLTextureInfo> info = skstd::make_unique<GrGLTextureInfo>();
info->fTarget = GR_GL_TEXTURE_2D;
info->fID = 0;
GL_CALL(GenTextures(1, &info->fID));
GL_CALL(ActiveTexture(GR_GL_TEXTURE0));
GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
GL_CALL(BindTexture(info->fTarget, info->fID));
fHWBoundTextureUniqueIDs[0].makeInvalid();
GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_MAG_FILTER, GR_GL_NEAREST));
GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_MIN_FILTER, GR_GL_NEAREST));
GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_WRAP_S, GR_GL_CLAMP_TO_EDGE));
GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_WRAP_T, GR_GL_CLAMP_TO_EDGE));
GrGLenum internalFormat;
GrGLenum externalFormat;
GrGLenum externalType;
if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
&externalType)) {
return reinterpret_cast<GrBackendObject>(nullptr);
}
this->unbindCpuToGpuXferBuffer();
// Figure out the number of mip levels.
int mipLevels = 1;
if (GrMipMapped::kYes == mipMapped) {
mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
}
size_t bpp = GrBytesPerPixel(config);
size_t baseLayerSize = bpp * w * h;
SkAutoMalloc defaultStorage(baseLayerSize);
if (!pixels) {
// Fill in the texture with all zeros so we don't have random garbage
pixels = defaultStorage.get();
memset(pixels, 0, baseLayerSize);
}
int width = w;
int height = h;
for (int i = 0; i < mipLevels; ++i) {
GL_CALL(TexImage2D(info->fTarget, i, internalFormat, width, height, 0, externalFormat,
externalType, pixels));
width = SkTMax(1, width / 2);
height = SkTMax(1, height / 2);
}
return reinterpret_cast<GrBackendObject>(info.release());
}
void GrGLGpu::deleteTestingOnlyBackendObject(GrBackendObject id, bool abandonTexture) {
std::unique_ptr<const GrGLTextureInfo> info(reinterpret_cast<const GrGLTextureInfo*>(id));
GrGLuint texID = info->fID;
if (!abandonTexture) {
GL_CALL(DeleteTextures(1, &texID));
}
}
GrBackendTexture GrGLGpu::createTestingOnlyBackendTexture(void* pixels, int w, int h,
                                                          GrPixelConfig config, bool /*isRT*/,
                                                          GrMipMapped mipMapped) {


@@ -161,12 +161,6 @@ public:
                                                 int width,
                                                 int height) override;

-    GrBackendObject createTestingOnlyBackendObject(void* pixels, int w, int h,
-                                                   GrPixelConfig config,
-                                                   bool isRenderTarget,
-                                                   GrMipMapped mipMapped) override;
-    void deleteTestingOnlyBackendObject(GrBackendObject, bool abandonTexture) override;
-
     GrBackendTexture createTestingOnlyBackendTexture(void* pixels, int w, int h,
                                                      GrPixelConfig config,
                                                      bool isRenderTarget,


@@ -90,21 +90,6 @@ GrStencilAttachment* GrMockGpu::createStencilAttachmentForRenderTarget(const GrR
     return new GrMockStencilAttachment(this, width, height, kBits, rt->numColorSamples());
 }

-GrBackendObject GrMockGpu::createTestingOnlyBackendObject(void* pixels, int w, int h,
-                                                          GrPixelConfig config, bool isRT,
-                                                          GrMipMapped) {
-    auto info = new GrMockTextureInfo;
-    info->fID = NextExternalTextureID();
-    fOutstandingTestingOnlyTextureIDs.add(info->fID);
-    return reinterpret_cast<GrBackendObject>(info);
-}
-
-void GrMockGpu::deleteTestingOnlyBackendObject(GrBackendObject object, bool abandonTexture) {
-    auto info = reinterpret_cast<const GrMockTextureInfo*>(object);
-    fOutstandingTestingOnlyTextureIDs.remove(info->fID);
-    delete info;
-}
-
 GrBackendTexture GrMockGpu::createTestingOnlyBackendTexture(void* pixels, int w, int h,
                                                             GrPixelConfig config, bool isRT,
                                                             GrMipMapped) {


@@ -129,10 +129,6 @@ private:
                                                         int height) override;
     void clearStencil(GrRenderTarget*, int clearValue) override {}

-    GrBackendObject createTestingOnlyBackendObject(void* pixels, int w, int h, GrPixelConfig,
-                                                   bool isRT, GrMipMapped) override;
-    void deleteTestingOnlyBackendObject(GrBackendObject, bool abandonTexture) override;
-
     GrBackendTexture createTestingOnlyBackendTexture(void* pixels, int w, int h, GrPixelConfig,
                                                      bool isRT, GrMipMapped) override;
     bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;


@@ -141,13 +141,6 @@ private:
     void clearStencil(GrRenderTarget* target, int clearValue) override {}

-    GrBackendObject createTestingOnlyBackendObject(void* pixels, int w, int h,
-                                                   GrPixelConfig config, bool isRT,
-                                                   GrMipMapped) override {
-        return 0;
-    }
-    void deleteTestingOnlyBackendObject(GrBackendObject, bool abandonTexture = false) override {}
-
     GrBackendTexture createTestingOnlyBackendTexture(void* pixels, int w, int h,
                                                      GrPixelConfig config, bool isRT,
                                                      GrMipMapped) override {


@@ -1173,353 +1173,6 @@ bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc, size
    return true;
}
GrBackendObject GrVkGpu::createTestingOnlyBackendObject(void* srcData, int w, int h,
GrPixelConfig config,
bool isRenderTarget,
GrMipMapped mipMapped) {
VkFormat pixelFormat;
if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
return 0;
}
bool linearTiling = false;
if (!fVkCaps->isConfigTexturable(config)) {
return 0;
}
if (isRenderTarget && !fVkCaps->isConfigRenderable(config, false)) {
return 0;
}
// Currently we don't support uploading pixel data when mipped.
if (srcData && GrMipMapped::kYes == mipMapped) {
return 0;
}
if (fVkCaps->isConfigTexturableLinearly(config) &&
(!isRenderTarget || fVkCaps->isConfigRenderableLinearly(config, false)) &&
GrMipMapped::kNo == mipMapped) {
linearTiling = true;
}
VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
if (isRenderTarget) {
usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
}
VkImage image = VK_NULL_HANDLE;
GrVkAlloc alloc = { VK_NULL_HANDLE, 0, 0, 0 };
VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
? VK_IMAGE_LAYOUT_PREINITIALIZED
: VK_IMAGE_LAYOUT_UNDEFINED;
// Create Image
VkSampleCountFlagBits vkSamples;
if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
return 0;
}
// Figure out the number of mip levels.
uint32_t mipLevels = 1;
if (GrMipMapped::kYes == mipMapped) {
mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
}
const VkImageCreateInfo imageCreateInfo = {
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
nullptr, // pNext
0, // VkImageCreateFlags
VK_IMAGE_TYPE_2D, // VkImageType
pixelFormat, // VkFormat
{ (uint32_t) w, (uint32_t) h, 1 }, // VkExtent3D
mipLevels, // mipLevels
1, // arrayLayers
vkSamples, // samples
imageTiling, // VkImageTiling
usageFlags, // VkImageUsageFlags
VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode
0, // queueFamilyCount
0, // pQueueFamilyIndices
initialLayout // initialLayout
};
GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateImage(this->device(), &imageCreateInfo, nullptr, &image));
if (!GrVkMemory::AllocAndBindImageMemory(this, image, linearTiling, &alloc)) {
VK_CALL(DestroyImage(this->device(), image, nullptr));
return 0;
}
// We need to declare these early so that we can delete them at the end outside of the if block.
GrVkAlloc bufferAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
VkBuffer buffer = VK_NULL_HANDLE;
VkResult err;
const VkCommandBufferAllocateInfo cmdInfo = {
VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
nullptr, // pNext
fCmdPool, // commandPool
VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
1 // bufferCount
};
VkCommandBuffer cmdBuffer;
err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
if (err) {
GrVkMemory::FreeImageMemory(this, false, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
return 0;
}
VkCommandBufferBeginInfo cmdBufferBeginInfo;
memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmdBufferBeginInfo.pNext = nullptr;
cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
cmdBufferBeginInfo.pInheritanceInfo = nullptr;
err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
SkASSERT(!err);
size_t bpp = GrBytesPerPixel(config);
size_t rowCopyBytes = bpp * w;
if (linearTiling) {
const VkImageSubresource subres = {
VK_IMAGE_ASPECT_COLOR_BIT,
0, // mipLevel
0, // arraySlice
};
VkSubresourceLayout layout;
VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));
if (!copy_testing_data(this, srcData, alloc, 0, rowCopyBytes,
static_cast<size_t>(layout.rowPitch), h)) {
GrVkMemory::FreeImageMemory(this, true, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
VK_CALL(EndCommandBuffer(cmdBuffer));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
return 0;
}
} else {
SkASSERT(w && h);
SkTArray<size_t> individualMipOffsets(mipLevels);
individualMipOffsets.push_back(0);
size_t combinedBufferSize = w * bpp * h;
int currentWidth = w;
int currentHeight = h;
// The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
// config. This works with the assumption that the bytes in pixel config is always a power
// of 2.
SkASSERT((bpp & (bpp - 1)) == 0);
const size_t alignmentMask = 0x3 | (bpp - 1);
for (uint32_t currentMipLevel = 1; currentMipLevel < mipLevels; currentMipLevel++) {
currentWidth = SkTMax(1, currentWidth/2);
currentHeight = SkTMax(1, currentHeight/2);
const size_t trimmedSize = currentWidth * bpp * currentHeight;
const size_t alignmentDiff = combinedBufferSize & alignmentMask;
if (alignmentDiff != 0) {
combinedBufferSize += alignmentMask - alignmentDiff + 1;
}
individualMipOffsets.push_back(combinedBufferSize);
combinedBufferSize += trimmedSize;
}
VkBufferCreateInfo bufInfo;
memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufInfo.flags = 0;
bufInfo.size = combinedBufferSize;
bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
bufInfo.queueFamilyIndexCount = 0;
bufInfo.pQueueFamilyIndices = nullptr;
err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));
if (err) {
GrVkMemory::FreeImageMemory(this, false, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
VK_CALL(EndCommandBuffer(cmdBuffer));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
return 0;
}
if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type,
true, &bufferAlloc)) {
GrVkMemory::FreeImageMemory(this, false, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
VK_CALL(EndCommandBuffer(cmdBuffer));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
return 0;
}
currentWidth = w;
currentHeight = h;
for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
SkASSERT(0 == currentMipLevel || !srcData);
size_t currentRowBytes = bpp * currentWidth;
size_t bufferOffset = individualMipOffsets[currentMipLevel];
if (!copy_testing_data(this, srcData, bufferAlloc, bufferOffset,
currentRowBytes, currentRowBytes, currentHeight)) {
GrVkMemory::FreeImageMemory(this, false, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
VK_CALL(EndCommandBuffer(cmdBuffer));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
return 0;
}
currentWidth = SkTMax(1, currentWidth/2);
currentHeight = SkTMax(1, currentHeight/2);
}
// Set image layout and add barrier
VkImageMemoryBarrier barrier;
memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.oldLayout = initialLayout;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1};
VK_CALL(CmdPipelineBarrier(cmdBuffer,
GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
VK_PIPELINE_STAGE_TRANSFER_BIT,
0,
0, nullptr,
0, nullptr,
1, &barrier));
initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
SkTArray<VkBufferImageCopy> regions(mipLevels);
currentWidth = w;
currentHeight = h;
for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
// Submit copy command
VkBufferImageCopy& region = regions.push_back();
memset(&region, 0, sizeof(VkBufferImageCopy));
region.bufferOffset = individualMipOffsets[currentMipLevel];
region.bufferRowLength = currentWidth;
region.bufferImageHeight = currentHeight;
region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
region.imageOffset = { 0, 0, 0 };
region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
currentWidth = SkTMax(1, currentWidth/2);
currentHeight = SkTMax(1, currentHeight/2);
}
VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, regions.count(),
regions.begin()));
}
// Change Image layout to shader read since if we use this texture as a borrowed textures within
// Ganesh we require that its layout be set to that
VkImageMemoryBarrier barrier;
memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
barrier.oldLayout = initialLayout;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1};
VK_CALL(CmdPipelineBarrier(cmdBuffer,
GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
0,
0, nullptr,
0, nullptr,
1, &barrier));
// End CommandBuffer
err = VK_CALL(EndCommandBuffer(cmdBuffer));
SkASSERT(!err);
// Create Fence for queue
VkFence fence;
VkFenceCreateInfo fenceInfo;
memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
SkASSERT(!err);
VkSubmitInfo submitInfo;
memset(&submitInfo, 0, sizeof(VkSubmitInfo));
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.pNext = nullptr;
submitInfo.waitSemaphoreCount = 0;
submitInfo.pWaitSemaphores = nullptr;
submitInfo.pWaitDstStageMask = 0;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &cmdBuffer;
submitInfo.signalSemaphoreCount = 0;
submitInfo.pSignalSemaphores = nullptr;
err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
SkASSERT(!err);
err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX));
if (VK_TIMEOUT == err) {
GrVkMemory::FreeImageMemory(this, false, alloc);
VK_CALL(DestroyImage(fDevice, image, nullptr));
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
VK_CALL(DestroyFence(fDevice, fence, nullptr));
SkDebugf("Fence failed to signal: %d\n", err);
SK_ABORT("failing");
}
SkASSERT(!err);
// Clean up transfer resources
if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
}
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
VK_CALL(DestroyFence(fDevice, fence, nullptr));
GrVkImageInfo* info = new GrVkImageInfo;
info->fImage = image;
info->fAlloc = alloc;
info->fImageTiling = imageTiling;
info->fImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
info->fFormat = pixelFormat;
info->fLevelCount = mipLevels;
return (GrBackendObject)info;
}
void GrVkGpu::deleteTestingOnlyBackendObject(GrBackendObject id, bool abandon) {
GrVkImageInfo* backend = reinterpret_cast<GrVkImageInfo*>(id);
if (backend) {
if (!abandon) {
// something in the command buffer may still be using this, so force submit
this->submitCommandBuffer(kForce_SyncQueue);
GrVkImage::DestroyImageInfo(this, backend);
}
delete backend;
}
}
GrBackendTexture GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                          GrPixelConfig config,
                                                          bool isRenderTarget,


@@ -85,12 +85,6 @@ public:
     void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}

-    GrBackendObject createTestingOnlyBackendObject(void* pixels, int w, int h,
-                                                   GrPixelConfig config,
-                                                   bool isRenderTarget,
-                                                   GrMipMapped) override;
-    void deleteTestingOnlyBackendObject(GrBackendObject id, bool abandonTexture) override;
-
     GrBackendTexture createTestingOnlyBackendTexture(void* pixels, int w, int h,
                                                      GrPixelConfig config,
                                                      bool isRenderTarget,


@@ -379,6 +379,14 @@ sk_sp<SkImage> SkImage::MakeFromYUVTexturesCopy(GrContext* ctx, SkYUVColorSpace
     return nullptr;
 }

+sk_sp<SkImage> SkImage::MakeFromYUVTexturesCopy(GrContext* ctx, SkYUVColorSpace space,
+                                                const GrBackendTexture yuvTextureHandles[3],
+                                                const SkISize yuvSizes[3],
+                                                GrSurfaceOrigin origin,
+                                                sk_sp<SkColorSpace> imageColorSpace) {
+    return nullptr;
+}
+
 sk_sp<SkImage> SkImage::makeTextureImage(GrContext*, SkColorSpace* dstColorSpace) const {
     return nullptr;
 }


@@ -332,49 +332,38 @@ static GrBackendTexture make_backend_texture_from_handle(GrBackend backend,
     }
 }

+static bool are_yuv_sizes_valid(const SkISize yuvSizes[], bool nv12) {
+    if (yuvSizes[0].fWidth <= 0 || yuvSizes[0].fHeight <= 0 ||
+        yuvSizes[1].fWidth <= 0 || yuvSizes[1].fHeight <= 0) {
+        return false;
+    }
+    if (!nv12 && (yuvSizes[2].fWidth <= 0 || yuvSizes[2].fHeight <= 0)) {
+        return false;
+    }
+
+    return true;
+}
+
 static sk_sp<SkImage> make_from_yuv_textures_copy(GrContext* ctx, SkYUVColorSpace colorSpace,
                                                   bool nv12,
-                                                  const GrBackendObject yuvTextureHandles[],
+                                                  const GrBackendTexture yuvBackendTextures[],
                                                   const SkISize yuvSizes[],
                                                   GrSurfaceOrigin origin,
                                                   sk_sp<SkColorSpace> imageColorSpace) {
-    const SkBudgeted budgeted = SkBudgeted::kYes;
-    if (yuvSizes[0].fWidth <= 0 || yuvSizes[0].fHeight <= 0 || yuvSizes[1].fWidth <= 0 ||
-        yuvSizes[1].fHeight <= 0) {
-        return nullptr;
-    }
-    if (!nv12 && (yuvSizes[2].fWidth <= 0 || yuvSizes[2].fHeight <= 0)) {
+    if (!are_yuv_sizes_valid(yuvSizes, nv12)) {
         return nullptr;
     }

-    const GrPixelConfig kConfig = nv12 ? kRGBA_8888_GrPixelConfig : kAlpha_8_GrPixelConfig;
-    GrBackend backend = ctx->contextPriv().getBackend();
-    GrBackendTexture yTex = make_backend_texture_from_handle(backend,
-                                                             yuvSizes[0].fWidth,
-                                                             yuvSizes[0].fHeight,
-                                                             kConfig,
-                                                             yuvTextureHandles[0]);
-    GrBackendTexture uTex = make_backend_texture_from_handle(backend,
-                                                             yuvSizes[1].fWidth,
-                                                             yuvSizes[1].fHeight,
-                                                             kConfig,
-                                                             yuvTextureHandles[1]);
-    sk_sp<GrTextureProxy> yProxy = GrSurfaceProxy::MakeWrappedBackend(ctx, yTex, origin);
-    sk_sp<GrTextureProxy> uProxy = GrSurfaceProxy::MakeWrappedBackend(ctx, uTex, origin);
+    sk_sp<GrTextureProxy> yProxy = GrSurfaceProxy::MakeWrappedBackend(ctx, yuvBackendTextures[0],
+                                                                      origin);
+    sk_sp<GrTextureProxy> uProxy = GrSurfaceProxy::MakeWrappedBackend(ctx, yuvBackendTextures[1],
+                                                                      origin);
     sk_sp<GrTextureProxy> vProxy;

     if (nv12) {
         vProxy = uProxy;
     } else {
-        GrBackendTexture vTex = make_backend_texture_from_handle(backend,
-                                                                 yuvSizes[2].fWidth,
-                                                                 yuvSizes[2].fHeight,
-                                                                 kConfig,
-                                                                 yuvTextureHandles[2]);
-        vProxy = GrSurfaceProxy::MakeWrappedBackend(ctx, vTex, origin);
+        vProxy = GrSurfaceProxy::MakeWrappedBackend(ctx, yuvBackendTextures[2], origin);
     }
     if (!yProxy || !uProxy || !vProxy) {
         return nullptr;
@@ -413,15 +402,55 @@ static sk_sp<SkImage> make_from_yuv_textures_copy(GrContext* ctx, SkYUVColorSpac
     // MDB: this call is okay bc we know 'renderTargetContext' was exact
     return sk_make_sp<SkImage_Gpu>(ctx, kNeedNewImageUniqueID, kOpaque_SkAlphaType,
                                    renderTargetContext->asTextureProxyRef(),
-                                   renderTargetContext->colorSpaceInfo().refColorSpace(), budgeted);
+                                   renderTargetContext->colorSpaceInfo().refColorSpace(),
+                                   SkBudgeted::kYes);
 }

+static sk_sp<SkImage> make_from_yuv_objects_copy(GrContext* ctx, SkYUVColorSpace colorSpace,
+                                                 bool nv12,
+                                                 const GrBackendObject yuvTextureHandles[],
+                                                 const SkISize yuvSizes[],
+                                                 GrSurfaceOrigin origin,
+                                                 sk_sp<SkColorSpace> imageColorSpace) {
+    if (!are_yuv_sizes_valid(yuvSizes, nv12)) {
+        return nullptr;
+    }
+
+    GrBackendTexture backendTextures[3];
+    const GrPixelConfig kConfig = nv12 ? kRGBA_8888_GrPixelConfig : kAlpha_8_GrPixelConfig;
+    GrBackend backend = ctx->contextPriv().getBackend();
+
+    backendTextures[0] = make_backend_texture_from_handle(backend,
+                                                          yuvSizes[0].fWidth,
+                                                          yuvSizes[0].fHeight,
+                                                          kConfig,
+                                                          yuvTextureHandles[0]);
+    backendTextures[1] = make_backend_texture_from_handle(backend,
+                                                          yuvSizes[1].fWidth,
+                                                          yuvSizes[1].fHeight,
+                                                          kConfig,
+                                                          yuvTextureHandles[1]);
+    if (!nv12) {
+        backendTextures[2] = make_backend_texture_from_handle(backend,
+                                                              yuvSizes[2].fWidth,
+                                                              yuvSizes[2].fHeight,
+                                                              kConfig,
+                                                              yuvTextureHandles[2]);
+    }
+
+    return make_from_yuv_textures_copy(ctx, colorSpace, nv12,
+                                       backendTextures, yuvSizes, origin,
+                                       std::move(imageColorSpace));
+}
+
 sk_sp<SkImage> SkImage::MakeFromYUVTexturesCopy(GrContext* ctx, SkYUVColorSpace colorSpace,
                                                 const GrBackendObject yuvTextureHandles[3],
                                                 const SkISize yuvSizes[3], GrSurfaceOrigin origin,
                                                 sk_sp<SkColorSpace> imageColorSpace) {
-    return make_from_yuv_textures_copy(ctx, colorSpace, false, yuvTextureHandles, yuvSizes, origin,
-                                       std::move(imageColorSpace));
+    return make_from_yuv_objects_copy(ctx, colorSpace, false, yuvTextureHandles, yuvSizes, origin,
+                                      std::move(imageColorSpace));
 }

 sk_sp<SkImage> SkImage::MakeFromNV12TexturesCopy(GrContext* ctx, SkYUVColorSpace colorSpace,
@@ -429,7 +458,24 @@ sk_sp<SkImage> SkImage::MakeFromNV12TexturesCopy(GrContext* ctx, SkYUVColorSpace
                                                  const SkISize yuvSizes[2],
                                                  GrSurfaceOrigin origin,
                                                  sk_sp<SkColorSpace> imageColorSpace) {
-    return make_from_yuv_textures_copy(ctx, colorSpace, true, yuvTextureHandles, yuvSizes, origin,
-                                       std::move(imageColorSpace));
+    return make_from_yuv_objects_copy(ctx, colorSpace, true, yuvTextureHandles, yuvSizes, origin,
+                                      std::move(imageColorSpace));
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVTexturesCopy(GrContext* ctx, SkYUVColorSpace colorSpace,
+                                                const GrBackendTexture yuvBackendTextures[3],
+                                                const SkISize yuvSizes[3], GrSurfaceOrigin origin,
+                                                sk_sp<SkColorSpace> imageColorSpace) {
+    return make_from_yuv_textures_copy(ctx, colorSpace, false, yuvBackendTextures, yuvSizes, origin,
+                                       std::move(imageColorSpace));
+}
+
+sk_sp<SkImage> SkImage::MakeFromNV12TexturesCopy(GrContext* ctx, SkYUVColorSpace colorSpace,
+                                                 const GrBackendTexture yuvBackendTextures[2],
+                                                 const SkISize yuvSizes[2],
+                                                 GrSurfaceOrigin origin,
+                                                 sk_sp<SkColorSpace> imageColorSpace) {
+    return make_from_yuv_textures_copy(ctx, colorSpace, true, yuvBackendTextures, yuvSizes, origin,
+                                       std::move(imageColorSpace));
 }
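
For orientation, the bridging path above keeps the GrBackendObject overloads working by wrapping
each raw handle in a GrBackendTexture via make_backend_texture_from_handle(), whose body is not
part of this diff. A hypothetical reconstruction of what that helper is assumed to do, based only
on how it is called here (not the actual Skia source):

    // Sketch: map an opaque GrBackendObject handle to a typed GrBackendTexture for the two
    // backends exercised by this CL. The GL and Vulkan GrBackendTexture constructors shown are
    // the ones from this era of the API.
    static GrBackendTexture make_backend_texture_from_handle(GrBackend backend,
                                                             int width, int height,
                                                             GrPixelConfig config,
                                                             GrBackendObject handle) {
        switch (backend) {
            case kOpenGL_GrBackend: {
                const GrGLTextureInfo* glInfo = reinterpret_cast<const GrGLTextureInfo*>(handle);
                return GrBackendTexture(width, height, config, *glInfo);
            }
            case kVulkan_GrBackend: {
                const GrVkImageInfo* vkInfo = reinterpret_cast<const GrVkImageInfo*>(handle);
                return GrBackendTexture(width, height, *vkInfo);
            }
            default:
                return GrBackendTexture();  // invalid texture for unsupported backends (assumed)
        }
    }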