Have GrVkTexture not derive from GrVkImage.

As a side effect of this change, GrVkGpu now passes GrVkAttachments
around instead of GrVkTextures where possible, to start the
transition within the backend code.

Bug: skia:10727
Change-Id: Ibc9553cdbd7f6ae845c56aad3f25f58e4c478e46
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/379577
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
This commit is contained in:
Greg Daniel 2021-03-12 16:29:40 -05:00 committed by Skia Commit-Bot
parent 4bcb56f389
commit e895ab2fc8
14 changed files with 316 additions and 428 deletions

View File

@ -22,11 +22,7 @@ size_t GrAttachment::onGpuMemorySize() const {
// the msaa and stencil attachments track their own size because they do get cached separately.
// For all GrTexture* based things we will continue to use the GrTexture* to report size and
// the owned attachments will have no size and be uncached.
// TODO: Once we start using texture attachments this check really should be !texture. However,
// until then in GrVkTextureRenderTarget we make a wrapped attachment to use for the render
// target which duplicates the GrTexture. These will be merged once we use texture attachments.
if ((fSupportedUsages & UsageFlags::kStencilAttachment) ||
((fSupportedUsages & UsageFlags::kColorAttachment) && fSampleCnt > 1)) {
if (!(fSupportedUsages & UsageFlags::kTexture)) {
GrBackendFormat format = this->backendFormat();
SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);

View File

@ -1546,12 +1546,13 @@ GrCaps::SurfaceReadPixelsSupport GrVkCaps::surfaceSupportsReadPixels(
return SurfaceReadPixelsSupport::kUnsupported;
}
if (auto tex = static_cast<const GrVkTexture*>(surface->asTexture())) {
auto texAttachment = tex->textureAttachment();
// We can't directly read from a VkImage that has a ycbcr sampler.
if (tex->ycbcrConversionInfo().isValid()) {
if (texAttachment->ycbcrConversionInfo().isValid()) {
return SurfaceReadPixelsSupport::kCopyToTexture2D;
}
// We can't directly read from a compressed format
if (GrVkFormatIsCompressed(tex->imageFormat())) {
if (GrVkFormatIsCompressed(texAttachment->imageFormat())) {
return SurfaceReadPixelsSupport::kCopyToTexture2D;
}
return SurfaceReadPixelsSupport::kSupported;
@ -1571,7 +1572,7 @@ bool GrVkCaps::onSurfaceSupportsWritePixels(const GrSurface* surface) const {
// We can't write to a texture that has a ycbcr sampler.
if (auto tex = static_cast<const GrVkTexture*>(surface->asTexture())) {
// We can't directly read from a VkImage that has a ycbcr sampler.
if (tex->ycbcrConversionInfo().isValid()) {
if (tex->textureAttachment()->ycbcrConversionInfo().isValid()) {
return false;
}
}

View File

@ -424,47 +424,53 @@ bool GrVkGpu::onWritePixels(GrSurface* surface, int left, int top, int width, in
GrColorType surfaceColorType, GrColorType srcColorType,
const GrMipLevel texels[], int mipLevelCount,
bool prepForTexSampling) {
GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
if (!vkTex) {
GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
if (!texture) {
return false;
}
GrVkAttachment* texAttachment = texture->textureAttachment();
// Make sure we have at least the base level
if (!mipLevelCount || !texels[0].fPixels) {
return false;
}
SkASSERT(!GrVkFormatIsCompressed(vkTex->imageFormat()));
SkASSERT(!GrVkFormatIsCompressed(texAttachment->imageFormat()));
bool success = false;
bool linearTiling = vkTex->isLinearTiled();
bool linearTiling = texAttachment->isLinearTiled();
if (linearTiling) {
if (mipLevelCount > 1) {
SkDebugf("Can't upload mipmap data to linear tiled texture");
return false;
}
if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
if (VK_IMAGE_LAYOUT_PREINITIALIZED != texAttachment->currentLayout()) {
// Need to change the layout to general in order to perform a host write
vkTex->setImageLayout(this,
VK_IMAGE_LAYOUT_GENERAL,
VK_ACCESS_HOST_WRITE_BIT,
VK_PIPELINE_STAGE_HOST_BIT,
false);
texAttachment->setImageLayout(this,
VK_IMAGE_LAYOUT_GENERAL,
VK_ACCESS_HOST_WRITE_BIT,
VK_PIPELINE_STAGE_HOST_BIT,
false);
if (!this->submitCommandBuffer(kForce_SyncQueue)) {
return false;
}
}
success = this->uploadTexDataLinear(vkTex, left, top, width, height, srcColorType,
success = this->uploadTexDataLinear(texAttachment, left, top, width, height, srcColorType,
texels[0].fPixels, texels[0].fRowBytes);
} else {
SkASSERT(mipLevelCount <= vkTex->maxMipmapLevel() + 1);
success = this->uploadTexDataOptimal(vkTex, left, top, width, height, srcColorType, texels,
mipLevelCount);
SkASSERT(mipLevelCount <= (int)texAttachment->mipLevels());
success = this->uploadTexDataOptimal(texAttachment, left, top, width, height, srcColorType,
texels, mipLevelCount);
if (1 == mipLevelCount) {
texture->markMipmapsDirty();
}
}
if (prepForTexSampling) {
vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
false);
texAttachment->setImageLayout(this,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
false);
}
return success;
@ -490,10 +496,11 @@ bool GrVkGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int widt
if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
return false;
}
GrVkTexture* vkTex = static_cast<GrVkTexture*>(texture);
if (!vkTex) {
GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
if (!tex) {
return false;
}
GrVkAttachment* vkTex = tex->textureAttachment();
// Can't transfer compressed data
SkASSERT(!GrVkFormatIsCompressed(vkTex->imageFormat()));
@ -536,7 +543,7 @@ bool GrVkGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int widt
&region);
this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));
vkTex->markMipmapsDirty();
tex->markMipmapsDirty();
return true;
}
@ -568,7 +575,8 @@ bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int wi
}
srcImage = rt->nonMSAAAttachment();
} else {
srcImage = static_cast<GrVkTexture*>(surface->asTexture());
SkASSERT(surface->asTexture());
srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureAttachment();
}
if (GrVkFormatBytesPerBlock(srcImage->imageFormat()) !=
@ -626,12 +634,12 @@ void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect&
GrRenderTarget* dstRT = dst->asRenderTarget();
GrTexture* dstTex = dst->asTexture();
if (dstTex) {
dstImage = static_cast<GrVkTexture*>(dstTex);
dstImage = static_cast<GrVkTexture*>(dstTex)->textureAttachment();
} else {
SkASSERT(dst->asRenderTarget());
dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
SkASSERT(dstImage);
}
SkASSERT(dstImage);
dstImage->setImageLayout(this,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
@ -665,21 +673,22 @@ void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resol
SkIPoint::Make(resolveRect.x(), resolveRect.y()));
}
bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex, int left, int top, int width, int height,
GrColorType dataColorType, const void* data, size_t rowBytes) {
bool GrVkGpu::uploadTexDataLinear(GrVkAttachment* texAttachment, int left, int top, int width,
int height, GrColorType dataColorType, const void* data,
size_t rowBytes) {
SkASSERT(data);
SkASSERT(tex->isLinearTiled());
SkASSERT(texAttachment->isLinearTiled());
SkDEBUGCODE(
SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
SkIRect bounds = SkIRect::MakeWH(tex->width(), tex->height());
SkIRect bounds = SkIRect::MakeWH(texAttachment->width(), texAttachment->height());
SkASSERT(bounds.contains(subRect));
)
size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
size_t trimRowBytes = width * bpp;
SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texAttachment->currentLayout() ||
VK_IMAGE_LAYOUT_GENERAL == texAttachment->currentLayout());
const VkImageSubresource subres = {
VK_IMAGE_ASPECT_COLOR_BIT,
0, // mipLevel
@ -690,11 +699,11 @@ bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex, int left, int top, int width
const GrVkInterface* interface = this->vkInterface();
GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
tex->image(),
texAttachment->image(),
&subres,
&layout));
const GrVkAlloc& alloc = tex->alloc();
const GrVkAlloc& alloc = texAttachment->alloc();
if (VK_NULL_HANDLE == alloc.fMemory) {
return false;
}
@ -777,42 +786,46 @@ static size_t fill_in_regions(GrStagingBufferManager* stagingBufferManager,
return combinedBufferSize;
}
bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int width, int height,
GrColorType dataColorType, const GrMipLevel texels[],
bool GrVkGpu::uploadTexDataOptimal(GrVkAttachment* texAttachment, int left, int top, int width,
int height, GrColorType dataColorType, const GrMipLevel texels[],
int mipLevelCount) {
if (!this->currentCommandBuffer()) {
return false;
}
SkASSERT(!tex->isLinearTiled());
SkASSERT(!texAttachment->isLinearTiled());
// The assumption is either that we have no mipmaps, or that our rect is the entire texture
SkASSERT(1 == mipLevelCount ||
(0 == left && 0 == top && width == tex->width() && height == tex->height()));
(0 == left && 0 == top && width == texAttachment->width() &&
height == texAttachment->height()));
// We assume that if the texture has mip levels, we either upload to all the levels or just the
// first.
SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->maxMipmapLevel() + 1));
SkASSERT(1 == mipLevelCount || mipLevelCount == (int)texAttachment->mipLevels());
if (width == 0 || height == 0) {
return false;
}
SkASSERT(this->vkCaps().surfaceSupportsWritePixels(tex));
SkASSERT(this->vkCaps().areColorTypeAndFormatCompatible(dataColorType, tex->backendFormat()));
SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texAttachment));
SkASSERT(this->vkCaps().areColorTypeAndFormatCompatible(
dataColorType, texAttachment->backendFormat()));
// For RGB_888x src data we are uploading it first to an RGBA texture and then copying it to the
// dst RGB texture. Thus we do not upload mip levels for that.
if (dataColorType == GrColorType::kRGB_888x && tex->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
if (dataColorType == GrColorType::kRGB_888x &&
texAttachment->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
// First check that we'll be able to do the copy to the to the R8G8B8 image in the end via a
// blit or draw.
if (!this->vkCaps().formatCanBeDstofBlit(VK_FORMAT_R8G8B8_UNORM, tex->isLinearTiled()) &&
if (!this->vkCaps().formatCanBeDstofBlit(VK_FORMAT_R8G8B8_UNORM,
texAttachment->isLinearTiled()) &&
!this->vkCaps().isFormatRenderable(VK_FORMAT_R8G8B8_UNORM, 1)) {
return false;
}
mipLevelCount = 1;
}
SkASSERT(this->vkCaps().isVkFormatTexturable(tex->imageFormat()));
SkASSERT(this->vkCaps().isVkFormatTexturable(texAttachment->imageFormat()));
size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
// texels is const.
@ -868,39 +881,27 @@ bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int widt
int uploadLeft = left;
int uploadTop = top;
GrVkTexture* uploadTexture = tex;
GrVkAttachment* uploadTexture = texAttachment;
// For uploading RGB_888x data to an R8G8B8_UNORM texture we must first upload the data to an
// R8G8B8A8_UNORM image and then copy it.
sk_sp<GrVkTexture> copyTexture;
if (dataColorType == GrColorType::kRGB_888x && tex->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
bool dstHasYcbcr = tex->ycbcrConversionInfo().isValid();
if (!this->vkCaps().canCopyAsBlit(tex->imageFormat(), 1, false, dstHasYcbcr,
sk_sp<GrVkAttachment> copyTexAttachment;
if (dataColorType == GrColorType::kRGB_888x &&
texAttachment->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
bool dstHasYcbcr = texAttachment->ycbcrConversionInfo().isValid();
if (!this->vkCaps().canCopyAsBlit(texAttachment->imageFormat(), 1, false, dstHasYcbcr,
VK_FORMAT_R8G8B8A8_UNORM, 1, false, false)) {
return false;
}
VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT |
VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT;
GrVkImage::ImageDesc imageDesc;
imageDesc.fImageType = VK_IMAGE_TYPE_2D;
imageDesc.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
imageDesc.fWidth = width;
imageDesc.fHeight = height;
imageDesc.fLevels = 1;
imageDesc.fSamples = 1;
imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
imageDesc.fUsageFlags = usageFlags;
imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
copyTexture = GrVkTexture::MakeNewTexture(this, SkBudgeted::kYes, {width, height},
imageDesc, GrMipmapStatus::kNotAllocated);
if (!copyTexture) {
copyTexAttachment = GrVkAttachment::MakeTexture(this, {width, height},
VK_FORMAT_R8G8B8A8_UNORM, /*mipLevels=*/1,
GrRenderable::kNo, /*numSamples=*/1,
SkBudgeted::kYes, GrProtected::kNo);
if (!copyTexAttachment) {
return false;
}
uploadTexture = copyTexture.get();
uploadTexture = copyTexAttachment.get();
uploadLeft = 0;
uploadTop = 0;
}
@ -958,21 +959,19 @@ bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int widt
// If we copied the data into a temporary image first, copy that image into our main texture
// now.
if (copyTexture) {
if (copyTexAttachment) {
SkASSERT(dataColorType == GrColorType::kRGB_888x);
SkAssertResult(this->copySurface(tex, copyTexture.get(), SkIRect::MakeWH(width, height),
SkAssertResult(this->copySurface(texAttachment, copyTexAttachment.get(),
SkIRect::MakeWH(width, height),
SkIPoint::Make(left, top)));
}
if (1 == mipLevelCount) {
tex->markMipmapsDirty();
}
return true;
}
// It's probably possible to roll this into uploadTexDataOptimal,
// but for now it's easier to maintain as a separate entity.
bool GrVkGpu::uploadTexDataCompressed(GrVkTexture* uploadTexture,
bool GrVkGpu::uploadTexDataCompressed(GrVkAttachment* uploadTexture,
SkImage::CompressionType compression, VkFormat vkFormat,
SkISize dimensions, GrMipmapped mipMapped,
const void* data, size_t dataSize) {
@ -1047,36 +1046,7 @@ sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
VkFormat pixelFormat;
SkAssertResult(format.asVkFormat(&pixelFormat));
SkASSERT(!GrVkFormatIsCompressed(pixelFormat));
VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
if (renderable == GrRenderable::kYes) {
usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
// We always make our render targets support being used as input attachments
usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
}
// For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
// VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
// will be using this texture in some copy or not. Also this assumes, as is the current case,
// that all render targets in vulkan are also textures. If we change this practice of setting
// both bits, we must make sure to set the destination bit if we are uploading srcData to the
// texture.
usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
// This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
// requested, this ImageDesc describes the resolved texture. Therefore we always have samples set
// to 1.
SkASSERT(mipLevelCount > 0);
GrVkImage::ImageDesc imageDesc;
imageDesc.fImageType = VK_IMAGE_TYPE_2D;
imageDesc.fFormat = pixelFormat;
imageDesc.fWidth = dimensions.fWidth;
imageDesc.fHeight = dimensions.fHeight;
imageDesc.fLevels = mipLevelCount;
imageDesc.fSamples = 1;
imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
imageDesc.fUsageFlags = usageFlags;
imageDesc.fIsProtected = isProtected;
GrMipmapStatus mipmapStatus =
mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
@ -1084,9 +1054,11 @@ sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
sk_sp<GrVkTexture> tex;
if (renderable == GrRenderable::kYes) {
tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
this, budgeted, dimensions, renderTargetSampleCnt, imageDesc, mipmapStatus);
this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
mipmapStatus, isProtected);
} else {
tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, imageDesc, mipmapStatus);
tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
mipLevelCount, isProtected, mipmapStatus);
}
if (!tex) {
@ -1099,7 +1071,8 @@ sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
}
SkSTArray<1, VkImageSubresourceRange> ranges;
bool inRange = false;
for (uint32_t i = 0; i < tex->mipLevels(); ++i) {
GrVkImage* texImage = tex->textureAttachment();
for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
if (levelClearMask & (1U << i)) {
if (inRange) {
ranges.back().levelCount++;
@ -1118,9 +1091,9 @@ sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
}
SkASSERT(!ranges.empty());
static constexpr VkClearColorValue kZeroClearColor = {};
tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
this->currentCommandBuffer()->clearColorImage(this, tex.get(), &kZeroClearColor,
this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
ranges.count(), ranges.begin());
}
return std::move(tex);
@ -1136,44 +1109,23 @@ sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
SkAssertResult(format.asVkFormat(&pixelFormat));
SkASSERT(GrVkFormatIsCompressed(pixelFormat));
VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
// For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
// VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
// will be using this texture in some copy or not. Also this assumes, as is the current case,
// that all render targets in vulkan are also textures. If we change this practice of setting
// both bits, we must make sure to set the destination bit if we are uploading srcData to the
// texture.
usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
int numMipLevels = 1;
if (mipMapped == GrMipmapped::kYes) {
numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
}
// Compressed textures with MIP levels or multiple samples are not supported as of now.
GrVkImage::ImageDesc imageDesc;
imageDesc.fImageType = VK_IMAGE_TYPE_2D;
imageDesc.fFormat = pixelFormat;
imageDesc.fWidth = dimensions.width();
imageDesc.fHeight = dimensions.height();
imageDesc.fLevels = numMipLevels;
imageDesc.fSamples = 1;
imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
imageDesc.fUsageFlags = usageFlags;
imageDesc.fIsProtected = isProtected;
GrMipmapStatus mipmapStatus = (mipMapped == GrMipmapped::kYes) ? GrMipmapStatus::kValid
: GrMipmapStatus::kNotAllocated;
auto tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, imageDesc, mipmapStatus);
auto tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
numMipLevels, isProtected, mipmapStatus);
if (!tex) {
return nullptr;
}
SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
if (!this->uploadTexDataCompressed(tex.get(), compression, pixelFormat, dimensions, mipMapped,
data, dataSize)) {
if (!this->uploadTexDataCompressed(tex->textureAttachment(), compression, pixelFormat,
dimensions, mipMapped, data, dataSize)) {
return nullptr;
}
@ -1434,7 +1386,7 @@ bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
if (!this->currentCommandBuffer()) {
return false;
}
auto* vkTex = static_cast<GrVkTexture*>(tex);
auto* vkTex = static_cast<GrVkTexture*>(tex)->textureAttachment();
// don't do anything for linearly tiled textures (can't have mipmaps)
if (vkTex->isLinearTiled()) {
SkDebugf("Trying to create mipmap for linear tiled texture");
@ -1671,14 +1623,16 @@ bool GrVkGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
if (!texture) {
return false;
}
GrVkAttachment* texAttachment = texture->textureAttachment();
GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
if (!cmdBuffer) {
return false;
}
texture->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
texAttachment->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
false);
// Unfortunately, CmdClearColorImage doesn't work for compressed formats
bool fastPath = data->type() == BackendTextureData::Type::kColor &&
@ -1700,7 +1654,7 @@ bool GrVkGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
range.baseMipLevel = 0;
range.layerCount = 1;
range.levelCount = info.fLevelCount;
cmdBuffer->clearColorImage(this, texture.get(), &vkColor, 1, &range);
cmdBuffer->clearColorImage(this, texAttachment, &vkColor, 1, &range);
} else {
SkImage::CompressionType compression = GrBackendFormatToCompressionType(
backendTexture.getBackendFormat());
@ -1738,16 +1692,16 @@ bool GrVkGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
// command buffer has a ref on the buffer. This avoids having to add and remove a ref for
// every upload in the frame.
const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
cmdBuffer->copyBufferToImage(this, vkBuffer->vkBuffer(),
texture.get(), texture->currentLayout(), regions.count(),
cmdBuffer->copyBufferToImage(this, vkBuffer->vkBuffer(), texAttachment,
texAttachment->currentLayout(), regions.count(),
regions.begin());
}
// Change image layout to shader read since if we use this texture as a borrowed
// texture within Ganesh we require that its layout be set to that
texture->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
false);
texAttachment->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
false);
if (finishedCallback) {
this->addFinishedCallback(std::move(finishedCallback));
@ -1835,9 +1789,10 @@ bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
SkISize dimensions,
const GrVkSharedImageInfo& newInfo,
GrBackendSurfaceMutableState* previousState) {
sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(
this, dimensions, kBorrow_GrWrapOwnership, GrWrapCacheable::kNo, kRW_GrIOType, info,
std::move(currentState));
sk_sp<GrVkAttachment> texture = GrVkAttachment::MakeWrapped(
this, dimensions, info, std::move(currentState),
GrVkAttachment::UsageFlags::kColorAttachment, kBorrow_GrWrapOwnership,
GrWrapCacheable::kNo, /*forSecondaryCB=*/false);
SkASSERT(texture);
if (!texture) {
return false;
@ -2136,7 +2091,7 @@ void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
for (GrSurfaceProxy* proxy : proxies) {
SkASSERT(proxy->isInstantiated());
if (GrTexture* tex = proxy->peekTexture()) {
image = static_cast<GrVkTexture*>(tex);
image = static_cast<GrVkTexture*>(tex)->textureAttachment();
} else {
GrRenderTarget* rt = proxy->peekRenderTarget();
SkASSERT(rt);
@ -2198,18 +2153,6 @@ void GrVkGpu::onReportSubmitHistograms() {
#endif
}
static int get_surface_sample_cnt(GrSurface* surf, const GrVkCaps& caps) {
if (const GrRenderTarget* rt = surf->asRenderTarget()) {
auto vkRT = static_cast<const GrVkRenderTarget*>(rt);
if (caps.preferDiscardableMSAAAttachment() && vkRT->resolveAttachment() &&
vkRT->resolveAttachment()->supportsInputAttachmentUsage()) {
return 1;
}
return rt->numSamples();
}
return 0;
}
void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
GrVkImage* srcImage, const SkIRect& srcRect,
const SkIPoint& dstPoint) {
@ -2218,8 +2161,8 @@ void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, GrSurface* src, GrVkImage*
}
#ifdef SK_DEBUG
int dstSampleCnt = get_surface_sample_cnt(dst, this->vkCaps());
int srcSampleCnt = get_surface_sample_cnt(src, this->vkCaps());
int dstSampleCnt = dstImage->vkImageInfo().fSampleCount;
int srcSampleCnt = srcImage->vkImageInfo().fSampleCount;
bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
VkFormat dstFormat = dstImage->imageFormat();
@ -2279,8 +2222,8 @@ void GrVkGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src, GrVkImage* dstIm
}
#ifdef SK_DEBUG
int dstSampleCnt = get_surface_sample_cnt(dst, this->vkCaps());
int srcSampleCnt = get_surface_sample_cnt(src, this->vkCaps());
int dstSampleCnt = dstImage->vkImageInfo().fSampleCount;
int srcSampleCnt = srcImage->vkImageInfo().fSampleCount;
bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
VkFormat dstFormat = dstImage->imageFormat();
@ -2363,9 +2306,6 @@ bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRe
return false;
}
int dstSampleCnt = get_surface_sample_cnt(dst, this->vkCaps());
int srcSampleCnt = get_surface_sample_cnt(src, this->vkCaps());
bool useDiscardableMSAA = this->vkCaps().preferDiscardableMSAAAttachment();
GrVkImage* dstImage;
@ -2382,9 +2322,11 @@ bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRe
} else {
dstImage = vkRT->colorAttachment();
}
} else if (dst->asTexture()) {
dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureAttachment();
} else {
SkASSERT(dst->asTexture());
dstImage = static_cast<GrVkTexture*>(dst->asTexture());
// The surface is a GrAttachment already
dstImage = static_cast<GrVkAttachment*>(dst);
}
GrRenderTarget* srcRT = src->asRenderTarget();
if (srcRT) {
@ -2395,14 +2337,20 @@ bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRe
} else {
srcImage = vkRT->colorAttachment();
}
} else {
} else if (src->asTexture()) {
SkASSERT(src->asTexture());
srcImage = static_cast<GrVkTexture*>(src->asTexture());
srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureAttachment();
} else {
// The surface is a GrAttachment already
srcImage = static_cast<GrVkAttachment*>(src);
}
VkFormat dstFormat = dstImage->imageFormat();
VkFormat srcFormat = srcImage->imageFormat();
int dstSampleCnt = dstImage->vkImageInfo().fSampleCount;
int srcSampleCnt = srcImage->vkImageInfo().fSampleCount;
bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
@ -2454,7 +2402,7 @@ bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int
}
image = rt->nonMSAAAttachment();
} else {
image = static_cast<GrVkTexture*>(surface->asTexture());
image = static_cast<GrVkTexture*>(surface->asTexture())->textureAttachment();
}
if (!image) {
@ -2464,7 +2412,7 @@ bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int
// Skia's RGB_888x color type, which we map to the vulkan R8G8B8_UNORM, expects the data to be
// 32 bits, but the Vulkan format is only 24. So we first copy the surface into an R8G8B8A8
// image and then do the read pixels from that.
sk_sp<GrVkTextureRenderTarget> copySurface;
sk_sp<GrVkAttachment> copySurface;
if (dstColorType == GrColorType::kRGB_888x && image->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
int srcSampleCount = 0;
if (rt) {
@ -2495,9 +2443,10 @@ bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int
imageDesc.fUsageFlags = usageFlags;
imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
copySurface = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
this, SkBudgeted::kYes, {width, height}, 1, imageDesc,
GrMipmapStatus::kNotAllocated);
copySurface =
GrVkAttachment::MakeTexture(this, {width, height}, VK_FORMAT_R8G8B8A8_UNORM,
/*mipLevels=*/1, GrRenderable::kYes, /*numSamples=*/1,
SkBudgeted::kYes, GrProtected::kNo);
if (!copySurface) {
return false;
}
@ -2715,7 +2664,7 @@ void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
SkASSERT(texture);
GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
GrVkAttachment* vkTexture = static_cast<GrVkTexture*>(texture)->textureAttachment();
vkTexture->setImageLayout(this,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT,

View File

@ -326,11 +326,11 @@ private:
const SkIPoint& dstPoint);
// helpers for onCreateTexture and writeTexturePixels
bool uploadTexDataLinear(GrVkTexture* tex, int left, int top, int width, int height,
bool uploadTexDataLinear(GrVkAttachment* tex, int left, int top, int width, int height,
GrColorType colorType, const void* data, size_t rowBytes);
bool uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int width, int height,
bool uploadTexDataOptimal(GrVkAttachment* tex, int left, int top, int width, int height,
GrColorType colorType, const GrMipLevel texels[], int mipLevelCount);
bool uploadTexDataCompressed(GrVkTexture* tex, SkImage::CompressionType compression,
bool uploadTexDataCompressed(GrVkAttachment* tex, SkImage::CompressionType compression,
VkFormat vkFormat, SkISize dimensions, GrMipmapped mipMapped,
const void* data, size_t dataSize);
void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,

View File

@ -384,7 +384,9 @@ bool GrVkOpsRenderPass::set(GrRenderTarget* rt,
SkASSERT(sampledProxies[i]->asTextureProxy());
GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
SkASSERT(vkTex);
vkTex->setImageLayout(
GrVkAttachment* texture = vkTex->textureAttachment();
SkASSERT(texture);
texture->setImageLayout(
fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
}
@ -714,7 +716,7 @@ void GrVkOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
#ifdef SK_DEBUG
void check_sampled_texture(GrTexture* tex, GrRenderTarget* rt, GrVkGpu* gpu) {
SkASSERT(!tex->isProtected() || (rt->isProtected() && gpu->protectedContext()));
GrVkTexture* vkTex = static_cast<GrVkTexture*>(tex);
auto vkTex = static_cast<GrVkTexture*>(tex)->textureAttachment();
SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
#endif

View File

@ -140,12 +140,13 @@ bool GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
if (fNumSamplers == 1) {
auto texture = samplerBindings[0].fTexture;
auto texAttachment = texture->textureAttachment();
const auto& samplerState = samplerBindings[0].fState;
const GrVkDescriptorSet* descriptorSet = texture->cachedSingleDescSet(samplerState);
if (descriptorSet) {
commandBuffer->addGrSurface(sk_ref_sp<const GrSurface>(texture));
commandBuffer->addResource(texture->textureView());
commandBuffer->addResource(texture->resource());
commandBuffer->addResource(texAttachment->textureView());
commandBuffer->addResource(texAttachment->resource());
commandBuffer->addRecycledResource(descriptorSet);
commandBuffer->bindDescriptorSets(gpu, fPipeline->layout(), kSamplerDSIdx,
/*setCount=*/1, descriptorSet->descriptorSet(),
@ -164,14 +165,15 @@ bool GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
for (int i = 0; i < fNumSamplers; ++i) {
GrSamplerState state = samplerBindings[i].fState;
GrVkTexture* texture = samplerBindings[i].fTexture;
auto texAttachment = texture->textureAttachment();
const GrVkImageView* textureView = texture->textureView();
const GrVkImageView* textureView = texAttachment->textureView();
const GrVkSampler* sampler = nullptr;
if (fImmutableSamplers[i]) {
sampler = fImmutableSamplers[i];
} else {
sampler = gpu->resourceProvider().findOrCreateCompatibleSampler(
state, texture->ycbcrConversionInfo());
state, texAttachment->ycbcrConversionInfo());
}
SkASSERT(sampler);
@ -200,8 +202,8 @@ bool GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
if (!fImmutableSamplers[i]) {
sampler->unref();
}
commandBuffer->addResource(samplerBindings[i].fTexture->textureView());
commandBuffer->addResource(samplerBindings[i].fTexture->resource());
commandBuffer->addResource(textureView);
commandBuffer->addResource(texAttachment->resource());
}
if (fNumSamplers == 1) {
GrSamplerState state = samplerBindings[0].fState;

View File

@ -22,38 +22,36 @@
GrVkTexture::GrVkTexture(GrVkGpu* gpu,
SkBudgeted budgeted,
SkISize dimensions,
const GrVkImageInfo& info,
sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
sk_sp<const GrVkImageView> view,
sk_sp<GrVkAttachment> texture,
GrMipmapStatus mipmapStatus)
: GrSurface(gpu, dimensions, info.fProtected)
, INHERITED(gpu, dimensions, info.fProtected, GrTextureType::k2D, mipmapStatus)
, GrVkImage(gpu, info, std::move(mutableState), GrBackendObjectOwnership::kOwned)
, fTextureView(std::move(view))
: GrSurface(gpu, dimensions,
texture->isProtected() ? GrProtected::kYes : GrProtected::kNo)
, GrTexture(gpu, dimensions,
texture->isProtected() ? GrProtected::kYes : GrProtected::kNo,
GrTextureType::k2D, mipmapStatus)
, fTexture(std::move(texture))
, fDescSetCache(kMaxCachedDescSets) {
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) == (1 == info.fLevelCount));
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) == (1 == fTexture->mipLevels()));
// We don't support creating external GrVkTextures
SkASSERT(!info.fYcbcrConversionInfo.isValid() || !info.fYcbcrConversionInfo.fExternalFormat);
SkASSERT(SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT));
SkASSERT(!fTexture->ycbcrConversionInfo().isValid() ||
!fTexture->ycbcrConversionInfo().fExternalFormat);
SkASSERT(SkToBool(fTexture->vkUsageFlags() & VK_IMAGE_USAGE_SAMPLED_BIT));
this->registerWithCache(budgeted);
if (GrVkFormatIsCompressed(info.fFormat)) {
if (GrVkFormatIsCompressed(fTexture->imageFormat())) {
this->setReadOnly();
}
}
GrVkTexture::GrVkTexture(GrVkGpu* gpu, SkISize dimensions, const GrVkImageInfo& info,
sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
sk_sp<const GrVkImageView> view,
GrMipmapStatus mipmapStatus, GrBackendObjectOwnership ownership,
GrVkTexture::GrVkTexture(GrVkGpu* gpu, SkISize dimensions,
sk_sp<GrVkAttachment> texture, GrMipmapStatus mipmapStatus,
GrWrapCacheable cacheable, GrIOType ioType, bool isExternal)
: GrSurface(gpu, dimensions, info.fProtected)
, INHERITED(gpu, dimensions, info.fProtected,
: GrSurface(gpu, dimensions, texture->isProtected() ? GrProtected::kYes : GrProtected::kNo)
, GrTexture(gpu, dimensions, texture->isProtected() ? GrProtected::kYes : GrProtected::kNo,
isExternal ? GrTextureType::kExternal : GrTextureType::k2D, mipmapStatus)
, GrVkImage(gpu, info, std::move(mutableState), ownership)
, fTextureView(std::move(view))
, fTexture(std::move(texture))
, fDescSetCache(kMaxCachedDescSets) {
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) == (1 == info.fLevelCount));
SkASSERT(SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT));
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) == (1 == fTexture->mipLevels()));
SkASSERT(SkToBool(fTexture->vkUsageFlags() & VK_IMAGE_USAGE_SAMPLED_BIT));
if (ioType == kRead_GrIOType) {
this->setReadOnly();
}
@ -63,45 +61,33 @@ GrVkTexture::GrVkTexture(GrVkGpu* gpu, SkISize dimensions, const GrVkImageInfo&
// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
GrVkTexture::GrVkTexture(GrVkGpu* gpu,
SkISize dimensions,
const GrVkImageInfo& info,
sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
sk_sp<const GrVkImageView> view,
GrMipmapStatus mipmapStatus,
GrBackendObjectOwnership ownership)
: GrSurface(gpu, dimensions, info.fProtected)
, INHERITED(gpu, dimensions, info.fProtected, GrTextureType::k2D, mipmapStatus)
, GrVkImage(gpu, info, std::move(mutableState), ownership)
, fTextureView(std::move(view))
sk_sp<GrVkAttachment> texture,
GrMipmapStatus mipmapStatus)
: GrSurface(gpu, dimensions, texture->isProtected() ? GrProtected::kYes : GrProtected::kNo)
, GrTexture(gpu, dimensions, texture->isProtected() ? GrProtected::kYes : GrProtected::kNo,
GrTextureType::k2D, mipmapStatus)
, fTexture(std::move(texture))
, fDescSetCache(kMaxCachedDescSets) {
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) == (1 == info.fLevelCount));
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) == (1 == fTexture->mipLevels()));
// Since this ctor is only called from GrVkTextureRenderTarget, we can't have a ycbcr conversion
// since we don't support that on render targets.
SkASSERT(!info.fYcbcrConversionInfo.isValid());
SkASSERT(SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT));
SkASSERT(!fTexture->ycbcrConversionInfo().isValid());
SkASSERT(SkToBool(fTexture->vkUsageFlags() & VK_IMAGE_USAGE_SAMPLED_BIT));
}
sk_sp<GrVkTexture> GrVkTexture::MakeNewTexture(GrVkGpu* gpu, SkBudgeted budgeted,
SkISize dimensions,
const GrVkImage::ImageDesc& imageDesc,
VkFormat format, uint32_t mipLevels,
GrProtected isProtected,
GrMipmapStatus mipmapStatus) {
SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT);
sk_sp<GrVkAttachment> texture = GrVkAttachment::MakeTexture(
gpu, dimensions, format, mipLevels, GrRenderable::kNo, /*numSamples=*/1, budgeted,
isProtected);
GrVkImageInfo info;
if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
if (!texture) {
return nullptr;
}
sk_sp<const GrVkImageView> imageView = GrVkImageView::Make(
gpu, info.fImage, info.fFormat, GrVkImageView::kColor_Type, info.fLevelCount,
info.fYcbcrConversionInfo);
if (!imageView) {
GrVkImage::DestroyImageInfo(gpu, &info);
return nullptr;
}
sk_sp<GrBackendSurfaceMutableStateImpl> mutableState(
new GrBackendSurfaceMutableStateImpl(info.fImageLayout, info.fCurrentQueueFamily));
return sk_sp<GrVkTexture>(new GrVkTexture(gpu, budgeted, dimensions, info,
std::move(mutableState), std::move(imageView),
return sk_sp<GrVkTexture>(new GrVkTexture(gpu, budgeted, dimensions, std::move(texture),
mipmapStatus));
}
@ -113,41 +99,34 @@ sk_sp<GrVkTexture> GrVkTexture::MakeWrappedTexture(
SkASSERT(VK_NULL_HANDLE != info.fImage &&
(kBorrow_GrWrapOwnership == wrapOwnership || VK_NULL_HANDLE != info.fAlloc.fMemory));
sk_sp<const GrVkImageView> imageView = GrVkImageView::Make(
gpu, info.fImage, info.fFormat, GrVkImageView::kColor_Type, info.fLevelCount,
info.fYcbcrConversionInfo);
if (!imageView) {
sk_sp<GrVkAttachment> texture =
GrVkAttachment::MakeWrapped(gpu, dimensions, info, std::move(mutableState),
GrAttachment::UsageFlags::kTexture, wrapOwnership,
cacheable);
if (!texture) {
return nullptr;
}
GrMipmapStatus mipmapStatus = info.fLevelCount > 1 ? GrMipmapStatus::kValid
: GrMipmapStatus::kNotAllocated;
GrBackendObjectOwnership ownership = kBorrow_GrWrapOwnership == wrapOwnership
? GrBackendObjectOwnership::kBorrowed : GrBackendObjectOwnership::kOwned;
bool isExternal = info.fYcbcrConversionInfo.isValid() &&
(info.fYcbcrConversionInfo.fExternalFormat != 0);
return sk_sp<GrVkTexture>(new GrVkTexture(gpu, dimensions, info, std::move(mutableState),
std::move(imageView), mipmapStatus, ownership,
return sk_sp<GrVkTexture>(new GrVkTexture(gpu, dimensions, std::move(texture), mipmapStatus,
cacheable, ioType, isExternal));
}
GrVkTexture::~GrVkTexture() {
// either release or abandon should have been called by the owner of this object.
SkASSERT(!fTextureView);
SkASSERT(!fTexture);
}
void GrVkTexture::onRelease() {
// we create this and don't hand it off, so we should always destroy it
if (fTextureView) {
fTextureView.reset();
}
fTexture.reset();
fDescSetCache.reset();
this->releaseImage();
INHERITED::onRelease();
GrTexture::onRelease();
}
struct GrVkTexture::DescriptorCacheEntry {
@ -164,19 +143,16 @@ struct GrVkTexture::DescriptorCacheEntry {
};
void GrVkTexture::onAbandon() {
// we create this and don't hand it off, so we should always destroy it
if (fTextureView) {
fTextureView.reset();
}
fTexture.reset();
fDescSetCache.reset();
this->releaseImage();
INHERITED::onAbandon();
GrTexture::onAbandon();
}
GrBackendTexture GrVkTexture::getBackendTexture() const {
return GrBackendTexture(this->width(), this->height(), fInfo, this->getMutableState());
return GrBackendTexture(fTexture->width(), fTexture->height(), fTexture->vkImageInfo(),
fTexture->getMutableState());
}
GrVkGpu* GrVkTexture::getVkGpu() const {
@ -184,9 +160,7 @@ GrVkGpu* GrVkTexture::getVkGpu() const {
return static_cast<GrVkGpu*>(this->getGpu());
}
const GrVkImageView* GrVkTexture::textureView() {
return fTextureView.get();
}
const GrVkImageView* GrVkTexture::textureView() { return fTexture->textureView(); }
const GrVkDescriptorSet* GrVkTexture::cachedSingleDescSet(GrSamplerState state) {
if (std::unique_ptr<DescriptorCacheEntry>* e = fDescSetCache.find(state)) {

View File

@ -12,19 +12,21 @@
#include "src/core/SkLRUCache.h"
#include "src/gpu/GrSamplerState.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkAttachment.h"
class GrVkDescriptorSet;
class GrVkGpu;
class GrVkImageView;
struct GrVkImageInfo;
class GrVkTexture : public GrTexture, public GrVkImage {
class GrVkTexture : public GrTexture {
public:
static sk_sp<GrVkTexture> MakeNewTexture(GrVkGpu*,
SkBudgeted budgeted,
SkISize dimensions,
const GrVkImage::ImageDesc&,
VkFormat format,
uint32_t mipLevels,
GrProtected,
GrMipmapStatus);
static sk_sp<GrVkTexture> MakeWrappedTexture(GrVkGpu*,
@ -39,10 +41,11 @@ public:
GrBackendTexture getBackendTexture() const override;
GrBackendFormat backendFormat() const override { return this->getBackendFormat(); }
GrBackendFormat backendFormat() const override { return fTexture->getBackendFormat(); }
void textureParamsModified() override {}
GrVkAttachment* textureAttachment() const { return fTexture.get(); }
const GrVkImageView* textureView();
// For each GrVkTexture, there is a cache of GrVkDescriptorSets which only contain a single
@ -57,11 +60,8 @@ public:
protected:
GrVkTexture(GrVkGpu*,
SkISize dimensions,
const GrVkImageInfo&,
sk_sp<GrBackendSurfaceMutableStateImpl>,
sk_sp<const GrVkImageView>,
GrMipmapStatus,
GrBackendObjectOwnership);
sk_sp<GrVkAttachment> texture,
GrMipmapStatus);
GrVkGpu* getVkGpu() const;
@ -72,22 +72,19 @@ protected:
return false;
}
private:
GrVkTexture(GrVkGpu*, SkBudgeted, SkISize, const GrVkImageInfo&,
sk_sp<GrBackendSurfaceMutableStateImpl>, sk_sp<const GrVkImageView> imageView,
GrMipmapStatus);
GrVkTexture(GrVkGpu*, SkISize, const GrVkImageInfo&, sk_sp<GrBackendSurfaceMutableStateImpl>,
sk_sp<const GrVkImageView>, GrMipmapStatus, GrBackendObjectOwnership,
GrWrapCacheable, GrIOType, bool isExternal);
// In Vulkan we call the release proc after we are finished with the underlying
// GrVkImage::Resource object (which occurs after the GPU has finished all work on it).
void onSetRelease(sk_sp<GrRefCntedCallback> releaseHelper) override {
// Forward the release proc on to GrVkImage
this->setResourceRelease(std::move(releaseHelper));
// Forward the release proc onto the fTexture's GrVkImage
fTexture->setResourceRelease(std::move(releaseHelper));
}
sk_sp<const GrVkImageView> fTextureView;
private:
GrVkTexture(GrVkGpu*, SkBudgeted, SkISize, sk_sp<GrVkAttachment> texture, GrMipmapStatus);
GrVkTexture(GrVkGpu*, SkISize, sk_sp<GrVkAttachment> texture, GrMipmapStatus,
GrWrapCacheable, GrIOType, bool isExternal);
sk_sp<GrVkAttachment> fTexture;
struct SamplerHash {
uint32_t operator()(GrSamplerState state) const { return state.asIndex(); }
@ -96,8 +93,6 @@ private:
SkLRUCache<const GrSamplerState, std::unique_ptr<DescriptorCacheEntry>, SamplerHash>
fDescSetCache;
static constexpr int kMaxCachedDescSets = 8;
using INHERITED = GrTexture;
};
#endif

View File

@ -25,15 +25,12 @@ GrVkTextureRenderTarget::GrVkTextureRenderTarget(
GrVkGpu* gpu,
SkBudgeted budgeted,
SkISize dimensions,
const GrVkImageInfo& info,
sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
sk_sp<const GrVkImageView> texView,
sk_sp<GrVkAttachment> texture,
sk_sp<GrVkAttachment> colorAttachment,
sk_sp<GrVkAttachment> resolveAttachment,
GrMipmapStatus mipmapStatus)
: GrSurface(gpu, dimensions, info.fProtected)
, GrVkTexture(gpu, dimensions, info, mutableState, std::move(texView), mipmapStatus,
GrBackendObjectOwnership::kOwned)
: GrSurface(gpu, dimensions, texture->isProtected() ? GrProtected::kYes : GrProtected::kNo)
, GrVkTexture(gpu, dimensions, std::move(texture), mipmapStatus)
, GrVkRenderTarget(gpu, dimensions, std::move(colorAttachment),
std::move(resolveAttachment), CreateType::kFromTextureRT) {
this->registerWithCache(budgeted);
@ -42,108 +39,65 @@ GrVkTextureRenderTarget::GrVkTextureRenderTarget(
GrVkTextureRenderTarget::GrVkTextureRenderTarget(
GrVkGpu* gpu,
SkISize dimensions,
const GrVkImageInfo& info,
sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
sk_sp<const GrVkImageView> texView,
sk_sp<GrVkAttachment> texture,
sk_sp<GrVkAttachment> colorAttachment,
sk_sp<GrVkAttachment> resolveAttachment,
GrMipmapStatus mipmapStatus,
GrBackendObjectOwnership ownership,
GrWrapCacheable cacheable)
: GrSurface(gpu, dimensions, info.fProtected)
, GrVkTexture(gpu, dimensions, info, mutableState, std::move(texView), mipmapStatus,
ownership)
: GrSurface(gpu, dimensions, texture->isProtected() ? GrProtected::kYes : GrProtected::kNo)
, GrVkTexture(gpu, dimensions, std::move(texture), mipmapStatus)
, GrVkRenderTarget(gpu, dimensions, std::move(colorAttachment),
std::move(resolveAttachment), CreateType::kFromTextureRT) {
this->registerWithCacheWrapped(cacheable);
}
namespace {
struct Views {
sk_sp<const GrVkImageView> textureView;
sk_sp<GrVkAttachment> colorAttachment;
sk_sp<GrVkAttachment> resolveAttachment;
};
} // anonymous namespace
static Views create_attachments(GrVkGpu* gpu, SkISize dimensions, int sampleCnt,
const GrVkImageInfo& info,
sk_sp<GrBackendSurfaceMutableStateImpl> mutableState) {
VkImage image = info.fImage;
// Create the texture ImageView
Views views;
views.textureView = GrVkImageView::Make(gpu, image, info.fFormat, GrVkImageView::kColor_Type,
info.fLevelCount, info.fYcbcrConversionInfo);
if (!views.textureView) {
return {};
}
// Make the non-msaa attachment which may end up as either the color or resolve view depending
// on if sampleCnt > 1 or not. The info and mutableState passed in here will always represent
// the non-msaa image.
SkASSERT(info.fSampleCount == 1);
// TODO: Fix this weird wrapping once GrVkTexture and GrVkAttachment merge.
// Regardless of whether the actual TextureRenderTarget is wrapped or created, we always make a
// wrapped attachment here. The GrVkTexture will manage the lifetime and possible destruction
// of the GrVkImage object. So we want the attachment on the GrVkRenderTarget to always be
// borrowed. In the current system that can lead to overcounting of memory usage when we are
// including both owned and borrowed memory.
sk_sp<GrVkAttachment> nonMSAAAttachment =
GrVkAttachment::MakeWrapped(gpu, dimensions, info, std::move(mutableState),
GrAttachment::UsageFlags::kColorAttachment,
kBorrow_GrWrapOwnership, GrWrapCacheable::kNo);
if (!nonMSAAAttachment) {
return {};
}
// create msaa surface if necessary
bool create_rt_attachments(GrVkGpu* gpu, SkISize dimensions, VkFormat format, int sampleCnt,
GrProtected isProtected, sk_sp<GrVkAttachment> texture,
sk_sp<GrVkAttachment>* colorAttachment,
sk_sp<GrVkAttachment>* resolveAttachment) {
if (sampleCnt > 1) {
auto rp = gpu->getContext()->priv().resourceProvider();
sk_sp<GrAttachment> msaaAttachment = rp->makeMSAAAttachment(
dimensions, GrBackendFormat::MakeVk(info.fFormat), sampleCnt, info.fProtected);
dimensions, GrBackendFormat::MakeVk(format), sampleCnt, isProtected);
if (!msaaAttachment) {
return {};
return false;
}
views.colorAttachment =
*colorAttachment =
sk_sp<GrVkAttachment>(static_cast<GrVkAttachment*>(msaaAttachment.release()));
views.resolveAttachment = std::move(nonMSAAAttachment);
*resolveAttachment = std::move(texture);
} else {
views.colorAttachment = std::move(nonMSAAAttachment);
*colorAttachment = std::move(texture);
}
if (!views.colorAttachment) {
return {};
}
return views;
return true;
}
sk_sp<GrVkTextureRenderTarget> GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
GrVkGpu* gpu,
SkBudgeted budgeted,
SkISize dimensions,
VkFormat format,
uint32_t mipLevels,
int sampleCnt,
const GrVkImage::ImageDesc& imageDesc,
GrMipmapStatus mipmapStatus) {
SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT);
GrVkImageInfo info;
if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
GrMipmapStatus mipmapStatus,
GrProtected isProtected) {
sk_sp<GrVkAttachment> texture =
GrVkAttachment::MakeTexture(gpu, dimensions, format, mipLevels, GrRenderable::kYes,
/*numSamples=*/1, budgeted, isProtected);
if (!texture) {
return nullptr;
}
sk_sp<GrBackendSurfaceMutableStateImpl> mutableState(
new GrBackendSurfaceMutableStateImpl(info.fImageLayout, info.fCurrentQueueFamily));
Views views = create_attachments(gpu, dimensions, sampleCnt, info, mutableState);
if (!views.colorAttachment) {
GrVkImage::DestroyImageInfo(gpu, &info);
sk_sp<GrVkAttachment> colorAttachment;
sk_sp<GrVkAttachment> resolveAttachment;
if (!create_rt_attachments(gpu, dimensions, format, sampleCnt, isProtected, texture,
&colorAttachment, &resolveAttachment)) {
return nullptr;
}
SkASSERT(colorAttachment);
SkASSERT(sampleCnt == 1 || resolveAttachment);
return sk_sp<GrVkTextureRenderTarget>(new GrVkTextureRenderTarget(
gpu, budgeted, dimensions, info, std::move(mutableState), std::move(views.textureView),
std::move(views.colorAttachment), std::move(views.resolveAttachment), mipmapStatus));
gpu, budgeted, dimensions, std::move(texture), std::move(colorAttachment),
std::move(resolveAttachment), mipmapStatus));
}
sk_sp<GrVkTextureRenderTarget> GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(
@ -158,19 +112,33 @@ sk_sp<GrVkTextureRenderTarget> GrVkTextureRenderTarget::MakeWrappedTextureRender
SkASSERT(VK_NULL_HANDLE != info.fImage &&
(kBorrow_GrWrapOwnership == wrapOwnership || VK_NULL_HANDLE != info.fAlloc.fMemory));
GrMipmapStatus mipmapStatus = info.fLevelCount > 1 ? GrMipmapStatus::kDirty
: GrMipmapStatus::kNotAllocated;
GrAttachment::UsageFlags textureUsageFlags = GrAttachment::UsageFlags::kTexture;
if (info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
textureUsageFlags |= GrAttachment::UsageFlags::kColorAttachment;
}
GrBackendObjectOwnership ownership = kBorrow_GrWrapOwnership == wrapOwnership
? GrBackendObjectOwnership::kBorrowed : GrBackendObjectOwnership::kOwned;
Views views = create_attachments(gpu, dimensions, sampleCnt, info, mutableState);
if (!views.colorAttachment) {
sk_sp<GrVkAttachment> texture =
GrVkAttachment::MakeWrapped(gpu, dimensions, info, std::move(mutableState),
textureUsageFlags, wrapOwnership, cacheable);
if (!texture) {
return nullptr;
}
sk_sp<GrVkAttachment> colorAttachment;
sk_sp<GrVkAttachment> resolveAttachment;
if (!create_rt_attachments(gpu, dimensions, info.fFormat, sampleCnt, info.fProtected, texture,
&colorAttachment, &resolveAttachment)) {
return nullptr;
}
SkASSERT(colorAttachment);
SkASSERT(sampleCnt == 1 || resolveAttachment);
GrMipmapStatus mipmapStatus =
info.fLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
return sk_sp<GrVkTextureRenderTarget>(new GrVkTextureRenderTarget(
gpu, dimensions, info, std::move(mutableState), std::move(views.textureView),
std::move(views.colorAttachment), std::move(views.resolveAttachment),
mipmapStatus, ownership, cacheable));
gpu, dimensions, std::move(texture), std::move(colorAttachment),
std::move(resolveAttachment), mipmapStatus, cacheable));
}
size_t GrVkTextureRenderTarget::onGpuMemorySize() const {

View File

@ -26,11 +26,15 @@ struct GrVkImageInfo;
class GrVkTextureRenderTarget: public GrVkTexture, public GrVkRenderTarget {
public:
static sk_sp<GrVkTextureRenderTarget> MakeNewTextureRenderTarget(GrVkGpu*, SkBudgeted,
SkISize dimensions,
int sampleCnt,
const GrVkImage::ImageDesc&,
GrMipmapStatus);
static sk_sp<GrVkTextureRenderTarget> MakeNewTextureRenderTarget(
GrVkGpu* gpu,
SkBudgeted budgeted,
SkISize dimensions,
VkFormat format,
uint32_t mipLevels,
int sampleCnt,
GrMipmapStatus mipmapStatus,
GrProtected isProtected);
static sk_sp<GrVkTextureRenderTarget> MakeWrappedTextureRenderTarget(
GrVkGpu*,
@ -41,7 +45,7 @@ public:
const GrVkImageInfo&,
sk_sp<GrBackendSurfaceMutableStateImpl>);
GrBackendFormat backendFormat() const override { return this->getBackendFormat(); }
GrBackendFormat backendFormat() const override { return GrVkTexture::backendFormat(); }
protected:
void onAbandon() override {
@ -57,37 +61,29 @@ protected:
}
private:
// MSAA, not-wrapped
GrVkTextureRenderTarget(GrVkGpu* gpu,
SkBudgeted budgeted,
SkISize dimensions,
const GrVkImageInfo& info,
sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
sk_sp<const GrVkImageView> texView,
sk_sp<GrVkAttachment> texture,
sk_sp<GrVkAttachment> colorAttachment,
sk_sp<GrVkAttachment> resolveAttachment,
GrMipmapStatus);
// MSAA, wrapped
GrVkTextureRenderTarget(GrVkGpu* gpu,
SkISize dimensions,
const GrVkImageInfo& info,
sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
sk_sp<const GrVkImageView> texView,
sk_sp<GrVkAttachment> texture,
sk_sp<GrVkAttachment> colorAttachment,
sk_sp<GrVkAttachment> resolveAttachment,
GrMipmapStatus,
GrBackendObjectOwnership,
GrWrapCacheable);
// GrGLRenderTarget accounts for the texture's memory and any MSAA renderbuffer's memory.
size_t onGpuMemorySize() const override;
// In Vulkan we call the release proc after we are finished with the underlying
// GrVkImage::Resource object (which occurs after the GPU has finished all work on it).
void onSetRelease(sk_sp<GrRefCntedCallback> releaseHelper) override {
// Forward the release proc on to GrVkImage
this->setResourceRelease(std::move(releaseHelper));
GrVkTexture::onSetRelease(std::move(releaseHelper));
}
};

View File

@ -67,6 +67,17 @@ void test_wrapping(GrDirectContext* dContext,
return;
}
// As we transition to using attachments instead of GrTextures and GrRenderTargets individual
// proxy instansiations may add multiple things to the cache. There would be an entry for the
// GrTexture/GrRenderTarget and entries for one or more attachments.
int cacheEntriesPerProxy = 1;
// We currently only have attachments on the vulkan backend
if (dContext->backend() == GrBackend::kVulkan) {
// If we ever make a rt with multisamples this would have an additional
// attachment as well.
cacheEntriesPerProxy++;
}
if (GrRenderable::kYes == renderable && dContext->colorTypeSupportedAsSurface(skColorType)) {
sk_sp<SkSurface> surf = SkSurface::MakeFromBackendTexture(dContext,
mbet->texture(),
@ -78,16 +89,6 @@ void test_wrapping(GrDirectContext* dContext,
ERRORF(reporter, "Couldn't make SkSurface from backendTexture for %s\n",
ToolUtils::colortype_name(skColorType));
} else {
// As we transition to using attachments instead of GrTextures and GrRenderTargets
// individual proxy instansiations may add multiple things to the cache. There would be
// an entry for the GrTexture/GrRenderTarget and entries for one or more attachments.
int cacheEntriesPerProxy = 1;
// We currently only have attachments on the vulkan backend
if (dContext->backend() == GrBackend::kVulkan) {
// If we ever make a rt with multisamples this would have an additional
// attachment as well.
cacheEntriesPerProxy++;
}
REPORTER_ASSERT(reporter,
initialCount + cacheEntriesPerProxy == cache->getResourceCount());
}
@ -111,7 +112,8 @@ void test_wrapping(GrDirectContext* dContext,
REPORTER_ASSERT(reporter, proxy->isInstantiated());
REPORTER_ASSERT(reporter, mipMapped == proxy->mipmapped());
REPORTER_ASSERT(reporter, initialCount+1 == cache->getResourceCount());
REPORTER_ASSERT(reporter,
initialCount + cacheEntriesPerProxy == cache->getResourceCount());
}
}

View File

@ -70,7 +70,7 @@ DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkBackendSurfaceMutableStateTest, reporter, ctxIn
REPORTER_ASSERT(reporter, texture);
// Verify that modifying the layout via the GrVkTexture is reflected in the GrBackendTexture
GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
GrVkAttachment* vkTexture = static_cast<GrVkTexture*>(texture)->textureAttachment();
REPORTER_ASSERT(reporter, initLayout == vkTexture->currentLayout());
REPORTER_ASSERT(reporter, initQueue == vkTexture->currentQueueFamilyIndex());
vkTexture->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);

View File

@ -121,7 +121,8 @@ static sk_sp<GrTextureProxy> create_wrapped_backend(GrDirectContext* dContext) {
// and looking them up work, etc.
static void basic_test(GrDirectContext* dContext,
skiatest::Reporter* reporter,
sk_sp<GrTextureProxy> proxy) {
sk_sp<GrTextureProxy> proxy,
int cacheEntriesPerProxy) {
static int id = 1;
GrResourceProvider* resourceProvider = dContext->priv().resourceProvider();
@ -152,19 +153,6 @@ static void basic_test(GrDirectContext* dContext,
REPORTER_ASSERT(reporter, proxyProvider->findOrCreateProxyByUniqueKey(key));
REPORTER_ASSERT(reporter, 1 == proxyProvider->numUniqueKeyProxies_TestOnly());
// As we transition to using attachments instead of GrTextures and GrRenderTargets individual
// proxy instansiations may add multiple things to the cache. There would be an entry for the
// GrTexture/GrRenderTarget and entries for one or more attachments.
int cacheEntriesPerProxy = 1;
// We currently only have attachments on the vulkan backend
if (dContext->backend() == GrBackend::kVulkan) {
if (proxy->asRenderTargetProxy()) {
// If we ever have a test with multisamples this would have an additional attachment as
// well.
cacheEntriesPerProxy++;
}
}
int expectedCacheCount = startCacheCount + (proxy->isInstantiated() ? 0 : cacheEntriesPerProxy);
// Once instantiated, the backing resource should have the same key
@ -228,7 +216,9 @@ static void basic_test(GrDirectContext* dContext,
// Invalidation test
// Test if invalidating unique ids operates as expected for texture proxies.
static void invalidation_test(GrDirectContext* dContext, skiatest::Reporter* reporter) {
static void invalidation_test(GrDirectContext* dContext,
skiatest::Reporter* reporter,
int cacheEntriesPerProxy) {
GrProxyProvider* proxyProvider = dContext->priv().proxyProvider();
GrResourceCache* cache = dContext->priv().getResourceCache();
@ -259,7 +249,7 @@ static void invalidation_test(GrDirectContext* dContext, skiatest::Reporter* rep
sk_sp<SkImage> textureImg = rasterImg->makeTextureImage(dContext);
REPORTER_ASSERT(reporter, 0 == proxyProvider->numUniqueKeyProxies_TestOnly());
REPORTER_ASSERT(reporter, 1 + bufferResources == cache->getResourceCount());
REPORTER_ASSERT(reporter, cacheEntriesPerProxy + bufferResources == cache->getResourceCount());
rasterImg = nullptr; // this invalidates the uniqueKey
@ -268,7 +258,7 @@ static void invalidation_test(GrDirectContext* dContext, skiatest::Reporter* rep
dContext->setResourceCacheLimit(maxBytes-1);
REPORTER_ASSERT(reporter, 0 == proxyProvider->numUniqueKeyProxies_TestOnly());
REPORTER_ASSERT(reporter, 1 + bufferResources == cache->getResourceCount());
REPORTER_ASSERT(reporter, cacheEntriesPerProxy + bufferResources == cache->getResourceCount());
textureImg = nullptr;
@ -294,7 +284,8 @@ static void invalidation_test(GrDirectContext* dContext, skiatest::Reporter* rep
// Test if invalidating unique ids prior to instantiating operates as expected
static void invalidation_and_instantiation_test(GrDirectContext* dContext,
skiatest::Reporter* reporter) {
skiatest::Reporter* reporter,
int cacheEntriesPerProxy) {
GrProxyProvider* proxyProvider = dContext->priv().proxyProvider();
GrResourceProvider* resourceProvider = dContext->priv().resourceProvider();
GrResourceCache* cache = dContext->priv().getResourceCache();
@ -325,7 +316,7 @@ static void invalidation_and_instantiation_test(GrDirectContext* dContext,
REPORTER_ASSERT(reporter, !proxy->getUniqueKey().isValid());
REPORTER_ASSERT(reporter, !proxy->peekTexture()->getUniqueKey().isValid());
REPORTER_ASSERT(reporter, 0 == proxyProvider->numUniqueKeyProxies_TestOnly());
REPORTER_ASSERT(reporter, 1 == cache->getResourceCount());
REPORTER_ASSERT(reporter, cacheEntriesPerProxy == cache->getResourceCount());
proxy = nullptr;
dContext->priv().testingOnly_purgeAllUnlockedResources();
@ -342,18 +333,30 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(TextureProxyTest, reporter, ctxInfo) {
REPORTER_ASSERT(reporter, !proxyProvider->numUniqueKeyProxies_TestOnly());
REPORTER_ASSERT(reporter, 0 == cache->getResourceCount());
// As we transition to using attachments instead of GrTextures and GrRenderTargets individual
// proxy instansiations may add multiple things to the cache. There would be an entry for the
// GrTexture/GrRenderTarget and entries for one or more attachments.
int cacheEntriesPerProxy = 1;
// We currently only have attachments on the vulkan backend
if (direct->backend() == GrBackend::kVulkan) {
cacheEntriesPerProxy++;
// If we ever have a test with multisamples this would have an additional attachment as
// well.
}
for (auto fit : { SkBackingFit::kExact, SkBackingFit::kApprox }) {
for (auto create : { deferred_tex, deferred_texRT, wrapped, wrapped_with_key }) {
REPORTER_ASSERT(reporter, 0 == cache->getResourceCount());
basic_test(direct, reporter, create(reporter, direct, proxyProvider, fit));
basic_test(direct, reporter, create(reporter, direct, proxyProvider, fit),
cacheEntriesPerProxy);
}
REPORTER_ASSERT(reporter, 0 == cache->getResourceCount());
cache->purgeAllUnlocked();
}
basic_test(direct, reporter, create_wrapped_backend(direct));
basic_test(direct, reporter, create_wrapped_backend(direct), cacheEntriesPerProxy);
invalidation_test(direct, reporter);
invalidation_and_instantiation_test(direct, reporter);
invalidation_test(direct, reporter, cacheEntriesPerProxy);
invalidation_and_instantiation_test(direct, reporter, cacheEntriesPerProxy);
}

View File

@ -80,7 +80,7 @@ DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkImageLayoutTest, reporter, ctxInfo) {
REPORTER_ASSERT(reporter, texture);
// Verify that modifying the layout via the GrVkTexture is reflected in the GrBackendTexture
GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
GrVkAttachment* vkTexture = static_cast<GrVkTexture*>(texture)->textureAttachment();
REPORTER_ASSERT(reporter, initLayout == vkTexture->currentLayout());
vkTexture->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);