Remove code to push pixmaps to backend textures from GrGpu classes
Replace GrGpu::updateBackendTexture with a narrower method that clears a backend texture. Creation of data for a solid-color compressed texture is lifted up to GrDirectContext and goes through updateCompressedBackendTexture.

Bug: skia:11786
Change-Id: I1d617623df5e65686f30e57c361a64f78d77f7bd
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/392836
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Brian Salomon <bsalomon@google.com>
Parent: 04d4969bd2
Commit: 45889d1269
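In outline, the diff below narrows the GrGpu entry points and moves solid-color compressed-data generation up into GrDirectContext. A minimal sketch of the reworked flow, simplified from the hunks that follow (not the full implementations):

    // New, narrower GrGpu entry points (see the GrGpu.h hunks below):
    bool clearBackendTexture(const GrBackendTexture&,
                             sk_sp<GrRefCntedCallback> finishedCallback,
                             std::array<float, 4> color);
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        sk_sp<GrRefCntedCallback> finishedCallback,
                                        const void* data,
                                        size_t length);

    // Solid-color compressed textures are now filled on the CPU in GrDirectContext
    // and uploaded as raw bytes (condensed from createCompressedBackendTexture):
    SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
    size_t size = SkCompressedDataSize(compression, {width, height}, nullptr,
                                       mipmapped == GrMipmapped::kYes);
    auto storage = std::make_unique<char[]>(size);
    GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
    gpu->updateCompressedBackendTexture(beTex, std::move(finishedCallback),
                                        storage.get(), size);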
@ -280,6 +280,7 @@ public:
SkISize dimensions() const { return {fWidth, fHeight}; }
int width() const { return fWidth; }
int height() const { return fHeight; }
GrMipmapped mipmapped() const { return fMipmapped; }
bool hasMipmaps() const { return fMipmapped == GrMipmapped::kYes; }
/** deprecated alias of hasMipmaps(). */
bool hasMipMaps() const { return this->hasMipmaps(); }
@ -686,7 +686,7 @@ bool GrConvertPixels(const GrPixmap& dst, const GrCPixmap& src, bool flipY) {
return true;
}
bool GrClearImage(const GrImageInfo& dstInfo, void* dst, size_t dstRB, SkColor4f color) {
bool GrClearImage(const GrImageInfo& dstInfo, void* dst, size_t dstRB, std::array<float, 4> color) {
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
if (!dstInfo.isValid()) {
@ -700,7 +700,7 @@ bool GrClearImage(const GrImageInfo& dstInfo, void* dst, size_t dstRB, SkColor4f
}
if (dstInfo.colorType() == GrColorType::kRGB_888) {
// SkRasterPipeline doesn't handle writing to RGB_888. So we handle that specially here.
uint32_t rgba = color.toBytes_RGBA();
uint32_t rgba = SkColor4f{color[0], color[1], color[2], color[3]}.toBytes_RGBA();
for (int y = 0; y < dstInfo.height(); ++y) {
char* d = static_cast<char*>(dst) + y * dstRB;
for (int x = 0; x < dstInfo.width(); ++x, d += 3) {
@ -719,7 +719,7 @@ bool GrClearImage(const GrImageInfo& dstInfo, void* dst, size_t dstRB, SkColor4f
char block[64];
SkArenaAlloc alloc(block, sizeof(block), 1024);
SkRasterPipeline_<256> pipeline;
pipeline.append_constant_color(&alloc, color);
pipeline.append_constant_color(&alloc, color.data());
switch (lumMode) {
case LumMode::kNone:
break;
@ -39,7 +39,7 @@ void GrFillInCompressedData(SkImage::CompressionType, SkISize dimensions, GrMipm
bool GrConvertPixels(const GrPixmap& dst, const GrCPixmap& src, bool flipY = false);
/** Clears the dst image to a constant color. */
bool GrClearImage(const GrImageInfo& dstInfo, void* dst, size_t dstRB, SkColor4f color);
bool GrClearImage(const GrImageInfo& dstInfo, void* dst, size_t dstRB, std::array<float, 4> color);
#if GR_TEST_UTILS
/**
@ -10,7 +10,9 @@
#include "include/core/SkTraceMemoryDump.h"
#include "include/gpu/GrContextThreadSafeProxy.h"
#include "src/core/SkAutoMalloc.h"
#include "src/core/SkTaskGroup.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrClientMappedBufferManager.h"
#include "src/gpu/GrContextThreadSafeProxyPriv.h"
#include "src/gpu/GrDirectContextPriv.h"
@ -472,26 +474,24 @@ GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
|
||||
return this->createBackendTexture(width, height, format, mipMapped, renderable, isProtected);
|
||||
}
|
||||
|
||||
static GrBackendTexture create_and_update_backend_texture(
|
||||
GrDirectContext* dContext,
|
||||
SkISize dimensions,
|
||||
const GrBackendFormat& backendFormat,
|
||||
GrMipmapped mipMapped,
|
||||
GrRenderable renderable,
|
||||
GrProtected isProtected,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const GrGpu::BackendTextureData* data) {
|
||||
static GrBackendTexture create_and_clear_backend_texture(GrDirectContext* dContext,
|
||||
SkISize dimensions,
|
||||
const GrBackendFormat& backendFormat,
|
||||
GrMipmapped mipMapped,
|
||||
GrRenderable renderable,
|
||||
GrProtected isProtected,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) {
|
||||
GrGpu* gpu = dContext->priv().getGpu();
|
||||
SkASSERT(data->type() == GrGpu::BackendTextureData::Type::kColor);
|
||||
GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
|
||||
mipMapped, isProtected);
|
||||
if (!beTex.isValid()) {
|
||||
return {};
|
||||
}
|
||||
|
||||
if (!dContext->priv().getGpu()->updateBackendTexture(beTex,
|
||||
std::move(finishedCallback),
|
||||
data)) {
|
||||
if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
|
||||
std::move(finishedCallback),
|
||||
color)) {
|
||||
dContext->deleteBackendTexture(beTex);
|
||||
return {};
|
||||
}
|
||||
@ -555,10 +555,14 @@ GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
|
||||
return {};
|
||||
}
|
||||
|
||||
GrGpu::BackendTextureData data(color);
|
||||
return create_and_update_backend_texture(this, {width, height},
|
||||
backendFormat, mipMapped, renderable, isProtected,
|
||||
std::move(finishedCallback), &data);
|
||||
return create_and_clear_backend_texture(this,
|
||||
{width, height},
|
||||
backendFormat,
|
||||
mipMapped,
|
||||
renderable,
|
||||
isProtected,
|
||||
std::move(finishedCallback),
|
||||
color.array());
|
||||
}
|
||||
|
||||
GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
|
||||
@ -583,10 +587,14 @@ GrBackendTexture GrDirectContext::createBackendTexture(int width, int height,
|
||||
GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
|
||||
SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);
|
||||
|
||||
GrGpu::BackendTextureData data(swizzledColor);
|
||||
return create_and_update_backend_texture(this, {width, height}, format,
|
||||
mipMapped, renderable, isProtected,
|
||||
std::move(finishedCallback), &data);
|
||||
return create_and_clear_backend_texture(this,
|
||||
{width, height},
|
||||
format,
|
||||
mipMapped,
|
||||
renderable,
|
||||
isProtected,
|
||||
std::move(finishedCallback),
|
||||
swizzledColor.array());
|
||||
}
|
||||
|
||||
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
|
||||
@ -647,8 +655,7 @@ bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTextur
|
||||
return false;
|
||||
}
|
||||
|
||||
GrGpu::BackendTextureData data(color);
|
||||
return fGpu->updateBackendTexture(backendTexture, std::move(finishedCallback), &data);
|
||||
return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
|
||||
}
|
||||
|
||||
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
@ -670,9 +677,11 @@ bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTextur
|
||||
}
|
||||
|
||||
GrSwizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
|
||||
GrGpu::BackendTextureData data(swizzle.applyTo(color));
|
||||
SkColor4f swizzledColor = swizzle.applyTo(color);
|
||||
|
||||
return fGpu->updateBackendTexture(backendTexture, std::move(finishedCallback), &data);
|
||||
return fGpu->clearBackendTexture(backendTexture,
|
||||
std::move(finishedCallback),
|
||||
swizzledColor.array());
|
||||
}
|
||||
|
||||
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
@ -717,7 +726,8 @@ static GrBackendTexture create_and_update_compressed_backend_texture(
|
||||
GrMipmapped mipMapped,
|
||||
GrProtected isProtected,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const GrGpu::BackendTextureData* data) {
|
||||
const void* data,
|
||||
size_t size) {
|
||||
GrGpu* gpu = dContext->priv().getGpu();
|
||||
|
||||
GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
|
||||
@ -727,20 +737,21 @@ static GrBackendTexture create_and_update_compressed_backend_texture(
|
||||
}
|
||||
|
||||
if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
|
||||
beTex, std::move(finishedCallback), data)) {
|
||||
beTex, std::move(finishedCallback), data, size)) {
|
||||
dContext->deleteBackendTexture(beTex);
|
||||
return {};
|
||||
}
|
||||
return beTex;
|
||||
}
|
||||
|
||||
GrBackendTexture GrDirectContext::createCompressedBackendTexture(int width, int height,
|
||||
const GrBackendFormat& backendFormat,
|
||||
const SkColor4f& color,
|
||||
GrMipmapped mipMapped,
|
||||
GrProtected isProtected,
|
||||
GrGpuFinishedProc finishedProc,
|
||||
GrGpuFinishedContext finishedContext) {
|
||||
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
|
||||
int width, int height,
|
||||
const GrBackendFormat& backendFormat,
|
||||
const SkColor4f& color,
|
||||
GrMipmapped mipmapped,
|
||||
GrProtected isProtected,
|
||||
GrGpuFinishedProc finishedProc,
|
||||
GrGpuFinishedContext finishedContext) {
|
||||
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
|
||||
auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
|
||||
|
||||
@ -748,19 +759,35 @@ GrBackendTexture GrDirectContext::createCompressedBackendTexture(int width, int
|
||||
return {};
|
||||
}
|
||||
|
||||
GrGpu::BackendTextureData data(color);
|
||||
return create_and_update_compressed_backend_texture(this, {width, height},
|
||||
backendFormat, mipMapped, isProtected,
|
||||
std::move(finishedCallback), &data);
|
||||
SkImage::CompressionType compression = GrBackendFormatToCompressionType(backendFormat);
|
||||
if (compression == SkImage::CompressionType::kNone) {
|
||||
return {};
|
||||
}
|
||||
|
||||
size_t size = SkCompressedDataSize(compression,
|
||||
{width, height},
|
||||
nullptr,
|
||||
mipmapped == GrMipmapped::kYes);
|
||||
auto storage = std::make_unique<char[]>(size);
|
||||
GrFillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
|
||||
return create_and_update_compressed_backend_texture(this,
|
||||
{width, height},
|
||||
backendFormat,
|
||||
mipmapped,
|
||||
isProtected,
|
||||
std::move(finishedCallback),
|
||||
storage.get(),
|
||||
size);
|
||||
}
|
||||
|
||||
GrBackendTexture GrDirectContext::createCompressedBackendTexture(int width, int height,
|
||||
SkImage::CompressionType compression,
|
||||
const SkColor4f& color,
|
||||
GrMipmapped mipMapped,
|
||||
GrProtected isProtected,
|
||||
GrGpuFinishedProc finishedProc,
|
||||
GrGpuFinishedContext finishedContext) {
|
||||
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
|
||||
int width, int height,
|
||||
SkImage::CompressionType compression,
|
||||
const SkColor4f& color,
|
||||
GrMipmapped mipMapped,
|
||||
GrProtected isProtected,
|
||||
GrGpuFinishedProc finishedProc,
|
||||
GrGpuFinishedContext finishedContext) {
|
||||
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
|
||||
GrBackendFormat format = this->compressedBackendFormat(compression);
|
||||
return this->createCompressedBackendTexture(width, height, format, color,
|
||||
@ -768,14 +795,15 @@ GrBackendTexture GrDirectContext::createCompressedBackendTexture(int width, int
|
||||
finishedContext);
|
||||
}
|
||||
|
||||
GrBackendTexture GrDirectContext::createCompressedBackendTexture(int width, int height,
|
||||
const GrBackendFormat& backendFormat,
|
||||
const void* compressedData,
|
||||
size_t dataSize,
|
||||
GrMipmapped mipMapped,
|
||||
GrProtected isProtected,
|
||||
GrGpuFinishedProc finishedProc,
|
||||
GrGpuFinishedContext finishedContext) {
|
||||
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
|
||||
int width, int height,
|
||||
const GrBackendFormat& backendFormat,
|
||||
const void* compressedData,
|
||||
size_t dataSize,
|
||||
GrMipmapped mipMapped,
|
||||
GrProtected isProtected,
|
||||
GrGpuFinishedProc finishedProc,
|
||||
GrGpuFinishedContext finishedContext) {
|
||||
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
|
||||
auto finishedCallback = GrRefCntedCallback::Make(finishedProc, finishedContext);
|
||||
|
||||
@ -783,19 +811,24 @@ GrBackendTexture GrDirectContext::createCompressedBackendTexture(int width, int
|
||||
return {};
|
||||
}
|
||||
|
||||
GrGpu::BackendTextureData data(compressedData, dataSize);
|
||||
return create_and_update_compressed_backend_texture(this, {width, height},
|
||||
backendFormat, mipMapped, isProtected,
|
||||
std::move(finishedCallback), &data);
|
||||
return create_and_update_compressed_backend_texture(this,
|
||||
{width, height},
|
||||
backendFormat,
|
||||
mipMapped,
|
||||
isProtected,
|
||||
std::move(finishedCallback),
|
||||
compressedData,
|
||||
dataSize);
|
||||
}
|
||||
|
||||
GrBackendTexture GrDirectContext::createCompressedBackendTexture(int width, int height,
|
||||
SkImage::CompressionType compression,
|
||||
const void* data, size_t dataSize,
|
||||
GrMipmapped mipMapped,
|
||||
GrProtected isProtected,
|
||||
GrGpuFinishedProc finishedProc,
|
||||
GrGpuFinishedContext finishedContext) {
|
||||
GrBackendTexture GrDirectContext::createCompressedBackendTexture(
|
||||
int width, int height,
|
||||
SkImage::CompressionType compression,
|
||||
const void* data, size_t dataSize,
|
||||
GrMipmapped mipMapped,
|
||||
GrProtected isProtected,
|
||||
GrGpuFinishedProc finishedProc,
|
||||
GrGpuFinishedContext finishedContext) {
|
||||
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
|
||||
GrBackendFormat format = this->compressedBackendFormat(compression);
|
||||
return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipMapped,
|
||||
@ -812,8 +845,25 @@ bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& bac
|
||||
return false;
|
||||
}
|
||||
|
||||
GrGpu::BackendTextureData data(color);
|
||||
return fGpu->updateCompressedBackendTexture(backendTexture, std::move(finishedCallback), &data);
|
||||
SkImage::CompressionType compression =
|
||||
GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
|
||||
if (compression == SkImage::CompressionType::kNone) {
|
||||
return {};
|
||||
}
|
||||
size_t size = SkCompressedDataSize(compression,
|
||||
backendTexture.dimensions(),
|
||||
nullptr,
|
||||
backendTexture.hasMipmaps());
|
||||
SkAutoMalloc storage(size);
|
||||
GrFillInCompressedData(compression,
|
||||
backendTexture.dimensions(),
|
||||
backendTexture.mipmapped(),
|
||||
static_cast<char*>(storage.get()),
|
||||
color);
|
||||
return fGpu->updateCompressedBackendTexture(backendTexture,
|
||||
std::move(finishedCallback),
|
||||
storage.get(),
|
||||
size);
|
||||
}
|
||||
|
||||
bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
|
||||
@ -831,9 +881,10 @@ bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& bac
|
||||
return false;
|
||||
}
|
||||
|
||||
GrGpu::BackendTextureData data(compressedData, dataSize);
|
||||
|
||||
return fGpu->updateCompressedBackendTexture(backendTexture, std::move(finishedCallback), &data);
|
||||
return fGpu->updateCompressedBackendTexture(backendTexture,
|
||||
std::move(finishedCallback),
|
||||
compressedData,
|
||||
dataSize);
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -754,58 +754,16 @@ void GrGpu::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>*
|
||||
#endif // GR_GPU_STATS
|
||||
#endif // GR_TEST_UTILS
|
||||
|
||||
bool GrGpu::MipMapsAreCorrect(SkISize dimensions,
|
||||
GrMipmapped mipmapped,
|
||||
const BackendTextureData* data) {
|
||||
int numMipLevels = 1;
|
||||
if (mipmapped == GrMipmapped::kYes) {
|
||||
numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
|
||||
}
|
||||
|
||||
if (!data || data->type() == BackendTextureData::Type::kColor) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (data->type() == BackendTextureData::Type::kCompressed) {
|
||||
return false; // This should be going through CompressedDataIsCorrect
|
||||
}
|
||||
|
||||
SkASSERT(data->type() == BackendTextureData::Type::kPixmaps);
|
||||
|
||||
if (data->pixmap(0).dimensions() != dimensions) {
|
||||
return false;
|
||||
}
|
||||
|
||||
GrColorType colorType = data->pixmap(0).colorType();
|
||||
for (int i = 1; i < numMipLevels; ++i) {
|
||||
dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
|
||||
if (dimensions != data->pixmap(i).dimensions()) {
|
||||
return false;
|
||||
}
|
||||
if (colorType != data->pixmap(i).colorType()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool GrGpu::CompressedDataIsCorrect(SkISize dimensions, SkImage::CompressionType compressionType,
|
||||
GrMipmapped mipMapped, const BackendTextureData* data) {
|
||||
|
||||
if (!data || data->type() == BackendTextureData::Type::kColor) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (data->type() == BackendTextureData::Type::kPixmaps) {
|
||||
return false;
|
||||
}
|
||||
|
||||
SkASSERT(data->type() == BackendTextureData::Type::kCompressed);
|
||||
|
||||
size_t computedSize = SkCompressedDataSize(compressionType, dimensions,
|
||||
nullptr, mipMapped == GrMipmapped::kYes);
|
||||
|
||||
return computedSize == data->compressedSize();
|
||||
bool GrGpu::CompressedDataIsCorrect(SkISize dimensions,
|
||||
SkImage::CompressionType compressionType,
|
||||
GrMipmapped mipMapped,
|
||||
const void* data,
|
||||
size_t length) {
|
||||
size_t computedSize = SkCompressedDataSize(compressionType,
|
||||
dimensions,
|
||||
nullptr,
|
||||
mipMapped == GrMipmapped::kYes);
|
||||
return computedSize == length;
|
||||
}
|
||||
|
||||
GrBackendTexture GrGpu::createBackendTexture(SkISize dimensions,
|
||||
@ -836,33 +794,18 @@ GrBackendTexture GrGpu::createBackendTexture(SkISize dimensions,
|
||||
return this->onCreateBackendTexture(dimensions, format, renderable, mipMapped, isProtected);
|
||||
}
|
||||
|
||||
bool GrGpu::updateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData* data) {
|
||||
SkASSERT(data);
|
||||
const GrCaps* caps = this->caps();
|
||||
|
||||
bool GrGpu::clearBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) {
|
||||
if (!backendTexture.isValid()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (data->type() == BackendTextureData::Type::kPixmaps) {
|
||||
auto ct = data->pixmap(0).colorType();
|
||||
if (!caps->areColorTypeAndFormatCompatible(ct, backendTexture.getBackendFormat())) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
GrMipmapped mipmapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo;
|
||||
if (!MipMapsAreCorrect(backendTexture.dimensions(), mipmapped, data)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return this->onUpdateBackendTexture(backendTexture, std::move(finishedCallback), data);
|
||||
return this->onClearBackendTexture(backendTexture, std::move(finishedCallback), color);
|
||||
}
|
||||
|
||||
GrBackendTexture GrGpu::createCompressedBackendTexture(SkISize dimensions,
|
||||
@ -896,7 +839,8 @@ GrBackendTexture GrGpu::createCompressedBackendTexture(SkISize dimensions,
|
||||
|
||||
bool GrGpu::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData* data) {
|
||||
const void* data,
|
||||
size_t length) {
|
||||
SkASSERT(data);
|
||||
|
||||
if (!backendTexture.isValid()) {
|
||||
@ -917,10 +861,16 @@ bool GrGpu::updateCompressedBackendTexture(const GrBackendTexture& backendTextur
|
||||
|
||||
GrMipmapped mipMapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo;
|
||||
|
||||
if (!CompressedDataIsCorrect(backendTexture.dimensions(), compressionType, mipMapped, data)) {
|
||||
if (!CompressedDataIsCorrect(backendTexture.dimensions(),
|
||||
compressionType,
|
||||
mipMapped,
|
||||
data,
|
||||
length)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return this->onUpdateCompressedBackendTexture(backendTexture, std::move(finishedCallback),
|
||||
data);
|
||||
return this->onUpdateCompressedBackendTexture(backendTexture,
|
||||
std::move(finishedCallback),
|
||||
data,
|
||||
length);
|
||||
}
|
||||
|
@ -497,58 +497,6 @@ public:
|
||||
Stats* stats() { return &fStats; }
|
||||
void dumpJSON(SkJSONWriter*) const;
|
||||
|
||||
/** Used to initialize a backend texture with either a constant color, pixmaps or
|
||||
* compressed data.
|
||||
*/
|
||||
class BackendTextureData {
|
||||
public:
|
||||
enum class Type { kColor, kPixmaps, kCompressed };
|
||||
BackendTextureData() = default;
|
||||
BackendTextureData(const SkColor4f& color) : fType(Type::kColor), fColor(color) {}
|
||||
BackendTextureData(const GrPixmap pixmaps[]) : fType(Type::kPixmaps), fPixmaps(pixmaps) {
|
||||
SkASSERT(pixmaps);
|
||||
}
|
||||
BackendTextureData(const void* data, size_t size) : fType(Type::kCompressed) {
|
||||
SkASSERT(data);
|
||||
fCompressed.fData = data;
|
||||
fCompressed.fSize = size;
|
||||
}
|
||||
|
||||
Type type() const { return fType; }
|
||||
SkColor4f color() const {
|
||||
SkASSERT(this->type() == Type::kColor);
|
||||
return fColor;
|
||||
}
|
||||
|
||||
const GrPixmap& pixmap(int i) const {
|
||||
SkASSERT(this->type() == Type::kPixmaps);
|
||||
return fPixmaps[i];
|
||||
}
|
||||
const GrPixmap* pixmaps() const {
|
||||
SkASSERT(this->type() == Type::kPixmaps);
|
||||
return fPixmaps;
|
||||
}
|
||||
|
||||
const void* compressedData() const {
|
||||
SkASSERT(this->type() == Type::kCompressed);
|
||||
return fCompressed.fData;
|
||||
}
|
||||
size_t compressedSize() const {
|
||||
SkASSERT(this->type() == Type::kCompressed);
|
||||
return fCompressed.fSize;
|
||||
}
|
||||
|
||||
private:
|
||||
Type fType = Type::kColor;
|
||||
union {
|
||||
SkColor4f fColor = {0, 0, 0, 0};
|
||||
const GrPixmap* fPixmaps;
|
||||
struct {
|
||||
const void* fData;
|
||||
size_t fSize;
|
||||
} fCompressed;
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
* Creates a texture directly in the backend API without wrapping it in a GrTexture.
|
||||
@ -570,9 +518,9 @@ public:
|
||||
GrMipmapped,
|
||||
GrProtected);
|
||||
|
||||
bool updateBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*);
|
||||
bool clearBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color);
|
||||
|
||||
/**
|
||||
* Same as the createBackendTexture case except compressed backend textures can
|
||||
@ -585,7 +533,8 @@ public:
|
||||
|
||||
bool updateCompressedBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*);
|
||||
const void* data,
|
||||
size_t length);
|
||||
|
||||
virtual bool setBackendTextureState(const GrBackendTexture&,
|
||||
const GrBackendSurfaceMutableState&,
|
||||
@ -685,9 +634,11 @@ public:
|
||||
virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
|
||||
|
||||
protected:
|
||||
static bool MipMapsAreCorrect(SkISize dimensions, GrMipmapped, const BackendTextureData*);
|
||||
static bool CompressedDataIsCorrect(SkISize dimensions, SkImage::CompressionType,
|
||||
GrMipmapped, const BackendTextureData*);
|
||||
static bool CompressedDataIsCorrect(SkISize dimensions,
|
||||
SkImage::CompressionType,
|
||||
GrMipmapped,
|
||||
const void* data,
|
||||
size_t length);
|
||||
|
||||
// Handles cases where a surface will be updated without a call to flushRenderTarget.
|
||||
void didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
|
||||
@ -710,13 +661,14 @@ private:
|
||||
virtual GrBackendTexture onCreateCompressedBackendTexture(
|
||||
SkISize dimensions, const GrBackendFormat&, GrMipmapped, GrProtected) = 0;
|
||||
|
||||
virtual bool onUpdateBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) = 0;
|
||||
virtual bool onClearBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) = 0;
|
||||
|
||||
virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) = 0;
|
||||
const void* data,
|
||||
size_t length) = 0;
|
||||
|
||||
// called when the 3D context state is unknown. Subclass should emit any
|
||||
// assumed 3D context state and dirty any state cache.
|
||||
|
@ -994,32 +994,12 @@ GrBackendTexture GrD3DGpu::onCreateBackendTexture(SkISize dimensions,
|
||||
return GrBackendTexture(dimensions.width(), dimensions.height(), info);
|
||||
}
|
||||
|
||||
static void copy_src_data(char* mapPtr,
|
||||
DXGI_FORMAT dxgiFormat,
|
||||
D3D12_PLACED_SUBRESOURCE_FOOTPRINT* placedFootprints,
|
||||
const GrPixmap srcData[],
|
||||
int numMipLevels) {
|
||||
SkASSERT(srcData && numMipLevels);
|
||||
SkASSERT(!GrDxgiFormatIsCompressed(dxgiFormat));
|
||||
SkASSERT(mapPtr);
|
||||
|
||||
size_t bytesPerPixel = GrDxgiFormatBytesPerBlock(dxgiFormat);
|
||||
|
||||
for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
|
||||
const size_t trimRowBytes = srcData[currentMipLevel].width() * bytesPerPixel;
|
||||
|
||||
// copy data into the buffer, skipping any trailing bytes
|
||||
char* dst = mapPtr + placedFootprints[currentMipLevel].Offset;
|
||||
SkRectMemcpy(dst, placedFootprints[currentMipLevel].Footprint.RowPitch,
|
||||
srcData[currentMipLevel].addr(), srcData[currentMipLevel].rowBytes(),
|
||||
trimRowBytes, srcData[currentMipLevel].height());
|
||||
}
|
||||
}
|
||||
|
||||
static bool copy_color_data(const GrD3DCaps& caps, char* mapPtr,
|
||||
DXGI_FORMAT dxgiFormat, SkISize dimensions,
|
||||
static bool copy_color_data(const GrD3DCaps& caps,
|
||||
char* mapPtr,
|
||||
DXGI_FORMAT dxgiFormat,
|
||||
SkISize dimensions,
|
||||
D3D12_PLACED_SUBRESOURCE_FOOTPRINT* placedFootprints,
|
||||
SkColor4f color) {
|
||||
std::array<float, 4> color) {
|
||||
auto colorType = caps.getFormatColorType(dxgiFormat);
|
||||
if (colorType == GrColorType::kUnknown) {
|
||||
return false;
|
||||
@ -1032,11 +1012,12 @@ static bool copy_color_data(const GrD3DCaps& caps, char* mapPtr,
|
||||
return true;
|
||||
}
|
||||
|
||||
bool GrD3DGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData* data) {
|
||||
bool GrD3DGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) {
|
||||
GrD3DTextureResourceInfo info;
|
||||
SkAssertResult(backendTexture.getD3DTextureResourceInfo(&info));
|
||||
SkASSERT(!GrDxgiFormatIsCompressed(info.fFormat));
|
||||
|
||||
sk_sp<GrD3DResourceState> state = backendTexture.getGrD3DResourceState();
|
||||
SkASSERT(state);
|
||||
@ -1060,27 +1041,23 @@ bool GrD3DGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
D3D12_RESOURCE_DESC desc = d3dResource->GetDesc();
|
||||
unsigned int mipLevelCount = 1;
|
||||
if (backendTexture.fMipmapped == GrMipmapped::kYes) {
|
||||
mipLevelCount = SkMipmap::ComputeLevelCount(backendTexture.dimensions().width(),
|
||||
backendTexture.dimensions().height()) + 1;
|
||||
mipLevelCount = SkMipmap::ComputeLevelCount(backendTexture.dimensions()) + 1;
|
||||
}
|
||||
SkASSERT(mipLevelCount == info.fLevelCount);
|
||||
SkAutoTMalloc<D3D12_PLACED_SUBRESOURCE_FOOTPRINT> placedFootprints(mipLevelCount);
|
||||
SkAutoSTMalloc<15, D3D12_PLACED_SUBRESOURCE_FOOTPRINT> placedFootprints(mipLevelCount);
|
||||
UINT numRows;
|
||||
UINT64 rowSizeInBytes;
|
||||
UINT64 combinedBufferSize;
|
||||
SkAutoTMalloc<UINT> numRows(mipLevelCount);
|
||||
SkAutoTMalloc<UINT64> rowSizeInBytes(mipLevelCount);
|
||||
fDevice->GetCopyableFootprints(&desc, 0, mipLevelCount, 0, placedFootprints.get(),
|
||||
numRows.get(), rowSizeInBytes.get(), &combinedBufferSize);
|
||||
// We reuse the same top-level buffer area for all levels, hence passing 1 for level count.
|
||||
fDevice->GetCopyableFootprints(&desc,
|
||||
/* first resource */ 0,
|
||||
/* mip level count */ 1,
|
||||
/* base offset */ 0,
|
||||
placedFootprints.get(),
|
||||
&numRows,
|
||||
&rowSizeInBytes,
|
||||
&combinedBufferSize);
|
||||
SkASSERT(combinedBufferSize);
|
||||
if (data->type() == BackendTextureData::Type::kColor &&
|
||||
!GrDxgiFormatIsCompressed(info.fFormat) && mipLevelCount > 1) {
|
||||
// For a single uncompressed color, we reuse the same top-level buffer area for all levels.
|
||||
combinedBufferSize =
|
||||
placedFootprints[0].Footprint.RowPitch * placedFootprints[0].Footprint.Height;
|
||||
for (unsigned int i = 1; i < mipLevelCount; ++i) {
|
||||
placedFootprints[i].Offset = 0;
|
||||
placedFootprints[i].Footprint.RowPitch = placedFootprints[0].Footprint.RowPitch;
|
||||
}
|
||||
}
|
||||
|
||||
GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
|
||||
combinedBufferSize, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
|
||||
@ -1090,42 +1067,37 @@ bool GrD3DGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
|
||||
char* bufferData = (char*)slice.fOffsetMapPtr;
|
||||
SkASSERT(bufferData);
|
||||
|
||||
if (data->type() == BackendTextureData::Type::kPixmaps) {
|
||||
copy_src_data(bufferData, info.fFormat, placedFootprints.get(), data->pixmaps(),
|
||||
info.fLevelCount);
|
||||
} else if (data->type() == BackendTextureData::Type::kCompressed) {
|
||||
copy_compressed_data(bufferData, info.fFormat, placedFootprints.get(), numRows.get(),
|
||||
rowSizeInBytes.get(), data->compressedData(), info.fLevelCount);
|
||||
} else {
|
||||
SkASSERT(data->type() == BackendTextureData::Type::kColor);
|
||||
SkImage::CompressionType compression =
|
||||
GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
|
||||
if (SkImage::CompressionType::kNone == compression) {
|
||||
if (!copy_color_data(this->d3dCaps(), bufferData, info.fFormat,
|
||||
backendTexture.dimensions(), placedFootprints, data->color())) {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
size_t totalCompressedSize = SkCompressedFormatDataSize(compression,
|
||||
backendTexture.dimensions(),
|
||||
backendTexture.hasMipmaps());
|
||||
SkAutoTMalloc<char> tempData(totalCompressedSize);
|
||||
GrFillInCompressedData(compression, backendTexture.dimensions(),
|
||||
backendTexture.fMipmapped, tempData, data->color());
|
||||
copy_compressed_data(bufferData, info.fFormat, placedFootprints.get(), numRows.get(),
|
||||
rowSizeInBytes.get(), tempData.get(), info.fLevelCount);
|
||||
}
|
||||
if (!copy_color_data(this->d3dCaps(),
|
||||
bufferData,
|
||||
info.fFormat,
|
||||
backendTexture.dimensions(),
|
||||
placedFootprints,
|
||||
color)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Update the offsets in the footprints to be relative to the slice's offset
|
||||
for (unsigned int i = 0; i < mipLevelCount; ++i) {
|
||||
placedFootprints[i].Offset += slice.fOffset;
|
||||
// Update the offsets in the footprint to be relative to the slice's offset
|
||||
placedFootprints[0].Offset += slice.fOffset;
|
||||
// Since we're sharing data for all the levels, set all the upper level footprints to the base.
|
||||
UINT w = placedFootprints[0].Footprint.Width;
|
||||
UINT h = placedFootprints[0].Footprint.Height;
|
||||
for (unsigned int i = 1; i < mipLevelCount; ++i) {
|
||||
w = std::max(1U, w/2);
|
||||
h = std::max(1U, h/2);
|
||||
placedFootprints[i].Offset = placedFootprints[0].Offset;
|
||||
placedFootprints[i].Footprint.Format = placedFootprints[0].Footprint.Format;
|
||||
placedFootprints[i].Footprint.Width = w;
|
||||
placedFootprints[i].Footprint.Height = h;
|
||||
placedFootprints[i].Footprint.Depth = 1;
|
||||
placedFootprints[i].Footprint.RowPitch = placedFootprints[0].Footprint.RowPitch;
|
||||
}
|
||||
|
||||
ID3D12Resource* d3dBuffer = static_cast<GrD3DBuffer*>(slice.fBuffer)->d3dResource();
|
||||
cmdList->copyBufferToTexture(d3dBuffer, texture.get(), mipLevelCount, placedFootprints.get(), 0,
|
||||
0);
|
||||
cmdList->copyBufferToTexture(d3dBuffer,
|
||||
texture.get(),
|
||||
mipLevelCount,
|
||||
placedFootprints.get(),
|
||||
/*left*/ 0,
|
||||
/*top */ 0);
|
||||
|
||||
if (finishedCallback) {
|
||||
this->addFinishedCallback(std::move(finishedCallback));
|
||||
@ -1143,8 +1115,88 @@ GrBackendTexture GrD3DGpu::onCreateCompressedBackendTexture(
|
||||
|
||||
bool GrD3DGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData* data) {
|
||||
return this->onUpdateBackendTexture(backendTexture, std::move(finishedCallback), data);
|
||||
const void* data,
|
||||
size_t size) {
|
||||
GrD3DTextureResourceInfo info;
|
||||
SkAssertResult(backendTexture.getD3DTextureResourceInfo(&info));
|
||||
|
||||
sk_sp<GrD3DResourceState> state = backendTexture.getGrD3DResourceState();
|
||||
SkASSERT(state);
|
||||
sk_sp<GrD3DTexture> texture = GrD3DTexture::MakeWrappedTexture(this,
|
||||
backendTexture.dimensions(),
|
||||
GrWrapCacheable::kNo,
|
||||
kRW_GrIOType,
|
||||
info,
|
||||
std::move(state));
|
||||
if (!texture) {
|
||||
return false;
|
||||
}
|
||||
|
||||
GrD3DDirectCommandList* cmdList = this->currentCommandList();
|
||||
if (!cmdList) {
|
||||
return false;
|
||||
}
|
||||
|
||||
texture->setResourceState(this, D3D12_RESOURCE_STATE_COPY_DEST);
|
||||
|
||||
ID3D12Resource* d3dResource = texture->d3dResource();
|
||||
SkASSERT(d3dResource);
|
||||
D3D12_RESOURCE_DESC desc = d3dResource->GetDesc();
|
||||
unsigned int mipLevelCount = 1;
|
||||
if (backendTexture.hasMipmaps()) {
|
||||
mipLevelCount = SkMipmap::ComputeLevelCount(backendTexture.dimensions().width(),
|
||||
backendTexture.dimensions().height()) + 1;
|
||||
}
|
||||
SkASSERT(mipLevelCount == info.fLevelCount);
|
||||
SkAutoTMalloc<D3D12_PLACED_SUBRESOURCE_FOOTPRINT> placedFootprints(mipLevelCount);
|
||||
UINT64 combinedBufferSize;
|
||||
SkAutoTMalloc<UINT> numRows(mipLevelCount);
|
||||
SkAutoTMalloc<UINT64> rowSizeInBytes(mipLevelCount);
|
||||
fDevice->GetCopyableFootprints(&desc,
|
||||
0,
|
||||
mipLevelCount,
|
||||
0,
|
||||
placedFootprints.get(),
|
||||
numRows.get(),
|
||||
rowSizeInBytes.get(),
|
||||
&combinedBufferSize);
|
||||
SkASSERT(combinedBufferSize);
|
||||
SkASSERT(!GrDxgiFormatIsCompressed(info.fFormat));
|
||||
|
||||
GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
|
||||
combinedBufferSize, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
|
||||
if (!slice.fBuffer) {
|
||||
return false;
|
||||
}
|
||||
|
||||
char* bufferData = (char*)slice.fOffsetMapPtr;
|
||||
SkASSERT(bufferData);
|
||||
copy_compressed_data(bufferData,
|
||||
info.fFormat,
|
||||
placedFootprints.get(),
|
||||
numRows.get(),
|
||||
rowSizeInBytes.get(),
|
||||
data,
|
||||
info.fLevelCount);
|
||||
|
||||
// Update the offsets in the footprints to be relative to the slice's offset
|
||||
for (unsigned int i = 0; i < mipLevelCount; ++i) {
|
||||
placedFootprints[i].Offset += slice.fOffset;
|
||||
}
|
||||
|
||||
ID3D12Resource* d3dBuffer = static_cast<GrD3DBuffer*>(slice.fBuffer)->d3dResource();
|
||||
cmdList->copyBufferToTexture(d3dBuffer,
|
||||
texture.get(),
|
||||
mipLevelCount,
|
||||
placedFootprints.get(),
|
||||
0,
|
||||
0);
|
||||
|
||||
if (finishedCallback) {
|
||||
this->addFinishedCallback(std::move(finishedCallback));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void GrD3DGpu::deleteBackendTexture(const GrBackendTexture& tex) {
|
||||
|
@ -217,9 +217,9 @@ private:
|
||||
GrMipmapped,
|
||||
GrProtected) override;
|
||||
|
||||
bool onUpdateBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) override;
|
||||
bool onClearBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) override;
|
||||
|
||||
GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
|
||||
const GrBackendFormat&,
|
||||
@ -228,7 +228,8 @@ private:
|
||||
|
||||
bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) override;
|
||||
const void* data,
|
||||
size_t size) override;
|
||||
|
||||
bool submitDirectCommandList(SyncQueue sync);
|
||||
|
||||
|
@ -381,35 +381,23 @@ void GrDawnGpu::uploadTextureData(GrColorType srcColorType, const GrMipLevel tex
|
||||
}
|
||||
}
|
||||
|
||||
bool GrDawnGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData* data) {
|
||||
bool GrDawnGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) {
|
||||
GrDawnTextureInfo info;
|
||||
SkAssertResult(backendTexture.getDawnTextureInfo(&info));
|
||||
|
||||
size_t bpp = GrDawnBytesPerBlock(info.fFormat);
|
||||
size_t baseLayerSize = bpp * backendTexture.width() * backendTexture.height();
|
||||
const void* pixels;
|
||||
SkAutoMalloc defaultStorage(baseLayerSize);
|
||||
if (data && data->type() == BackendTextureData::Type::kPixmaps) {
|
||||
int numMipLevels = info.fLevelCount;
|
||||
SkAutoTArray<GrMipLevel> texels(numMipLevels);
|
||||
GrColorType colorType = data->pixmap(0).colorType();
|
||||
for (int i = 0; i < numMipLevels; ++i) {
|
||||
texels[i] = {data->pixmap(i).addr(), data->pixmap(i).rowBytes(), nullptr};
|
||||
}
|
||||
SkIRect dstRect = SkIRect::MakeSize(backendTexture.dimensions());
|
||||
this->uploadTextureData(colorType, texels.get(), numMipLevels, dstRect, info.fTexture);
|
||||
return true;
|
||||
}
|
||||
pixels = defaultStorage.get();
|
||||
GrColorType colorType;
|
||||
if (!GrDawnFormatToGrColorType(info.fFormat, &colorType)) {
|
||||
return false;
|
||||
}
|
||||
SkISize size{backendTexture.width(), backendTexture.height()};
|
||||
GrImageInfo imageInfo(colorType, kUnpremul_SkAlphaType, nullptr, size);
|
||||
GrClearImage(imageInfo, defaultStorage.get(), bpp * backendTexture.width(), data->color());
|
||||
|
||||
size_t bpp = GrDawnBytesPerBlock(info.fFormat);
|
||||
size_t baseLayerSize = bpp * backendTexture.width() * backendTexture.height();
|
||||
SkAutoMalloc defaultStorage(baseLayerSize);
|
||||
GrImageInfo imageInfo(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
|
||||
GrClearImage(imageInfo, defaultStorage.get(), bpp * backendTexture.width(), color);
|
||||
|
||||
wgpu::Device device = this->device();
|
||||
wgpu::CommandEncoder copyEncoder = this->getCopyEncoder();
|
||||
int w = backendTexture.width(), h = backendTexture.height();
|
||||
@ -420,9 +408,9 @@ bool GrDawnGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
GrStagingBufferManager::Slice stagingBuffer =
|
||||
this->stagingBufferManager()->allocateStagingBufferSlice(size);
|
||||
if (rowBytes == origRowBytes) {
|
||||
memcpy(stagingBuffer.fOffsetMapPtr, pixels, size);
|
||||
memcpy(stagingBuffer.fOffsetMapPtr, defaultStorage.get(), size);
|
||||
} else {
|
||||
const char* src = static_cast<const char*>(pixels);
|
||||
const char* src = static_cast<const char*>(defaultStorage.get());
|
||||
char* dst = static_cast<char*>(stagingBuffer.fOffsetMapPtr);
|
||||
for (int row = 0; row < h; row++) {
|
||||
memcpy(dst, src, origRowBytes);
|
||||
@ -454,7 +442,8 @@ GrBackendTexture GrDawnGpu::onCreateCompressedBackendTexture(
|
||||
|
||||
bool GrDawnGpu::onUpdateCompressedBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) {
|
||||
const void* data,
|
||||
size_t size) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -144,9 +144,9 @@ private:
|
||||
GrMipmapped,
|
||||
GrProtected) override;
|
||||
|
||||
bool onUpdateBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) override;
|
||||
bool onClearBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) override;
|
||||
|
||||
GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
|
||||
const GrBackendFormat&,
|
||||
@ -155,7 +155,8 @@ private:
|
||||
|
||||
bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) override;
|
||||
const void* data,
|
||||
size_t size) override;
|
||||
|
||||
sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
|
||||
const void* data) override;
|
||||
|
@ -980,7 +980,7 @@ bool GrGLGpu::uploadColorTypeTexData(GrGLFormat textureFormat,
|
||||
bool GrGLGpu::uploadColorToTex(GrGLFormat textureFormat,
|
||||
SkISize texDims,
|
||||
GrGLenum target,
|
||||
SkColor4f color,
|
||||
std::array<float, 4> color,
|
||||
uint32_t levelMask) {
|
||||
GrColorType colorType;
|
||||
GrGLenum externalFormat, externalType;
|
||||
@ -1386,8 +1386,11 @@ sk_sp<GrTexture> GrGLGpu::onCreateTexture(SkISize dimensions,
|
||||
fHWBoundRenderTargetUniqueID.makeInvalid();
|
||||
} else {
|
||||
this->bindTextureToScratchUnit(texDesc.fTarget, tex->textureID());
|
||||
static constexpr SkColor4f kZeroColor = {0, 0, 0, 0};
|
||||
this->uploadColorToTex(texDesc.fFormat, texDesc.fSize, texDesc.fTarget, kZeroColor,
|
||||
std::array<float, 4> zeros = {};
|
||||
this->uploadColorToTex(texDesc.fFormat,
|
||||
texDesc.fSize,
|
||||
texDesc.fTarget,
|
||||
zeros,
|
||||
levelClearMask);
|
||||
}
|
||||
}
|
||||
@ -1482,11 +1485,8 @@ GrBackendTexture GrGLGpu::onCreateCompressedBackendTexture(
|
||||
|
||||
bool GrGLGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData* data) {
|
||||
SkASSERT(data && data->type() != BackendTextureData::Type::kPixmaps);
|
||||
|
||||
this->handleDirtyContext();
|
||||
|
||||
const void* data,
|
||||
size_t length) {
|
||||
GrGLTextureInfo info;
|
||||
SkAssertResult(backendTexture.getGLTextureInfo(&info));
|
||||
|
||||
@ -1499,27 +1499,6 @@ bool GrGLGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTe
|
||||
|
||||
GrMipmapped mipMapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo;
|
||||
|
||||
const char* rawData = nullptr;
|
||||
size_t rawDataSize = 0;
|
||||
SkAutoMalloc am;
|
||||
if (data->type() == BackendTextureData::Type::kCompressed) {
|
||||
rawData = (const char*)data->compressedData();
|
||||
rawDataSize = data->compressedSize();
|
||||
} else {
|
||||
SkASSERT(data->type() == BackendTextureData::Type::kColor);
|
||||
SkASSERT(compression != SkImage::CompressionType::kNone);
|
||||
|
||||
rawDataSize = SkCompressedDataSize(compression, backendTexture.dimensions(), nullptr,
|
||||
backendTexture.hasMipmaps());
|
||||
|
||||
am.reset(rawDataSize);
|
||||
|
||||
GrFillInCompressedData(compression, backendTexture.dimensions(), mipMapped, (char*)am.get(),
|
||||
data->color());
|
||||
|
||||
rawData = (const char*)am.get();
|
||||
}
|
||||
|
||||
this->bindTextureToScratchUnit(info.fTarget, info.fID);
|
||||
|
||||
// If we have mips make sure the base level is set to 0 and the max level set to numMipLevels-1
|
||||
@ -1540,9 +1519,13 @@ bool GrGLGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTe
|
||||
params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
|
||||
}
|
||||
|
||||
bool result = this->uploadCompressedTexData(
|
||||
compression, glFormat, backendTexture.dimensions(), mipMapped, GR_GL_TEXTURE_2D,
|
||||
rawData, rawDataSize);
|
||||
bool result = this->uploadCompressedTexData(compression,
|
||||
glFormat,
|
||||
backendTexture.dimensions(),
|
||||
mipMapped,
|
||||
GR_GL_TEXTURE_2D,
|
||||
data,
|
||||
length);
|
||||
|
||||
// Unbind this texture from the scratch texture unit.
|
||||
this->bindTextureToScratchUnit(info.fTarget, 0);
|
||||
@ -3617,9 +3600,9 @@ GrBackendTexture GrGLGpu::onCreateBackendTexture(SkISize dimensions,
|
||||
std::move(parameters));
|
||||
}
|
||||
|
||||
bool GrGLGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData* data) {
|
||||
bool GrGLGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) {
|
||||
this->handleDirtyContext();
|
||||
|
||||
GrGLTextureInfo info;
|
||||
@ -3651,26 +3634,12 @@ bool GrGLGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
|
||||
}
|
||||
|
||||
SkASSERT(data->type() != BackendTextureData::Type::kCompressed);
|
||||
bool result = false;
|
||||
if (data->type() == BackendTextureData::Type::kPixmaps) {
|
||||
SkTArray<GrMipLevel> texels;
|
||||
texels.push_back_n(numMipLevels);
|
||||
GrColorType colorType = data->pixmap(0).colorType();
|
||||
for (int i = 0; i < numMipLevels; ++i) {
|
||||
texels[i] = {data->pixmap(i).addr(),
|
||||
data->pixmap(i).rowBytes(),
|
||||
data->pixmap(i).pixelStorage()};
|
||||
}
|
||||
SkIRect dstRect = SkIRect::MakeSize(backendTexture.dimensions());
|
||||
result = this->uploadColorTypeTexData(glFormat, colorType, backendTexture.dimensions(),
|
||||
info.fTarget, dstRect, colorType, texels.begin(),
|
||||
texels.count());
|
||||
} else if (data->type() == BackendTextureData::Type::kColor) {
|
||||
uint32_t levelMask = (1 << numMipLevels) - 1;
|
||||
result = this->uploadColorToTex(glFormat, backendTexture.dimensions(), info.fTarget,
|
||||
data->color(), levelMask);
|
||||
}
|
||||
uint32_t levelMask = (1 << numMipLevels) - 1;
|
||||
bool result = this->uploadColorToTex(glFormat,
|
||||
backendTexture.dimensions(),
|
||||
info.fTarget,
|
||||
color,
|
||||
levelMask);
|
||||
|
||||
// Unbind this texture from the scratch texture unit.
|
||||
this->bindTextureToScratchUnit(info.fTarget, 0);
|
||||
|
@ -212,13 +212,14 @@ private:
|
||||
GrMipmapped,
|
||||
GrProtected) override;
|
||||
|
||||
bool onUpdateBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) override;
|
||||
bool onClearBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) override;
|
||||
|
||||
bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) override;
|
||||
const void* data,
|
||||
size_t length) override;
|
||||
|
||||
void onResetContext(uint32_t resetBits) override;
|
||||
|
||||
@ -453,7 +454,7 @@ private:
|
||||
bool uploadColorToTex(GrGLFormat textureFormat,
|
||||
SkISize texDims,
|
||||
GrGLenum target,
|
||||
SkColor4f color,
|
||||
std::array<float, 4> color,
|
||||
uint32_t levelMask);
|
||||
|
||||
// Pushes data to the currently bound texture to the currently active unit. 'dstRect' must be
|
||||
|
@ -164,9 +164,9 @@ private:
|
||||
GrMipmapped,
|
||||
GrProtected) override;
|
||||
|
||||
bool onUpdateBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) override {
|
||||
bool onClearBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) override {
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -177,7 +177,8 @@ private:
|
||||
|
||||
bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) override {
|
||||
const void*,
|
||||
size_t) override {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -131,9 +131,9 @@ private:
|
||||
GrMipmapped,
|
||||
GrProtected) override;
|
||||
|
||||
bool onUpdateBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) override;
|
||||
bool onClearBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) override;
|
||||
|
||||
GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
|
||||
const GrBackendFormat&,
|
||||
@ -142,7 +142,8 @@ private:
|
||||
|
||||
bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData*) override;
|
||||
const void* data,
|
||||
size_t size) override;
|
||||
|
||||
sk_sp<GrTexture> onCreateTexture(SkISize,
|
||||
const GrBackendFormat&,
|
||||
|
@ -946,9 +946,9 @@ GrBackendTexture GrMtlGpu::onCreateBackendTexture(SkISize dimensions,
|
||||
return backendTex;
|
||||
}
|
||||
|
||||
bool GrMtlGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
const BackendTextureData* data) {
|
||||
bool GrMtlGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
|
||||
sk_sp<GrRefCntedCallback> finishedCallback,
|
||||
std::array<float, 4> color) {
|
||||
GrMtlTextureInfo info;
|
||||
SkAssertResult(backendTexture.getMtlTextureInfo(&info));
|
||||
|
||||
@ -956,45 +956,19 @@ bool GrMtlGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
|
||||
const MTLPixelFormat mtlFormat = mtlTexture.pixelFormat;
|
||||
|
||||
int numMipLevels = mtlTexture.mipmapLevelCount;
|
||||
GrMipmapped mipMapped = numMipLevels > 1 ? GrMipmapped::kYes : GrMipmapped::kNo;
|
||||
|
||||
SkImage::CompressionType compression = GrBackendFormatToCompressionType(
|
||||
backendTexture.getBackendFormat());
|
||||
|
||||
// Create a transfer buffer and fill with data.
|
||||
size_t bytesPerPixel = GrMtlFormatBytesPerBlock(mtlFormat);
|
||||
SkSTArray<16, size_t> individualMipOffsets;
|
||||
size_t combinedBufferSize;
|
||||
|
||||
if (data->type() == BackendTextureData::Type::kColor &&
|
||||
compression == SkImage::CompressionType::kNone) {
|
||||
combinedBufferSize = bytesPerPixel*backendTexture.width()*backendTexture.height();
|
||||
// Reuse the same buffer for all levels. Should be ok since we made the row bytes tight.
|
||||
individualMipOffsets.push_back_n(numMipLevels, (size_t)0);
|
||||
} else if (compression == SkImage::CompressionType::kNone) {
|
||||
combinedBufferSize = GrComputeTightCombinedBufferSize(bytesPerPixel,
|
||||
backendTexture.dimensions(),
|
||||
&individualMipOffsets,
|
||||
numMipLevels);
|
||||
} else {
|
||||
combinedBufferSize = SkCompressedDataSize(compression, backendTexture.dimensions(),
|
||||
&individualMipOffsets,
|
||||
mipMapped == GrMipmapped::kYes);
|
||||
}
|
||||
SkASSERT(individualMipOffsets.count() == numMipLevels);
|
||||
// Reuse the same buffer for all levels. Should be ok since we made the row bytes tight.
|
||||
combinedBufferSize = bytesPerPixel*backendTexture.width()*backendTexture.height();
|
||||
|
||||
#ifdef SK_BUILD_FOR_MAC
|
||||
static const size_t kMinAlignment = 4;
|
||||
#else
|
||||
static const size_t kMinAlignment = 1;
|
||||
#endif
|
||||
size_t alignment;
|
||||
if (data->type() == BackendTextureData::Type::kCompressed) {
|
||||
alignment = std::max(SkCompressedBlockSize(compression), kMinAlignment);
|
||||
} else {
|
||||
alignment = std::max(bytesPerPixel, kMinAlignment);
|
||||
}
|
||||
size_t alignment = std::max(bytesPerPixel, kMinAlignment);
|
||||
GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
|
||||
combinedBufferSize, alignment);
|
||||
if (!slice.fBuffer) {
|
||||
@ -1002,28 +976,15 @@ bool GrMtlGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
}
|
||||
char* buffer = (char*)slice.fOffsetMapPtr;
|
||||
|
||||
if (data->type() == BackendTextureData::Type::kPixmaps) {
|
||||
copy_src_data(buffer, bytesPerPixel, individualMipOffsets, data->pixmaps(),
|
||||
numMipLevels, combinedBufferSize);
|
||||
} else if (data->type() == BackendTextureData::Type::kCompressed) {
|
||||
memcpy(buffer, data->compressedData(), data->compressedSize());
|
||||
} else {
|
||||
SkASSERT(data->type() == BackendTextureData::Type::kColor);
|
||||
if (compression == SkImage::CompressionType::kNone) {
|
||||
auto colorType = mtl_format_to_backend_tex_clear_colortype(mtlFormat);
|
||||
if (colorType == GrColorType::kUnknown) {
|
||||
return false;
|
||||
}
|
||||
GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
|
||||
auto rb = ii.minRowBytes();
|
||||
SkASSERT(rb == bytesPerPixel*backendTexture.width());
|
||||
if (!GrClearImage(ii, buffer, rb, data->color())) {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
GrFillInCompressedData(compression, backendTexture.dimensions(), mipMapped, buffer,
|
||||
data->color());
|
||||
}
|
||||
auto colorType = mtl_format_to_backend_tex_clear_colortype(mtlFormat);
|
||||
if (colorType == GrColorType::kUnknown) {
|
||||
return false;
|
||||
}
|
||||
GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
|
||||
auto rb = ii.minRowBytes();
|
||||
SkASSERT(rb == bytesPerPixel*backendTexture.width());
|
||||
if (!GrClearImage(ii, buffer, rb, color)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Transfer buffer contents to texture
|
||||
@ -1034,32 +995,29 @@ bool GrMtlGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
|
||||
GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
|
||||
|
||||
SkISize levelDimensions(backendTexture.dimensions());
|
||||
int numMipLevels = mtlTexture.mipmapLevelCount;
|
||||
for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
|
||||
size_t levelRowBytes;
|
||||
size_t levelSize;
|
||||
|
||||
if (compression == SkImage::CompressionType::kNone) {
|
||||
levelRowBytes = levelDimensions.width() * bytesPerPixel;
|
||||
levelSize = levelRowBytes * levelDimensions.height();
|
||||
} else {
|
||||
levelRowBytes = GrCompressedRowBytes(compression, levelDimensions.width());
|
||||
levelSize = SkCompressedDataSize(compression, levelDimensions, nullptr, false);
|
||||
}
|
||||
levelRowBytes = levelDimensions.width() * bytesPerPixel;
|
||||
levelSize = levelRowBytes * levelDimensions.height();
|
||||
|
||||
// TODO: can this all be done in one go?
|
||||
[blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
|
||||
sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
|
||||
sourceOffset: slice.fOffset
|
||||
sourceBytesPerRow: levelRowBytes
|
||||
sourceBytesPerImage: levelSize
|
||||
sourceSize: MTLSizeMake(levelDimensions.width(),
|
||||
levelDimensions.height(), 1)
|
||||
levelDimensions.height(),
|
||||
1)
|
||||
toTexture: mtlTexture
|
||||
destinationSlice: 0
|
||||
destinationLevel: currentMipLevel
|
||||
destinationOrigin: origin];
|
||||
|
||||
levelDimensions = { std::max(1, levelDimensions.width() / 2),
|
||||
std::max(1, levelDimensions.height() / 2) };
|
||||
levelDimensions = {std::max(1, levelDimensions.width() / 2),
|
||||
std::max(1, levelDimensions.height() / 2)};
|
||||
}
|
||||
#ifdef SK_BUILD_FOR_MAC
|
||||
[mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
|
||||
@ -1088,8 +1046,84 @@ GrBackendTexture GrMtlGpu::onCreateCompressedBackendTexture(

bool GrMtlGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
sk_sp<GrRefCntedCallback> finishedCallback,
const BackendTextureData* data) {
return this->onUpdateBackendTexture(backendTexture, std::move(finishedCallback), data);
const void* data,
size_t size) {
GrMtlTextureInfo info;
SkAssertResult(backendTexture.getMtlTextureInfo(&info));

id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());

int numMipLevels = mtlTexture.mipmapLevelCount;
GrMipmapped mipMapped = numMipLevels > 1 ? GrMipmapped::kYes : GrMipmapped::kNo;

SkImage::CompressionType compression =
GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
SkASSERT(compression != SkImage::CompressionType::kNone);

// Create a transfer buffer and fill with data.
SkSTArray<16, size_t> individualMipOffsets;
size_t combinedBufferSize;
combinedBufferSize = SkCompressedDataSize(compression,
backendTexture.dimensions(),
&individualMipOffsets,
mipMapped == GrMipmapped::kYes);
SkASSERT(individualMipOffsets.count() == numMipLevels);

#ifdef SK_BUILD_FOR_MAC
static const size_t kMinAlignment = 4;
#else
static const size_t kMinAlignment = 1;
#endif
size_t alignment = std::max(SkCompressedBlockSize(compression), kMinAlignment);
GrStagingBufferManager::Slice slice =
fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
if (!slice.fBuffer) {
return false;
}
char* buffer = (char*)slice.fOffsetMapPtr;

memcpy(buffer, data, size);

// Transfer buffer contents to texture
MTLOrigin origin = MTLOriginMake(0, 0, 0);

GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
id<MTLBlitCommandEncoder> blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

SkISize levelDimensions(backendTexture.dimensions());
for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
size_t levelRowBytes;
size_t levelSize;

levelRowBytes = GrCompressedRowBytes(compression, levelDimensions.width());
levelSize = SkCompressedDataSize(compression, levelDimensions, nullptr, false);

// TODO: can this all be done in one go?
[blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
sourceBytesPerRow: levelRowBytes
sourceBytesPerImage: levelSize
sourceSize: MTLSizeMake(levelDimensions.width(),
levelDimensions.height(),
1)
toTexture: mtlTexture
destinationSlice: 0
destinationLevel: currentMipLevel
destinationOrigin: origin];

levelDimensions = {std::max(1, levelDimensions.width() / 2),
std::max(1, levelDimensions.height() / 2)};
}
#ifdef SK_BUILD_FOR_MAC
[mtlBuffer->mtlBuffer() didModifyRange:NSMakeRange(slice.fOffset, combinedBufferSize)];
#endif

if (finishedCallback) {
this->addFinishedCallback(std::move(finishedCallback));
}

return true;
}

void GrMtlGpu::deleteBackendTexture(const GrBackendTexture& tex) {
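With the color and pixmap paths removed from the GrGpu backends, onUpdateCompressedBackendTexture above only receives raw bytes; the solid-color data is now built one level up. A hedged sketch of what that caller-side step can look like, using the same helpers shown in this change (the hasMipmaps() accessor and the commented updateCompressedBackendTexture call are assumptions about the surrounding plumbing, not code from this diff):

    // Build solid-color data for every mip level of a compressed texture on the
    // CPU, then hand it to the backend as raw bytes. 'backendTexture', 'color'
    // (an SkColor4f), and 'finishedCallback' are assumed to be in scope.
    SkImage::CompressionType compression =
            GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
    SkASSERT(compression != SkImage::CompressionType::kNone);
    GrMipmapped mipmapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo;

    size_t size = SkCompressedDataSize(compression, backendTexture.dimensions(),
                                       /*individualMipOffsets=*/nullptr,
                                       mipmapped == GrMipmapped::kYes);
    SkAutoMalloc storage(size);
    GrFillInCompressedData(compression, backendTexture.dimensions(), mipmapped,
                           (char*)storage.get(), color);
    // Illustrative only: feed the bytes through the narrowed update entry point.
    // gpu->updateCompressedBackendTexture(backendTexture, std::move(finishedCallback),
    //                                     storage.get(), size);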
@ -728,14 +728,17 @@ bool GrVkGpu::uploadTexDataLinear(GrVkAttachment* texAttachment, int left, int t

// This fills in the 'regions' vector in preparation for copying a buffer to an image.
// 'individualMipOffsets' is filled in as a side-effect.
static size_t fill_in_regions(GrStagingBufferManager* stagingBufferManager,
SkTArray<VkBufferImageCopy>* regions,
SkTArray<size_t>* individualMipOffsets,
GrStagingBufferManager::Slice* slice,
SkImage::CompressionType compression,
VkFormat vkFormat, SkISize dimensions, GrMipmapped mipMapped) {
static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
SkTArray<VkBufferImageCopy>* regions,
SkTArray<size_t>* individualMipOffsets,
GrStagingBufferManager::Slice* slice,
SkImage::CompressionType compression,
VkFormat vkFormat,
SkISize dimensions,
GrMipmapped mipmapped) {
SkASSERT(compression != SkImage::CompressionType::kNone);
int numMipLevels = 1;
if (mipMapped == GrMipmapped::kYes) {
if (mipmapped == GrMipmapped::kYes) {
numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
}

@ -744,15 +747,10 @@ static size_t fill_in_regions(GrStagingBufferManager* stagingBufferManager,

size_t bytesPerBlock = GrVkFormatBytesPerBlock(vkFormat);

size_t combinedBufferSize;
if (compression == SkImage::CompressionType::kNone) {
combinedBufferSize = GrComputeTightCombinedBufferSize(bytesPerBlock, dimensions,
individualMipOffsets,
numMipLevels);
} else {
combinedBufferSize = SkCompressedDataSize(compression, dimensions, individualMipOffsets,
mipMapped == GrMipmapped::kYes);
}
size_t bufferSize = SkCompressedDataSize(compression,
dimensions,
individualMipOffsets,
mipmapped == GrMipmapped::kYes);
SkASSERT(individualMipOffsets->count() == numMipLevels);

// Get a staging buffer slice to hold our mip data.
@ -763,7 +761,7 @@ static size_t fill_in_regions(GrStagingBufferManager* stagingBufferManager,
case 2: alignment *= 2; break; // alignment is a multiple of 2 but not 4.
default: alignment *= 4; break; // alignment is not a multiple of 2.
}
*slice = stagingBufferManager->allocateStagingBufferSlice(combinedBufferSize, alignment);
*slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
if (!slice->fBuffer) {
return 0;
}
@ -784,7 +782,7 @@ static size_t fill_in_regions(GrStagingBufferManager* stagingBufferManager,
std::max(1, dimensions.height()/2)};
}

return combinedBufferSize;
return bufferSize;
}

bool GrVkGpu::uploadTexDataOptimal(GrVkAttachment* texAttachment, int left, int top, int width,
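fill_in_compressed_regions walks the mip chain, so each level's byte size, row pitch, and offset into the staging slice come from the compressed block layout rather than from bytes-per-pixel math. A standalone sketch of just that per-level arithmetic, using the same helpers (the 64x64 BC1 inputs are illustrative):

    // Per-level sizes and offsets for a compressed mip chain, as computed inside
    // fill_in_compressed_regions. Dimensions and format are illustrative.
    SkISize dims = {64, 64};
    SkImage::CompressionType compression = SkImage::CompressionType::kBC1_RGB8_UNORM;
    int numMipLevels = SkMipmap::ComputeLevelCount(dims.width(), dims.height()) + 1;  // 7 for 64x64

    SkTArray<size_t> mipOffsets;
    size_t bufferSize = SkCompressedDataSize(compression, dims, &mipOffsets, /*mipmapped=*/true);
    SkASSERT(mipOffsets.count() == numMipLevels);

    SkISize levelDims = dims;
    for (int level = 0; level < numMipLevels; ++level) {
        size_t levelRowBytes = GrCompressedRowBytes(compression, levelDims.width());
        size_t levelSize = SkCompressedDataSize(compression, levelDims, nullptr, false);
        // The copy for this level starts at mipOffsets[level] within the staging
        // slice and spans levelSize bytes, levelRowBytes per row of blocks.
        levelDims = {std::max(1, levelDims.width() / 2), std::max(1, levelDims.height() / 2)};
    }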
@ -932,10 +930,14 @@ bool GrVkGpu::uploadTexDataCompressed(GrVkAttachment* uploadTexture,
GrStagingBufferManager::Slice slice;
SkTArray<VkBufferImageCopy> regions;
SkTArray<size_t> individualMipOffsets;
SkDEBUGCODE(size_t combinedBufferSize =) fill_in_regions(&fStagingBufferManager,
&regions, &individualMipOffsets,
&slice, compression, vkFormat,
dimensions, mipMapped);
SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
&regions,
&individualMipOffsets,
&slice,
compression,
vkFormat,
dimensions,
mipMapped);
if (!slice.fBuffer) {
return false;
}
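The regions that fill_in_compressed_regions returns are what copyBufferToImage later consumes: one VkBufferImageCopy per mip level, pointing at that level's offset within the staging slice. A hedged sketch of what a single entry plausibly contains (field values are illustrative; the helper's exact bookkeeping is not shown in this hunk):

    // One VkBufferImageCopy per compressed mip level. 'slice', 'individualMipOffsets',
    // 'level', 'levelDims', and 'regions' are assumed to be in scope.
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset      = slice.fOffset + individualMipOffsets[level];
    region.bufferRowLength   = 0;  // 0 means rows are tightly packed
    region.bufferImageHeight = 0;  // 0 means the image is tightly packed
    region.imageSubresource  = {VK_IMAGE_ASPECT_COLOR_BIT, (uint32_t)level, 0, 1};
    region.imageOffset       = {0, 0, 0};
    region.imageExtent       = {(uint32_t)levelDims.width(), (uint32_t)levelDims.height(), 1};
    regions.push_back(region);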
@ -1461,22 +1463,6 @@ bool copy_src_data(char* mapPtr,
return true;
}

bool copy_compressed_data(GrVkGpu* gpu, char* mapPtr,
const void* rawData, size_t dataSize) {
SkASSERT(mapPtr);
memcpy(mapPtr, rawData, dataSize);
return true;
}

bool generate_compressed_data(GrVkGpu* gpu, char* mapPtr,
SkImage::CompressionType compression, SkISize dimensions,
GrMipmapped mipMapped, const SkColor4f& color) {
SkASSERT(mapPtr);
GrFillInCompressedData(compression, dimensions, mipMapped, mapPtr, color);

return true;
}

bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
SkISize dimensions,
int sampleCnt,
@ -1544,9 +1530,9 @@ bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
return true;
}

bool GrVkGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
sk_sp<GrRefCntedCallback> finishedCallback,
const BackendTextureData* data) {
bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
sk_sp<GrRefCntedCallback> finishedCallback,
std::array<float, 4> color) {
GrVkImageInfo info;
SkAssertResult(backendTexture.getVkImageInfo(&info));

@ -1570,68 +1556,23 @@ bool GrVkGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
false);

// Unfortunately, CmdClearColorImage doesn't work for compressed formats
bool fastPath = data->type() == BackendTextureData::Type::kColor &&
!GrVkFormatIsCompressed(info.fFormat);
// CmdClearColorImage doesn't work for compressed formats
SkASSERT(!GrVkFormatIsCompressed(info.fFormat));

if (fastPath) {
SkASSERT(data->type() == BackendTextureData::Type::kColor);
VkClearColorValue vkColor;
SkColor4f color = data->color();
// If we ever support SINT or UINT formats this needs to be updated to use the int32 and
// uint32 union members in those cases.
vkColor.float32[0] = color.fR;
vkColor.float32[1] = color.fG;
vkColor.float32[2] = color.fB;
vkColor.float32[3] = color.fA;
VkImageSubresourceRange range;
range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
range.baseArrayLayer = 0;
range.baseMipLevel = 0;
range.layerCount = 1;
range.levelCount = info.fLevelCount;
cmdBuffer->clearColorImage(this, texAttachment, &vkColor, 1, &range);
} else {
SkImage::CompressionType compression = GrBackendFormatToCompressionType(
backendTexture.getBackendFormat());

SkTArray<VkBufferImageCopy> regions;
SkTArray<size_t> individualMipOffsets;
GrStagingBufferManager::Slice slice;

fill_in_regions(&fStagingBufferManager, &regions, &individualMipOffsets,
&slice, compression, info.fFormat, backendTexture.dimensions(),
backendTexture.fMipmapped);

if (!slice.fBuffer) {
return false;
}

bool result;
if (data->type() == BackendTextureData::Type::kPixmaps) {
result = copy_src_data((char*)slice.fOffsetMapPtr, info.fFormat, individualMipOffsets,
data->pixmaps(), info.fLevelCount);
} else if (data->type() == BackendTextureData::Type::kCompressed) {
result = copy_compressed_data(this, (char*)slice.fOffsetMapPtr,
data->compressedData(), data->compressedSize());
} else {
SkASSERT(data->type() == BackendTextureData::Type::kColor);
result = generate_compressed_data(this, (char*)slice.fOffsetMapPtr, compression,
backendTexture.dimensions(),
backendTexture.fMipmapped, data->color());
}

cmdBuffer->addGrSurface(texture);
// Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
// because we don't need the command buffer to ref the buffer here. The reason is that
// the buffer is coming from the staging manager and the staging manager will make sure the
// command buffer has a ref on the buffer. This avoids having to add and remove a ref for
// every upload in the frame.
const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
cmdBuffer->copyBufferToImage(this, vkBuffer->vkBuffer(), texAttachment,
texAttachment->currentLayout(), regions.count(),
regions.begin());
}
VkClearColorValue vkColor;
// If we ever support SINT or UINT formats this needs to be updated to use the int32 and
// uint32 union members in those cases.
vkColor.float32[0] = color[0];
vkColor.float32[1] = color[1];
vkColor.float32[2] = color[2];
vkColor.float32[3] = color[3];
VkImageSubresourceRange range;
range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
range.baseArrayLayer = 0;
range.baseMipLevel = 0;
range.layerCount = 1;
range.levelCount = info.fLevelCount;
cmdBuffer->clearColorImage(this, texAttachment, &vkColor, 1, &range);

// Change image layout to shader read since if we use this texture as a borrowed
// texture within Ganesh we require that its layout be set to that
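onClearBackendTexture now receives the clear color as a plain std::array<float, 4>, so flattening the SkColor4f happens in the caller; the four floats are then copied straight into VkClearColorValue::float32 as shown above. A brief caller-side sketch (the clearBackendTexture call is illustrative of the narrowed entry point, not a copy of the caller's code):

    // Flatten an SkColor4f into the std::array<float, 4> form the backends expect.
    // SkColor4f::array() performs the conversion.
    SkColor4f skColor = {0.f, 1.f, 0.f, 1.f};     // opaque green
    std::array<float, 4> rgba = skColor.array();  // {R, G, B, A}
    // gpu->clearBackendTexture(backendTexture, std::move(finishedCallback), rgba);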
@ -1688,8 +1629,82 @@ GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(

bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
sk_sp<GrRefCntedCallback> finishedCallback,
const BackendTextureData* data) {
return this->onUpdateBackendTexture(backendTexture, std::move(finishedCallback), data);
const void* data,
size_t size) {
GrVkImageInfo info;
SkAssertResult(backendTexture.getVkImageInfo(&info));

sk_sp<GrBackendSurfaceMutableStateImpl> mutableState = backendTexture.getMutableState();
SkASSERT(mutableState);
sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
backendTexture.dimensions(),
kBorrow_GrWrapOwnership,
GrWrapCacheable::kNo,
kRW_GrIOType,
info,
std::move(mutableState));
if (!texture) {
return false;
}

GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
if (!cmdBuffer) {
return false;
}
GrVkAttachment* attachment = texture->textureAttachment();
attachment->setImageLayout(this,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
false);

SkImage::CompressionType compression =
GrBackendFormatToCompressionType(backendTexture.getBackendFormat());

SkTArray<VkBufferImageCopy> regions;
SkTArray<size_t> individualMipOffsets;
GrStagingBufferManager::Slice slice;

fill_in_compressed_regions(&fStagingBufferManager,
&regions,
&individualMipOffsets,
&slice,
compression,
info.fFormat,
backendTexture.dimensions(),
backendTexture.fMipmapped);

if (!slice.fBuffer) {
return false;
}

memcpy(slice.fOffsetMapPtr, data, size);

cmdBuffer->addGrSurface(texture);
// Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
// because we don't need the command buffer to ref the buffer here. The reason is that
// the buffer is coming from the staging manager and the staging manager will make sure the
// command buffer has a ref on the buffer. This avoids having to add and remove a ref for
// every upload in the frame.
cmdBuffer->copyBufferToImage(this,
static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
attachment,
attachment->currentLayout(),
regions.count(),
regions.begin());

// Change image layout to shader read since if we use this texture as a borrowed
// texture within Ganesh we require that its layout be set to that
attachment->setImageLayout(this,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
false);

if (finishedCallback) {
this->addFinishedCallback(std::move(finishedCallback));
}
return true;
}

void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
@ -218,13 +218,14 @@ private:
GrMipmapped,
GrProtected) override;

bool onUpdateBackendTexture(const GrBackendTexture&,
sk_sp<GrRefCntedCallback> finishedCallback,
const BackendTextureData*) override;
bool onClearBackendTexture(const GrBackendTexture&,
sk_sp<GrRefCntedCallback> finishedCallback,
std::array<float, 4> color) override;

bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
sk_sp<GrRefCntedCallback> finishedCallback,
const BackendTextureData*) override;
const void* data,
size_t length) override;

bool setBackendSurfaceState(GrVkImageInfo info,
sk_sp<GrBackendSurfaceMutableStateImpl> currentState,
@ -222,7 +222,7 @@ static void check_solid_pixmap(skiatest::Reporter* reporter,
static SkColor4f get_expected_color(SkColor4f orig, GrColorType ct) {
GrImageInfo ii(ct, kUnpremul_SkAlphaType, nullptr, {1, 1});
std::unique_ptr<char[]> data(new char[ii.minRowBytes()]);
GrClearImage(ii, data.get(), ii.minRowBytes(), orig);
GrClearImage(ii, data.get(), ii.minRowBytes(), orig.array());

// Read back to SkColor4f.
SkColor4f result;
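get_expected_color round-trips the color through GrClearImage at the destination color type and then reads the single pixel back, so the test compares against the same quantization the clear path produces. The read-back line is cut off above; a hedged sketch of how it could be completed with GrConvertPixels (the GrPixmap/GrCPixmap construction here is illustrative, not necessarily the test's exact code):

    // Convert the 1x1 cleared pixel back into an SkColor4f via an RGBA_F32 pixmap.
    // 'ii', 'data', and 'result' are the variables from the snippet above.
    GrImageInfo floatInfo(GrColorType::kRGBA_F32, kUnpremul_SkAlphaType, nullptr, {1, 1});
    GrPixmap dstPM(floatInfo, result.vec(), floatInfo.minRowBytes());
    GrCPixmap srcPM(ii, data.get(), ii.minRowBytes());
    GrConvertPixels(dstPM, srcPM);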