Add offset to memory allocations

This is setting up for suballocations within one large allocation.

BUG=skia:5031
GOLD_TRYBOT_URL= https://gold.skia.org/search?issue=2018933004
TBR=bsalomon@google.com

Review-Url: https://codereview.chromium.org/2018933004

Author:  jvanverth
Date:    2016-06-01 09:39:15 -07:00 (committed by Commit bot)
Parent:  4a603fc591
Commit:  1e305ba0d6
15 changed files with 97 additions and 60 deletions
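
The new GrVkAlloc struct pairs the VkDeviceMemory handle with a VkDeviceSize offset, so a resource can describe a slice of a shared block instead of owning an entire vkAllocateMemory result. A minimal sketch of the direction this enables, assuming a hypothetical bump suballocator (the class and names below are illustrative, not part of this CL):

    #include <vulkan/vulkan.h>

    // Mirrors the GrVkAlloc added in GrVkTypes.h below.
    struct GrVkAlloc {
        VkDeviceMemory fMemory;
        VkDeviceSize   fOffset;
    };

    // Hands out offsets within one large block; the whole block is freed at
    // once, so individual suballocations never call vkFreeMemory themselves.
    class BumpSubAllocator {
    public:
        BumpSubAllocator(VkDeviceMemory block, VkDeviceSize size)
            : fBlock(block), fSize(size), fNext(0) {}

        bool suballoc(VkDeviceSize size, VkDeviceSize alignment, GrVkAlloc* out) {
            // alignment comes from VkMemoryRequirements and is a power of two
            VkDeviceSize offset = (fNext + alignment - 1) & ~(alignment - 1);
            if (offset + size > fSize) {
                return false;  // block exhausted
            }
            out->fMemory = fBlock;   // every caller shares one VkDeviceMemory
            out->fOffset = offset;   // callers differ only by offset
            fNext = offset + size;
            return true;
        }

    private:
        VkDeviceMemory fBlock;
        VkDeviceSize   fSize;
        VkDeviceSize   fNext;
    };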

include/gpu/vk/GrVkTypes.h

@@ -9,6 +9,7 @@
 #ifndef GrVkTypes_DEFINED
 #define GrVkTypes_DEFINED
 
+#include "GrTypes.h"
 #include "vk/GrVkDefines.h"
 
 /**
@@ -29,13 +30,18 @@
  * Types for interacting with Vulkan resources created externally to Skia. GrBackendObjects for
  * Vulkan textures are really const GrVkImageInfo*
  */
+struct GrVkAlloc {
+    VkDeviceMemory fMemory;  // can be VK_NULL_HANDLE iff Tex is an RT and uses borrow semantics
+    VkDeviceSize   fOffset;
+};
+
 struct GrVkImageInfo {
     /**
      * If the image's format is sRGB (GrVkFormatIsSRGB returns true), then the image must have
      * been created with VkImageCreateFlags containing VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.
      */
     VkImage        fImage;
-    VkDeviceMemory fAlloc;  // can be VK_NULL_HANDLE iff Tex is an RT and uses borrow semantics
+    GrVkAlloc      fAlloc;
     VkImageTiling  fImageTiling;
     VkImageLayout  fImageLayout;
     VkFormat       fFormat;
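
Note this is a source-breaking change for embedders that wrap their own Vulkan objects: fAlloc is now a struct rather than a raw VkDeviceMemory. A sketch of the updated client side, with placeholder handles and format values (the tests further down use the same initializer style):

    GrVkImageInfo info;
    info.fImage       = myImage;              // placeholder: app-created VkImage
    info.fAlloc       = { myMemory, 0 };      // was: info.fAlloc = myMemory;
    info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    info.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    info.fFormat      = VK_FORMAT_R8G8B8A8_UNORM;
    // pass &info as the GrBackendObject texture handle, as before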

src/gpu/vk/GrVkBuffer.cpp

@@ -20,7 +20,7 @@
 const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
     VkBuffer buffer;
-    VkDeviceMemory alloc;
+    GrVkAlloc alloc;
 
     // create the buffer object
     VkBufferCreateInfo bufInfo;
@@ -79,7 +79,7 @@ const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& d
     const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc);
     if (!resource) {
         VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
-        VK_CALL(gpu, FreeMemory(gpu->device(), alloc, nullptr));
+        GrVkMemory::FreeBufferMemory(gpu, alloc);
         return nullptr;
     }
@@ -111,9 +111,9 @@ void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
 
 void GrVkBuffer::Resource::freeGPUData(const GrVkGpu* gpu) const {
     SkASSERT(fBuffer);
-    SkASSERT(fAlloc);
+    SkASSERT(fAlloc.fMemory);
     VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
-    VK_CALL(gpu, FreeMemory(gpu->device(), fAlloc, nullptr));
+    GrVkMemory::FreeBufferMemory(gpu, fAlloc);
 }
@@ -141,7 +141,9 @@ void* GrVkBuffer::vkMap(const GrVkGpu* gpu) {
         fResource = Create(gpu, fDesc);
     }
 
-    VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, VK_WHOLE_SIZE, 0, &fMapPtr));
+    const GrVkAlloc& alloc = this->alloc();
+    VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc.fMemory, alloc.fOffset,
+                                          VK_WHOLE_SIZE, 0, &fMapPtr));
     if (err) {
         fMapPtr = nullptr;
     }
@@ -154,7 +156,7 @@ void GrVkBuffer::vkUnmap(const GrVkGpu* gpu) {
     VALIDATE();
     SkASSERT(this->vkIsMapped());
 
-    VK_CALL(gpu, UnmapMemory(gpu->device(), alloc()));
+    VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
     fMapPtr = nullptr;
 }
@@ -182,7 +184,9 @@ bool GrVkBuffer::vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSiz
     }
 
     void* mapPtr;
-    VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, srcSizeInBytes, 0, &mapPtr));
+    const GrVkAlloc& alloc = this->alloc();
+    VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc.fMemory, alloc.fOffset,
+                                          srcSizeInBytes, 0, &mapPtr));
 
     if (VK_SUCCESS != err) {
         return false;
@@ -190,7 +194,7 @@ bool GrVkBuffer::vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSiz
 
     memcpy(mapPtr, src, srcSizeInBytes);
 
-    VK_CALL(gpu, UnmapMemory(gpu->device(), alloc()));
+    VK_CALL(gpu, UnmapMemory(gpu->device(), alloc.fMemory));
 
     return true;
 }
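
One subtlety in vkMap above, worth flagging for the follow-up suballocation work (an observation, not something this CL changes): with a nonzero fOffset, VK_WHOLE_SIZE maps from the offset to the end of the entire VkDeviceMemory, not to the end of the buffer's own slice. A sketch of the semantics in plain Vulkan, with illustrative names:

    void* mapPtr = nullptr;
    // Maps [alloc.fOffset, end of alloc.fMemory); once buffers are
    // suballocated, the size should come from the buffer itself instead.
    VkResult err = vkMapMemory(device, alloc.fMemory, alloc.fOffset,
                               VK_WHOLE_SIZE, 0, &mapPtr);
    if (VK_SUCCESS == err) {
        // ... write data through mapPtr ...
        vkUnmapMemory(device, alloc.fMemory);
    }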

src/gpu/vk/GrVkBuffer.h

@@ -10,6 +10,7 @@
 #include "GrVkResource.h"
 #include "vk/GrVkDefines.h"
+#include "vk/GrVkTypes.h"
 
 class GrVkGpu;
@@ -24,8 +25,8 @@ public:
         SkASSERT(!fResource);
     }
 
-    VkBuffer buffer() const { return fResource->fBuffer; }
-    VkDeviceMemory alloc() const { return fResource->fAlloc; }
+    VkBuffer buffer() const { return fResource->fBuffer; }
+    const GrVkAlloc& alloc() const { return fResource->fAlloc; }
     const GrVkResource* resource() const { return fResource; }
     size_t size() const { return fDesc.fSizeInBytes; }
@@ -53,10 +54,12 @@ protected:
     class Resource : public GrVkResource {
     public:
-        Resource(VkBuffer buf, VkDeviceMemory alloc) : INHERITED(), fBuffer(buf), fAlloc(alloc) {}
+        Resource(VkBuffer buf, const GrVkAlloc& alloc)
+            : INHERITED(), fBuffer(buf), fAlloc(alloc) {}
 
-        VkBuffer       fBuffer;
-        VkDeviceMemory fAlloc;
+        VkBuffer  fBuffer;
+        GrVkAlloc fAlloc;
 
     private:
         void freeGPUData(const GrVkGpu* gpu) const;

src/gpu/vk/GrVkGpu.cpp

@@ -331,10 +331,11 @@ bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex,
                                                     &layout));
 
     int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height : top;
-    VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
+    const GrVkAlloc& alloc = tex->alloc();
+    VkDeviceSize offset = alloc.fOffset + texTop*layout.rowPitch + left*bpp;
     VkDeviceSize size = height*layout.rowPitch;
 
     void* mapPtr;
-    err = GR_VK_CALL(interface, MapMemory(fDevice, tex->memory(), offset, size, 0, &mapPtr));
+    err = GR_VK_CALL(interface, MapMemory(fDevice, alloc.fMemory, offset, size, 0, &mapPtr));
     if (err) {
         return false;
     }
@@ -358,7 +359,7 @@ bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex,
         }
     }
 
-    GR_VK_CALL(interface, UnmapMemory(fDevice, tex->memory()));
+    GR_VK_CALL(interface, UnmapMemory(fDevice, alloc.fMemory));
 
     return true;
 }
@@ -617,7 +618,7 @@ GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
     }
 
     const GrVkImageInfo* info = reinterpret_cast<const GrVkImageInfo*>(desc.fTextureHandle);
-    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc) {
+    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc.fMemory) {
         return nullptr;
     }
 #ifdef SK_DEBUG
@@ -660,7 +661,7 @@ GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDe
     const GrVkImageInfo* info =
         reinterpret_cast<const GrVkImageInfo*>(wrapDesc.fRenderTargetHandle);
     if (VK_NULL_HANDLE == info->fImage ||
-        (VK_NULL_HANDLE == info->fAlloc && kAdopt_GrWrapOwnership == ownership)) {
+        (VK_NULL_HANDLE == info->fAlloc.fMemory && kAdopt_GrWrapOwnership == ownership)) {
         return nullptr;
     }
@@ -873,7 +874,7 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
                                            VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 
     VkImage image = VK_NULL_HANDLE;
-    VkDeviceMemory alloc = VK_NULL_HANDLE;
+    GrVkAlloc alloc = { VK_NULL_HANDLE, 0 };
 
     VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
     VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
@@ -924,9 +925,10 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
         VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));
 
         void* mapPtr;
-        err = VK_CALL(MapMemory(fDevice, alloc, 0, layout.rowPitch * h, 0, &mapPtr));
+        err = VK_CALL(MapMemory(fDevice, alloc.fMemory, alloc.fOffset, layout.rowPitch * h,
+                                0, &mapPtr));
         if (err) {
-            VK_CALL(FreeMemory(this->device(), alloc, nullptr));
+            GrVkMemory::FreeImageMemory(this, alloc);
             VK_CALL(DestroyImage(this->device(), image, nullptr));
             return 0;
         }
@@ -941,7 +943,7 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
             SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes,
                          rowCopyBytes, h);
         }
-        VK_CALL(UnmapMemory(fDevice, alloc));
+        VK_CALL(UnmapMemory(fDevice, alloc.fMemory));
     } else {
         // TODO: Add support for copying to optimal tiling
         SkASSERT(false);
@@ -962,7 +964,7 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
 
 bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
     const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id);
-    if (backend && backend->fImage && backend->fAlloc) {
+    if (backend && backend->fImage && backend->fAlloc.fMemory) {
         VkMemoryRequirements req;
         memset(&req, 0, sizeof(req));
         GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
@@ -984,7 +986,7 @@ void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon)
 
         // something in the command buffer may still be using this, so force submit
         this->submitCommandBuffer(kForce_SyncQueue);
-        VK_CALL(FreeMemory(this->device(), backend->fAlloc, nullptr));
+        GrVkMemory::FreeImageMemory(this, backend->fAlloc);
         VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr));
     }
     delete backend;

src/gpu/vk/GrVkImage.cpp

@@ -62,7 +62,7 @@ void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
 
 bool GrVkImage::InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
     VkImage image = 0;
-    VkDeviceMemory alloc;
+    GrVkAlloc alloc;
 
     VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling)
         ? VK_IMAGE_LAYOUT_PREINITIALIZED
@@ -118,10 +118,10 @@ bool GrVkImage::InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, Gr
 
 void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
     VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
-    VK_CALL(gpu, FreeMemory(gpu->device(), info->fAlloc, nullptr));
+    GrVkMemory::FreeImageMemory(gpu, info->fAlloc);
 }
 
-void GrVkImage::setNewResource(VkImage image, VkDeviceMemory alloc) {
+void GrVkImage::setNewResource(VkImage image, const GrVkAlloc& alloc) {
     fResource = new Resource(image, alloc);
 }
@@ -146,7 +146,7 @@ void GrVkImage::abandonImage() {
 
 void GrVkImage::Resource::freeGPUData(const GrVkGpu* gpu) const {
     VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
-    VK_CALL(gpu, FreeMemory(gpu->device(), fAlloc, nullptr));
+    GrVkMemory::FreeImageMemory(gpu, fAlloc);
 }
 
 void GrVkImage::BorrowedResource::freeGPUData(const GrVkGpu* gpu) const {

src/gpu/vk/GrVkImage.h

@@ -41,7 +41,7 @@ public:
     virtual ~GrVkImage();
 
     VkImage image() const { return fInfo.fImage; }
-    VkDeviceMemory memory() const { return fInfo.fAlloc; }
+    const GrVkAlloc& alloc() const { return fInfo.fAlloc; }
     VkFormat imageFormat() const { return fInfo.fFormat; }
     const Resource* resource() const { return fResource; }
     bool isLinearTiled() const {
@@ -87,7 +87,7 @@ protected:
     void releaseImage(const GrVkGpu* gpu);
     void abandonImage();
 
-    void setNewResource(VkImage image, VkDeviceMemory alloc);
+    void setNewResource(VkImage image, const GrVkAlloc& alloc);
 
     GrVkImageInfo fInfo;
     bool          fIsBorrowed;
@@ -98,19 +98,21 @@ private:
     public:
         Resource()
             : INHERITED()
-            , fImage(VK_NULL_HANDLE)
-            , fAlloc(VK_NULL_HANDLE) {
+            , fImage(VK_NULL_HANDLE) {
+            fAlloc.fMemory = VK_NULL_HANDLE;
+            fAlloc.fOffset = 0;
         }
 
-        Resource(VkImage image, VkDeviceMemory alloc) : fImage(image), fAlloc(alloc) {}
+        Resource(VkImage image, const GrVkAlloc& alloc)
+            : fImage(image), fAlloc(alloc) {}
 
         ~Resource() override {}
 
    private:
        void freeGPUData(const GrVkGpu* gpu) const override;
 
-        VkImage        fImage;
-        VkDeviceMemory fAlloc;
+        VkImage   fImage;
+        GrVkAlloc fAlloc;
 
        typedef GrVkResource INHERITED;
    };
@@ -118,7 +120,8 @@ private:
     // for wrapped textures
     class BorrowedResource : public Resource {
     public:
-        BorrowedResource(VkImage image, VkDeviceMemory alloc) : Resource(image, alloc) {
+        BorrowedResource(VkImage image, const GrVkAlloc& alloc)
+            : Resource(image, alloc) {
         }
     private:
         void freeGPUData(const GrVkGpu* gpu) const override;

src/gpu/vk/GrVkMemory.cpp

@@ -61,49 +61,65 @@ static bool alloc_device_memory(const GrVkGpu* gpu,
 bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                           VkBuffer buffer,
                                           const VkMemoryPropertyFlags flags,
-                                          VkDeviceMemory* memory) {
+                                          GrVkAlloc* alloc) {
     const GrVkInterface* iface = gpu->vkInterface();
     VkDevice device = gpu->device();
 
     VkMemoryRequirements memReqs;
     GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));
 
-    if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
+    if (!alloc_device_memory(gpu, &memReqs, flags, &alloc->fMemory)) {
         return false;
     }
+    // for now, offset is always 0
+    alloc->fOffset = 0;
 
-    // Bind Memory to queue
-    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer, *memory, 0));
+    // Bind Memory to device
+    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer,
+                                                      alloc->fMemory, alloc->fOffset));
     if (err) {
-        GR_VK_CALL(iface, FreeMemory(device, *memory, nullptr));
+        GR_VK_CALL(iface, FreeMemory(device, alloc->fMemory, nullptr));
         return false;
     }
 
     return true;
 }
 
+void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
+    const GrVkInterface* iface = gpu->vkInterface();
+    GR_VK_CALL(iface, FreeMemory(gpu->device(), alloc.fMemory, nullptr));
+}
+
 bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                          VkImage image,
                                          const VkMemoryPropertyFlags flags,
-                                         VkDeviceMemory* memory) {
+                                         GrVkAlloc* alloc) {
     const GrVkInterface* iface = gpu->vkInterface();
     VkDevice device = gpu->device();
 
     VkMemoryRequirements memReqs;
     GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));
 
-    if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
+    if (!alloc_device_memory(gpu, &memReqs, flags, &alloc->fMemory)) {
         return false;
     }
+    // for now, offset is always 0
+    alloc->fOffset = 0;
 
-    // Bind Memory to queue
-    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image, *memory, 0));
+    // Bind Memory to device
+    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image,
+                                                     alloc->fMemory, alloc->fOffset));
     if (err) {
-        GR_VK_CALL(iface, FreeMemory(device, *memory, nullptr));
+        GR_VK_CALL(iface, FreeMemory(device, alloc->fMemory, nullptr));
         return false;
     }
 
     return true;
 }
 
+void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
+    const GrVkInterface* iface = gpu->vkInterface();
+    GR_VK_CALL(iface, FreeMemory(gpu->device(), alloc.fMemory, nullptr));
+}
+
 VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
     if (VK_IMAGE_LAYOUT_GENERAL == layout) {
         return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
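
With fOffset pinned to 0 for now, both Bind*Memory calls behave exactly as before; what changes is that allocation and freeing now funnel through GrVkMemory, giving a future pool a single choke point. When offsets become nonzero, they must satisfy VkMemoryRequirements::alignment, and the shared block's memory type must be allowed by memoryTypeBits. A minimal sketch of a suballocated bind under those assumptions (hypothetical helper, not this CL's code):

    // Bind a buffer at a pool-chosen offset inside a shared block.
    VkResult bind_at_offset(VkDevice device, VkBuffer buffer,
                            VkDeviceMemory pooledBlock, VkDeviceSize offset) {
        VkMemoryRequirements memReqs;
        vkGetBufferMemoryRequirements(device, buffer, &memReqs);
        SkASSERT(0 == offset % memReqs.alignment);  // pool pre-aligns offsets
        return vkBindBufferMemory(device, buffer, pooledBlock, offset);
    }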

src/gpu/vk/GrVkMemory.h

@@ -9,6 +9,7 @@
 #define GrVkMemory_DEFINED
 
 #include "vk/GrVkDefines.h"
+#include "vk/GrVkTypes.h"
 
 class GrVkGpu;
@@ -20,12 +21,14 @@ namespace GrVkMemory {
     bool AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                   VkBuffer buffer,
                                   const VkMemoryPropertyFlags flags,
-                                  VkDeviceMemory* memory);
+                                  GrVkAlloc* alloc);
+    void FreeBufferMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc);
 
     bool AllocAndBindImageMemory(const GrVkGpu* gpu,
                                  VkImage image,
                                  const VkMemoryPropertyFlags flags,
-                                 VkDeviceMemory* memory);
+                                 GrVkAlloc* alloc);
+    void FreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc);
 
     VkPipelineStageFlags LayoutToPipelineStageFlags(const VkImageLayout layout);

src/gpu/vk/GrVkRenderTarget.cpp

@@ -206,7 +206,7 @@ GrVkRenderTarget::CreateWrappedRenderTarget(GrVkGpu* gpu,
     SkASSERT(info);
     // We can wrap a rendertarget without its allocation, as long as we don't take ownership
     SkASSERT(VK_NULL_HANDLE != info->fImage);
-    SkASSERT(VK_NULL_HANDLE != info->fAlloc || kAdopt_GrWrapOwnership != ownership);
+    SkASSERT(VK_NULL_HANDLE != info->fAlloc.fMemory || kAdopt_GrWrapOwnership != ownership);
 
     GrVkImage::Wrapped wrapped = kBorrow_GrWrapOwnership == ownership ? GrVkImage::kBorrowed_Wrapped
                                                                       : GrVkImage::kAdopted_Wrapped;

src/gpu/vk/GrVkStencilAttachment.cpp

@@ -53,8 +53,7 @@ GrVkStencilAttachment* GrVkStencilAttachment::Create(GrVkGpu* gpu,
                                                          format.fInternalFormat,
                                                          GrVkImageView::kStencil_Type, 1);
     if (!imageView) {
-        VK_CALL(gpu, DestroyImage(gpu->device(), info.fImage, nullptr));
-        VK_CALL(gpu, FreeMemory(gpu->device(), info.fAlloc, nullptr));
+        GrVkImage::DestroyImageInfo(gpu, &info);
         return nullptr;
     }

src/gpu/vk/GrVkTexture.cpp

@@ -83,7 +83,7 @@ GrVkTexture* GrVkTexture::CreateWrappedTexture(GrVkGpu* gpu,
                                                const GrVkImageInfo* info) {
     SkASSERT(info);
     // Wrapped textures require both image and allocation (because they can be mapped)
-    SkASSERT(VK_NULL_HANDLE != info->fImage && VK_NULL_HANDLE != info->fAlloc);
+    SkASSERT(VK_NULL_HANDLE != info->fImage && VK_NULL_HANDLE != info->fAlloc.fMemory);
 
     const GrVkImageView* imageView = GrVkImageView::Create(gpu, info->fImage, info->fFormat,
                                                            GrVkImageView::kColor_Type,

src/gpu/vk/GrVkTextureRenderTarget.cpp

@@ -156,9 +156,8 @@ GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(GrVkGpu* gpu,
                                                           const GrVkImageInfo* info) {
     SkASSERT(info);
     // Wrapped textures require both image and allocation (because they can be mapped)
-    SkASSERT(VK_NULL_HANDLE != info->fImage && VK_NULL_HANDLE != info->fAlloc);
+    SkASSERT(VK_NULL_HANDLE != info->fImage && VK_NULL_HANDLE != info->fAlloc.fMemory);
 
     GrVkImage::Wrapped wrapped = kBorrow_GrWrapOwnership == ownership ? GrVkImage::kBorrowed_Wrapped
                                                                       : GrVkImage::kAdopted_Wrapped;

tests/VkWrapTests.cpp

@@ -53,7 +53,7 @@ void wrap_tex_test(skiatest::Reporter* reporter, GrContext* context) {
     // alloc is null
     backendCopy.fImage = backendTex->fImage;
-    backendCopy.fAlloc = VK_NULL_HANDLE;
+    backendCopy.fAlloc = { VK_NULL_HANDLE, 0 };
     tex = gpu->wrapBackendTexture(desc, kBorrow_GrWrapOwnership);
     REPORTER_ASSERT(reporter, !tex);
     tex = gpu->wrapBackendTexture(desc, kAdopt_GrWrapOwnership);
@@ -98,7 +98,7 @@ void wrap_rt_test(skiatest::Reporter* reporter, GrContext* context) {
     // alloc is null
     backendCopy.fImage = backendTex->fImage;
-    backendCopy.fAlloc = VK_NULL_HANDLE;
+    backendCopy.fAlloc = { VK_NULL_HANDLE, 0 };
     // can wrap null alloc if borrowing
     rt = gpu->wrapBackendRenderTarget(desc, kBorrow_GrWrapOwnership);
     REPORTER_ASSERT(reporter, rt);
@@ -143,7 +143,7 @@ void wrap_trt_test(skiatest::Reporter* reporter, GrContext* context) {
     // alloc is null
     backendCopy.fImage = backendTex->fImage;
-    backendCopy.fAlloc = VK_NULL_HANDLE;
+    backendCopy.fAlloc = { VK_NULL_HANDLE, 0 };
     tex = gpu->wrapBackendTexture(desc, kBorrow_GrWrapOwnership);
     REPORTER_ASSERT(reporter, !tex);
     tex = gpu->wrapBackendTexture(desc, kAdopt_GrWrapOwnership);

tools/viewer/Viewer.cpp

@@ -255,7 +255,9 @@ void Viewer::setupCurrentSlide(int previousSlide) {
         const SkISize slideSize = fSlides[fCurrentSlide]->getDimensions();
         SkRect windowRect = fWindow->getContentRect();
         fDefaultMatrixInv.mapRect(&windowRect);
-        fGesture.setTransLimit(SkRect::MakeWH(slideSize.width(), slideSize.height()), windowRect);
+        fGesture.setTransLimit(SkRect::MakeWH(SkIntToScalar(slideSize.width()),
+                                              SkIntToScalar(slideSize.height())),
+                               windowRect);
     }
 
     this->updateTitle();

VulkanWindowContext.cpp

@@ -265,7 +265,7 @@ void VulkanWindowContext::createBuffers(VkFormat format) {
         GrBackendRenderTargetDesc desc;
         GrVkImageInfo info;
         info.fImage = fImages[i];
-        info.fAlloc = VK_NULL_HANDLE;
+        info.fAlloc = { VK_NULL_HANDLE, 0 };
         info.fImageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
         info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
         info.fFormat = format;