Create API for GrVkMemoryAllocator and implement use of AMD VulkanMemoryAllocator on this API.

Bug: skia:
Change-Id: I1e122e1b11ab308c2f83cb98c36c81511f4507d0
Reviewed-on: https://skia-review.googlesource.com/129980
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Reviewed-by: Jim Van Verth <jvanverth@google.com>
This commit is contained in:
Greg Daniel 2018-05-25 11:02:16 -04:00 committed by Skia Commit-Bot
parent e7e6e22912
commit 26c0e4c1f5
13 changed files with 9949 additions and 4 deletions

View File

@ -616,6 +616,7 @@ optional("gpu") {
# they want. For example this would allow us to remove the fuchsia specific vulkan code in our
# vulkan files.
public_deps += [ "third_party/vulkan" ]
deps += [ "third_party/vulkanmemoryallocator" ]
sources += skia_vk_sources
if (skia_enable_vulkan_debug_layers) {
public_defines += [ "SK_ENABLE_VK_LAYERS" ]

View File

@ -251,6 +251,10 @@ srcs .add("third_party/skcms/skcms.c")
local_includes.add("third_party/skcms")
dm_includes .add("third_party/skcms")
# need to manually include the vulkanmemoryallocator headers. If HWUI ever needs
# direct access to the allocator we need to add it to export_includes as well.
local_includes.add("third_party/vulkanmemoryallocator/")
# No need to list headers.
srcs = {s for s in srcs if not s.endswith('.h')}
dm_srcs = {s for s in dm_srcs if not s.endswith('.h')}

View File

@ -535,8 +535,11 @@ skia_vk_sources = [
"$_include/gpu/vk/GrVkBackendContext.h",
"$_include/gpu/vk/GrVkDefines.h",
"$_include/gpu/vk/GrVkInterface.h",
"$_include/gpu/vk/GrVkMemoryAllocator.h",
"$_include/gpu/vk/GrVkTypes.h",
"$_include/private/GrVkTypesPriv.h",
"$_src/gpu/vk/GrVkAMDMemoryAllocator.cpp",
"$_src/gpu/vk/GrVkAMDMemoryAllocator.h",
"$_src/gpu/vk/GrVkBackendContext.cpp",
"$_src/gpu/vk/GrVkBuffer.cpp",
"$_src/gpu/vk/GrVkBuffer.h",

View File

@ -51,6 +51,82 @@
#error "Vulkan header version is too low"
#endif
// The AMD VulkanMemoryAllocator needs the objects from this extension to be declared.
// If the installed Vulkan headers predate VK_KHR_get_memory_requirements2, declare the
// extension's structs, function-pointer typedefs, and sType constants ourselves. These
// mirror the official extension definitions (spec enum offsets 1000146000-1000146004).
#ifndef VK_KHR_get_memory_requirements2
#define VK_KHR_get_memory_requirements2 1
#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1
#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2"

// Input struct identifying the buffer whose memory requirements are queried.
typedef struct VkBufferMemoryRequirementsInfo2KHR {
    VkStructureType sType;
    const void*     pNext;
    VkBuffer        buffer;
} VkBufferMemoryRequirementsInfo2KHR;

// Input struct identifying the image whose memory requirements are queried.
typedef struct VkImageMemoryRequirementsInfo2KHR {
    VkStructureType sType;
    const void*     pNext;
    VkImage         image;
} VkImageMemoryRequirementsInfo2KHR;

// Input struct identifying the image whose sparse memory requirements are queried.
typedef struct VkImageSparseMemoryRequirementsInfo2KHR {
    VkStructureType sType;
    const void*     pNext;
    VkImage         image;
} VkImageSparseMemoryRequirementsInfo2KHR;

// Extensible output wrapper around VkMemoryRequirements.
typedef struct VkMemoryRequirements2KHR {
    VkStructureType      sType;
    void*                pNext;
    VkMemoryRequirements memoryRequirements;
} VkMemoryRequirements2KHR;

// Extensible output wrapper around VkSparseImageMemoryRequirements.
typedef struct VkSparseImageMemoryRequirements2KHR {
    VkStructureType                  sType;
    void*                            pNext;
    VkSparseImageMemoryRequirements  memoryRequirements;
} VkSparseImageMemoryRequirements2KHR;

// Function-pointer types for the extension's three entry points.
typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2KHR* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements);
typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements);
typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements);

// sType values registered to this extension in the Vulkan registry.
static constexpr VkStructureType VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = (VkStructureType) 1000146000;
static constexpr VkStructureType VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = (VkStructureType) 1000146001;
static constexpr VkStructureType VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = (VkStructureType) 1000146002;
static constexpr VkStructureType VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = (VkStructureType) 1000146003;
static constexpr VkStructureType VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = (VkStructureType) 1000146004;
#endif // VK_KHR_get_memory_requirements2

// Also needed for VulkanMemoryAllocator: fallback declarations for
// VK_KHR_dedicated_allocation when the installed headers lack it.
#ifndef VK_KHR_dedicated_allocation
#define VK_KHR_dedicated_allocation 1
#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3
#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation"

// Output struct reporting whether a resource prefers/requires its own allocation.
typedef struct VkMemoryDedicatedRequirementsKHR {
    VkStructureType sType;
    void*           pNext;
    VkBool32        prefersDedicatedAllocation;
    VkBool32        requiresDedicatedAllocation;
} VkMemoryDedicatedRequirementsKHR;

// pNext-chain struct tying an allocation to a single image or buffer.
typedef struct VkMemoryDedicatedAllocateInfoKHR {
    VkStructureType sType;
    const void*     pNext;
    VkImage         image;
    VkBuffer        buffer;
} VkMemoryDedicatedAllocateInfoKHR;

static constexpr VkStructureType VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = (VkStructureType) 1000127000;
static constexpr VkStructureType VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = (VkStructureType) 1000127001;
#endif // VK_KHR_dedicated_allocation
#endif
#endif

View File

@ -0,0 +1,89 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrVkMemoryAllocator_DEFINED
#define GrVkMemoryAllocator_DEFINED
#include "SkRefCnt.h"
#include "GrTypes.h"
#include "GrVkDefines.h"
#include "GrVkTypes.h"
/**
 * Pure-virtual interface Skia uses to allocate and manage Vulkan device memory for images and
 * buffers. Allocations are referred to by the opaque GrVkBackendMemory handle; the concrete
 * implementation (e.g. GrVkAMDMemoryAllocator) owns the underlying VkDeviceMemory objects.
 */
class GrVkMemoryAllocator : public SkRefCnt {
public:
    enum class AllocationPropertyFlags {
        kNone                = 0,
        // Allocation will be placed in its own VkDeviceMemory and not suballocated from some
        // larger block.
        kDedicatedAllocation = 0x1,
        // Says that the backing memory can only be accessed by the device. Additionally the device
        // may lazily allocate the memory. This cannot be used with buffers that will be host
        // visible. Setting this flag does not guarantee that we will allocate memory that respects
        // it, but we will try to prefer memory that can respect it.
        kLazyAllocation      = 0x2,
        // The allocation will be mapped immediately and stay mapped until it is destroyed. This
        // flag is only valid for buffers which are host visible (i.e. must have a usage other than
        // BufferUsage::kGpuOnly).
        // NOTE: these values combine as a bitfield, so each flag must occupy a distinct bit. The
        // previous value 0x3 aliased kDedicatedAllocation | kLazyAllocation.
        kPersistentlyMapped  = 0x4,
    };

    GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(AllocationPropertyFlags);

    enum class BufferUsage {
        // Buffers that will only be accessed from the device (large const buffers). Will always be
        // in device local memory.
        kGpuOnly,
        // Buffers that will be accessed on the host and copied to and from a GPU resource (transfer
        // buffers). Will always be mappable and coherent memory.
        kCpuOnly,
        // Buffers that typically will be updated multiple times by the host and read on the gpu
        // (e.g. uniform or vertex buffers). Will always be mappable memory, and will prefer to be
        // in device local memory.
        kCpuWritesGpuReads,
        // Buffers which are typically written to by the GPU and then read on the host. Will always
        // be mappable memory, and will prefer coherent and cached memory.
        kGpuWritesCpuReads,
    };

    // Allocates memory for the given image. On success fills in the GrVkBackendMemory handle and
    // returns true; returns false if the allocation fails.
    virtual bool allocateMemoryForImage(VkImage image, AllocationPropertyFlags flags,
                                        GrVkBackendMemory*) = 0;

    // Allocates memory appropriate for the given buffer usage. On success fills in the
    // GrVkBackendMemory handle and returns true; returns false if the allocation fails.
    virtual bool allocateMemoryForBuffer(VkBuffer buffer, BufferUsage usage,
                                         AllocationPropertyFlags flags, GrVkBackendMemory*) = 0;

    // Fills out the passed in GrVkAlloc struct for the passed in GrVkBackendMemory.
    virtual void getAllocInfo(const GrVkBackendMemory&, GrVkAlloc*) const = 0;

    // Maps the entire allocation and returns a pointer to the start of the allocation. The
    // implementation may map more memory than just the allocation, but the returned pointer must
    // point at the start of the memory for the requested allocation.
    virtual void* mapMemory(const GrVkBackendMemory&) = 0;
    virtual void unmapMemory(const GrVkBackendMemory&) = 0;

    // The following two calls are used for managing non-coherent memory. The offset is relative to
    // the start of the allocation and not the underlying VkDeviceMemory. Additionally the client
    // must make sure that the offset + size passed in is less than or equal to the allocation size.
    // It is the responsibility of the implementation to make sure all alignment requirements are
    // followed. The client should not have to deal with any sort of alignment issues.
    virtual void flushMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
                                   VkDeviceSize size) = 0;
    virtual void invalidateMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
                                        VkDeviceSize size) = 0;

    // Frees the memory backing the handle. The handle must not be used afterwards.
    virtual void freeMemory(const GrVkBackendMemory&) = 0;

    // Returns the total amount of memory that is allocated and in use by an allocation for this
    // allocator.
    virtual uint64_t totalUsedMemory() const = 0;

    // Returns the total amount of memory that is allocated by this allocator.
    virtual uint64_t totalAllocatedMemory() const = 0;
};

GR_MAKE_BITFIELD_CLASS_OPS(GrVkMemoryAllocator::AllocationPropertyFlags);
#endif

View File

@ -26,6 +26,9 @@
///////////////////////////////////////////////////////////////////////////////
typedef intptr_t GrVkBackendMemory;
/**
* Types for interacting with Vulkan resources created externally to Skia. GrBackendObjects for
* Vulkan textures are really const GrVkImageInfo*
@ -36,6 +39,7 @@ struct GrVkAlloc {
, fOffset(0)
, fSize(0)
, fFlags(0)
, fBackendMemory(0)
, fUsesSystemHeap(false) {}
GrVkAlloc(VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, uint32_t flags)
@ -43,15 +47,18 @@ struct GrVkAlloc {
, fOffset(offset)
, fSize(size)
, fFlags(flags)
, fBackendMemory(0)
, fUsesSystemHeap(false) {}
VkDeviceMemory fMemory; // can be VK_NULL_HANDLE iff is an RT and is borrowed
VkDeviceSize fOffset;
VkDeviceSize fSize; // this can be indeterminate iff Tex uses borrow semantics
uint32_t fFlags;
VkDeviceMemory fMemory; // can be VK_NULL_HANDLE iff is an RT and is borrowed
VkDeviceSize fOffset;
VkDeviceSize fSize; // this can be indeterminate iff Tex uses borrow semantics
uint32_t fFlags;
GrVkBackendMemory fBackendMemory; // handle to memory allocated via GrVkMemoryAllocator.
enum Flag {
kNoncoherent_Flag = 0x1, // memory must be flushed to device after mapping
kMappable_Flag = 0x2, // memory is able to be mapped.
};
bool operator==(const GrVkAlloc& that) const {

View File

@ -0,0 +1,267 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrVkAMDMemoryAllocator.h"
#include "vk/GrVkInterface.h"
#include "GrVkUtil.h"
// Constructs a GrVkMemoryAllocator backed by the AMD VulkanMemoryAllocator (VMA) library. VMA is
// handed Skia's dispatched Vulkan entry points so it never relies on statically linked functions
// (see VMA_STATIC_VULKAN_FUNCTIONS 0 in GrVulkanMemoryAllocator.cpp).
GrVkAMDMemoryAllocator::GrVkAMDMemoryAllocator(VkPhysicalDevice physicalDevice,
                                               VkDevice device,
                                               sk_sp<const GrVkInterface> interface)
        : fAllocator(VK_NULL_HANDLE)
        , fInterface(std::move(interface))
        , fDevice(device) {
// Copies one function pointer from Skia's GrVkInterface table into VMA's function table.
#define GR_COPY_FUNCTION(NAME) functions.vk##NAME = fInterface->fFunctions.f##NAME;

    VmaVulkanFunctions functions;
    GR_COPY_FUNCTION(GetPhysicalDeviceProperties);
    GR_COPY_FUNCTION(GetPhysicalDeviceMemoryProperties);
    GR_COPY_FUNCTION(AllocateMemory);
    GR_COPY_FUNCTION(FreeMemory);
    GR_COPY_FUNCTION(MapMemory);
    GR_COPY_FUNCTION(UnmapMemory);
    GR_COPY_FUNCTION(BindBufferMemory);
    GR_COPY_FUNCTION(BindImageMemory);
    GR_COPY_FUNCTION(GetBufferMemoryRequirements);
    GR_COPY_FUNCTION(GetImageMemoryRequirements);
    GR_COPY_FUNCTION(CreateBuffer);
    GR_COPY_FUNCTION(DestroyBuffer);
    GR_COPY_FUNCTION(CreateImage);
    GR_COPY_FUNCTION(DestroyImage);

    // Skia currently doesn't support VK_KHR_dedicated_allocation, so the KHR
    // memory-requirements entry points are explicitly left unset for VMA.
    functions.vkGetBufferMemoryRequirements2KHR = nullptr;
    functions.vkGetImageMemoryRequirements2KHR = nullptr;

    VmaAllocatorCreateInfo info;
    info.flags = 0;
    info.physicalDevice = physicalDevice;
    info.device = device;
    // 0 lets VMA pick its default block size for large heaps.
    info.preferredLargeHeapBlockSize = 0;
    info.pAllocationCallbacks = nullptr;
    info.pDeviceMemoryCallbacks = nullptr;
    info.frameInUseCount = 0;
    info.pHeapSizeLimit = nullptr;
    info.pVulkanFunctions = &functions;

    vmaCreateAllocator(&info, &fAllocator);
}
GrVkAMDMemoryAllocator::~GrVkAMDMemoryAllocator() {
    // Destroying the VMA allocator releases all VkDeviceMemory blocks it still owns.
    vmaDestroyAllocator(fAllocator);
    fAllocator = VK_NULL_HANDLE;
}
// Asks VMA for device-local memory backing |image| and, on success, returns the opaque
// VmaAllocation handle through |backendMemory|.
bool GrVkAMDMemoryAllocator::allocateMemoryForImage(VkImage image, AllocationPropertyFlags flags,
                                                    GrVkBackendMemory* backendMemory) {
    VmaAllocationCreateInfo createInfo;
    createInfo.flags = 0;
    createInfo.usage = VMA_MEMORY_USAGE_UNKNOWN;
    createInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    createInfo.preferredFlags = 0;
    createInfo.memoryTypeBits = 0;
    createInfo.pool = VK_NULL_HANDLE;
    createInfo.pUserData = nullptr;

    // Translate Skia's allocation-property flags into VMA's knobs.
    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        createInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }
    if (AllocationPropertyFlags::kLazyAllocation & flags) {
        createInfo.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    VmaAllocation vmaAllocation;
    if (VK_SUCCESS != vmaAllocateMemoryForImage(fAllocator, image, &createInfo, &vmaAllocation,
                                                nullptr)) {
        return false;
    }
    *backendMemory = (GrVkBackendMemory)vmaAllocation;
    return true;
}
// Allocates memory for |buffer| via VMA, choosing required/preferred Vulkan memory-property
// flags from the requested BufferUsage. On success the opaque VmaAllocation handle is returned
// through |backendMemory|; returns false if no suitable memory could be allocated.
bool GrVkAMDMemoryAllocator::allocateMemoryForBuffer(VkBuffer buffer, BufferUsage usage,
                                                     AllocationPropertyFlags flags,
                                                     GrVkBackendMemory* backendMemory) {
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    // Map the Skia buffer usage onto Vulkan memory-property requirements/preferences.
    switch (usage) {
        case BufferUsage::kGpuOnly:
            info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            info.preferredFlags = 0;
            break;
        case BufferUsage::kCpuOnly:
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
            break;
        case BufferUsage::kCpuWritesGpuReads:
            // First attempt to try memory that is also device local.
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
            break;
        case BufferUsage::kGpuWritesCpuReads:
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                  VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
            break;
    }

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }
    // Lazily allocated memory only makes sense when the host never touches the buffer.
    if ((AllocationPropertyFlags::kLazyAllocation & flags) && BufferUsage::kGpuOnly == usage) {
        info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }
    // Persistent mapping requires host-visible memory, so it is ignored for kGpuOnly buffers.
    if ((AllocationPropertyFlags::kPersistentlyMapped & flags) && BufferUsage::kGpuOnly != usage) {
        info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
    if (VK_SUCCESS != result) {
        if (usage == BufferUsage::kCpuWritesGpuReads) {
            // We try again but this time drop the requirement for device local.
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
            result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
        }
    }
    if (VK_SUCCESS != result) {
        return false;
    }

    *backendMemory = (GrVkBackendMemory)allocation;
    return true;
}
void GrVkAMDMemoryAllocator::freeMemory(const GrVkBackendMemory& memoryHandle) {
const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
vmaFreeMemory(fAllocator, allocation);
}
void GrVkAMDMemoryAllocator::getAllocInfo(const GrVkBackendMemory& memoryHandle,
GrVkAlloc* alloc) const {
const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
VmaAllocationInfo vmaInfo;
vmaGetAllocationInfo(fAllocator, allocation, &vmaInfo);
VkMemoryPropertyFlags memFlags;
vmaGetMemoryTypeProperties(fAllocator, vmaInfo.memoryType, &memFlags);
uint32_t flags = 0;
if (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT & memFlags) {
flags |= GrVkAlloc::kMappable_Flag;
}
if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & memFlags)) {
flags |= GrVkAlloc::kNoncoherent_Flag;
}
alloc->fMemory = vmaInfo.deviceMemory;
alloc->fOffset = vmaInfo.offset;
alloc->fSize = vmaInfo.size;
alloc->fFlags = flags;
alloc->fBackendMemory = memoryHandle;
}
void* GrVkAMDMemoryAllocator::mapMemory(const GrVkBackendMemory& memoryHandle) {
const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
void* mapPtr;
vmaMapMemory(fAllocator, allocation, &mapPtr);
return mapPtr;
}
void GrVkAMDMemoryAllocator::unmapMemory(const GrVkBackendMemory& memoryHandle) {
const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
vmaUnmapMemory(fAllocator, allocation);
}
// Flushes a range of a mapped allocation so host writes become visible to the device.
// |offset| and |size| are relative to the start of the allocation, not the underlying
// VkDeviceMemory. Coherent memory needs no explicit flush, so this is a no-op for it.
void GrVkAMDMemoryAllocator::flushMappedMemory(const GrVkBackendMemory& memoryHandle,
                                               VkDeviceSize offset, VkDeviceSize size) {
    GrVkAlloc info;
    this->getAllocInfo(memoryHandle, &info);
    if (GrVkAlloc::kNoncoherent_Flag & info.fFlags) {
        // Flush ranges of non-coherent memory must be aligned to nonCoherentAtomSize.
        const VkPhysicalDeviceProperties* physDevProps;
        vmaGetPhysicalDeviceProperties(fAllocator, &physDevProps);
        VkDeviceSize alignment = physDevProps->limits.nonCoherentAtomSize;

        // Convert the allocation-relative offset to a VkDeviceMemory-relative offset, then
        // round the range outward to the atom size.
        // NOTE(review): the mask arithmetic assumes nonCoherentAtomSize is a power of two --
        // true on known implementations, but worth confirming against the spec.
        offset = offset + info.fOffset;
        VkDeviceSize offsetDiff = offset & (alignment -1);
        offset = offset - offsetDiff;
        size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
        SkASSERT(offset >= info.fOffset);
        SkASSERT(offset + size <= info.fOffset + info.fSize);
        SkASSERT(0 == (offset & (alignment-1)));
        SkASSERT(size > 0);
        SkASSERT(0 == (size & (alignment-1)));
#endif

        VkMappedMemoryRange mappedMemoryRange;
        memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
        mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        mappedMemoryRange.memory = info.fMemory;
        mappedMemoryRange.offset = offset;
        mappedMemoryRange.size = size;
        GR_VK_CALL(fInterface, FlushMappedMemoryRanges(fDevice, 1, &mappedMemoryRange));
    }
}
// Invalidates a range of a mapped allocation so device writes become visible to the host.
// |offset| and |size| are relative to the start of the allocation, not the underlying
// VkDeviceMemory. Coherent memory needs no explicit invalidate, so this is a no-op for it.
void GrVkAMDMemoryAllocator::invalidateMappedMemory(const GrVkBackendMemory& memoryHandle,
                                                    VkDeviceSize offset, VkDeviceSize size) {
    GrVkAlloc info;
    this->getAllocInfo(memoryHandle, &info);
    if (GrVkAlloc::kNoncoherent_Flag & info.fFlags) {
        // Invalidate ranges of non-coherent memory must be aligned to nonCoherentAtomSize.
        const VkPhysicalDeviceProperties* physDevProps;
        vmaGetPhysicalDeviceProperties(fAllocator, &physDevProps);
        VkDeviceSize alignment = physDevProps->limits.nonCoherentAtomSize;

        // Convert the allocation-relative offset to a VkDeviceMemory-relative offset, then
        // round the range outward to the atom size.
        // NOTE(review): the mask arithmetic assumes nonCoherentAtomSize is a power of two --
        // true on known implementations, but worth confirming against the spec.
        offset = offset + info.fOffset;
        VkDeviceSize offsetDiff = offset & (alignment -1);
        offset = offset - offsetDiff;
        size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
        SkASSERT(offset >= info.fOffset);
        SkASSERT(offset + size <= info.fOffset + info.fSize);
        SkASSERT(0 == (offset & (alignment-1)));
        SkASSERT(size > 0);
        SkASSERT(0 == (size & (alignment-1)));
#endif

        VkMappedMemoryRange mappedMemoryRange;
        memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
        mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        mappedMemoryRange.memory = info.fMemory;
        mappedMemoryRange.offset = offset;
        mappedMemoryRange.size = size;
        GR_VK_CALL(fInterface, InvalidateMappedMemoryRanges(fDevice, 1, &mappedMemoryRange));
    }
}
uint64_t GrVkAMDMemoryAllocator::totalUsedMemory() const {
VmaStats stats;
vmaCalculateStats(fAllocator, &stats);
return stats.total.usedBytes;
}
uint64_t GrVkAMDMemoryAllocator::totalAllocatedMemory() const {
VmaStats stats;
vmaCalculateStats(fAllocator, &stats);
return stats.total.usedBytes + stats.total.unusedBytes;
}

View File

@ -0,0 +1,56 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrVkAMDMemoryAllocator_DEFINED
#define GrVkAMDMemoryAllocator_DEFINED
#include "vk/GrVkMemoryAllocator.h"
#include "GrVulkanMemoryAllocator.h"
struct GrVkInterface;
/**
 * GrVkMemoryAllocator implementation backed by the AMD VulkanMemoryAllocator (VMA) library.
 * The GrVkBackendMemory handles it produces are VmaAllocation pointers cast to the handle type.
 */
class GrVkAMDMemoryAllocator : public GrVkMemoryAllocator {
public:
    GrVkAMDMemoryAllocator(VkPhysicalDevice physicalDevice, VkDevice device,
                           sk_sp<const GrVkInterface> interface);

    ~GrVkAMDMemoryAllocator() override;

    bool allocateMemoryForImage(VkImage image, AllocationPropertyFlags flags,
                                GrVkBackendMemory*) override;

    bool allocateMemoryForBuffer(VkBuffer buffer, BufferUsage usage,
                                 AllocationPropertyFlags flags, GrVkBackendMemory*) override;

    void freeMemory(const GrVkBackendMemory&) override;

    void getAllocInfo(const GrVkBackendMemory&, GrVkAlloc*) const override;

    void* mapMemory(const GrVkBackendMemory&) override;
    void unmapMemory(const GrVkBackendMemory&) override;

    void flushMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
                           VkDeviceSize size) override;
    void invalidateMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
                                VkDeviceSize size) override;

    uint64_t totalUsedMemory() const override;
    uint64_t totalAllocatedMemory() const override;

private:
    // The VMA allocator that owns all underlying VkDeviceMemory blocks.
    VmaAllocator fAllocator;

    // If a future version of the AMD allocator has helper functions for flushing and invalidating
    // memory, then we won't need to save the GrVkInterface here since we won't need to make direct
    // vulkan calls.
    sk_sp<const GrVkInterface> fInterface;
    VkDevice fDevice;

    typedef GrVkMemoryAllocator INHERITED;
};
#endif

View File

@ -0,0 +1,26 @@
# Copyright 2018 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("../third_party.gni")
# Standalone GN target wrapping the AMD VulkanMemoryAllocator so Skia's gpu target can depend
# on it like any other third-party library.
third_party("vulkanmemoryallocator") {
  public_include_dirs = [ "./" ]

  include_dirs = []
  # When building Skia standalone (and not as an official build), the wrapper compiles against
  # Skia's own headers for the defines pulled in through GrVulkanMemoryAllocator.h.
  if (defined(is_skia_standalone) && is_skia_standalone && !is_official_build) {
    include_dirs += [ "../../tools/gpu/vk" ]
    include_dirs += [ "../../include/core" ]
    include_dirs += [ "../../include/config" ]
  }

  # TODO: As described in the main skia BUILD.gn file we eventually want to move this to be
  # //third_party/vulkan once clients have created their own //third_party/vulkan directory.
  deps = [
    "../vulkan",
  ]
  sources = [
    "GrVulkanMemoryAllocator.cpp",
    "GrVulkanMemoryAllocator.h",
  ]
}

View File

@ -0,0 +1,16 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
// This is the single translation unit that compiles the header-only AMD VulkanMemoryAllocator
// implementation (enabled by VMA_IMPLEMENTATION below).

// Workaround to make sure we align non-coherent memory to nonCoherentAtomSize.
#define VMA_DEBUG_ALIGNMENT 256

// We use our own function pointers (supplied via VmaVulkanFunctions), not statically linked ones.
#define VMA_STATIC_VULKAN_FUNCTIONS 0

#define VMA_IMPLEMENTATION
#include "GrVulkanMemoryAllocator.h"

View File

@ -0,0 +1,18 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
// We use this header to include vk_mem_alloc.h to make sure we always include GrVkDefines.h first.
// We need to do this so that the correct defines are set up before we include vulkan.h inside of
// vk_mem_alloc.h.
#ifndef GrVulkanMemoryAllocator_DEFINED
#define GrVulkanMemoryAllocator_DEFINED

#include "../../include/gpu/vk/GrVkDefines.h"
#include "include/vk_mem_alloc.h"

#endif

View File

@ -0,0 +1,19 @@
Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

File diff suppressed because it is too large Load Diff