Update Flush and Invalidate Memory calls in vulkan to take offset and size

Bug: skia:
Change-Id: I4faf9f431422f27096fce4605be281c28935df08
Reviewed-on: https://skia-review.googlesource.com/111782
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
Greg Daniel 2018-03-02 11:44:22 -05:00 committed by Skia Commit-Bot
parent 8080a6e705
commit e35a99ed70
4 changed files with 47 additions and 17 deletions
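
Background for the change: for memory that is host-visible but not host-coherent, host writes only become visible to the device after vkFlushMappedMemoryRanges, and device writes only become visible to the host after vkInvalidateMappedMemoryRanges. The Vulkan spec requires VkMappedMemoryRange::offset to be a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize, and size to be VK_WHOLE_SIZE, a multiple of that atom size, or to reach the end of the memory object; the new SK_DEBUG asserts in GrVkMemory.cpp below check exactly these conditions. A minimal raw-Vulkan sketch of flushing one explicit sub-range (the helper name flushMappedRange is illustrative, not Skia code):

#include <vulkan/vulkan.h>

// Flush one sub-range of a mapped, non-coherent VkDeviceMemory so the device
// sees the host writes. 'offset' and 'size' must follow the atom-size rules
// described above.
static VkResult flushMappedRange(VkDevice device, VkDeviceMemory memory,
                                 VkDeviceSize offset, VkDeviceSize size) {
    VkMappedMemoryRange range = {};
    range.sType  = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range.memory = memory;
    range.offset = offset;  // must be a multiple of nonCoherentAtomSize
    range.size   = size;    // VK_WHOLE_SIZE, an atom-size multiple, or reach the end of memory
    return vkFlushMappedMemoryRanges(device, 1, &range);
}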

View File

@@ -206,7 +206,13 @@ void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
     SkASSERT(this->vkIsMapped());
 
     if (fDesc.fDynamic) {
-        GrVkMemory::FlushMappedAlloc(gpu, this->alloc(), fMappedSize);
+        // We currently don't use fOffset
+        SkASSERT(0 == fOffset);
+        VkDeviceSize flushOffset = this->alloc().fOffset + fOffset;
+        VkDeviceSize flushSize = gpu->vkCaps().canUseWholeSizeOnFlushMappedMemory() ? VK_WHOLE_SIZE
+                                                                                    : fMappedSize;
+
+        GrVkMemory::FlushMappedAlloc(gpu, this->alloc(), flushOffset, flushSize);
         VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
         fMapPtr = nullptr;
         fMappedSize = 0;

View File

@@ -615,7 +615,7 @@ bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex, GrSurfaceOrigin texOrigin, i
                      height);
     }
 
-    GrVkMemory::FlushMappedAlloc(this, alloc, size);
+    GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
     GR_VK_CALL(interface, UnmapMemory(fDevice, alloc.fMemory));
 
     return true;
@@ -1169,7 +1169,7 @@ bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc, size
             }
         }
     }
-    GrVkMemory::FlushMappedAlloc(gpu, alloc, mapSize);
+    GrVkMemory::FlushMappedAlloc(gpu, alloc, mapOffset, mapSize);
     GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
     return true;
 }
@@ -2015,7 +2015,8 @@ bool GrVkGpu::onReadPixels(GrSurface* surface, GrSurfaceOrigin origin, int left,
     // we can copy the data out of the buffer.
     this->submitCommandBuffer(kForce_SyncQueue);
     void* mappedMemory = transferBuffer->map();
-    GrVkMemory::InvalidateMappedAlloc(this, transferBuffer->alloc());
+    const GrVkAlloc& transAlloc = transferBuffer->alloc();
+    GrVkMemory::InvalidateMappedAlloc(this, transAlloc, transAlloc.fOffset, VK_WHOLE_SIZE);
 
     if (copyFromOrigin) {
         uint32_t skipRows = region.imageExtent.height - height;

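The onReadPixels change above invalidates the transfer buffer's allocation starting at its own offset with VK_WHOLE_SIZE, after forcing the copy to finish via submitCommandBuffer(kForce_SyncQueue). With raw Vulkan the same readback pattern looks roughly like the sketch below: wait for the transfer, map, invalidate, then read through the pointer (readBackWholeAllocation and its parameters are illustrative, not Skia code; allocOffset is assumed to be nonCoherentAtomSize-aligned):

#include <vulkan/vulkan.h>
#include <cstdint>

// Readback from a non-coherent, host-visible buffer: wait for the GPU copy to
// finish, map, invalidate the mapped range, and only then read *mappedPtr.
static VkResult readBackWholeAllocation(VkDevice device, VkFence copyDone,
                                        VkDeviceMemory memory, VkDeviceSize allocOffset,
                                        void** mappedPtr) {
    // Block until the transfer that wrote the buffer has completed.
    VkResult result = vkWaitForFences(device, 1, &copyDone, VK_TRUE, UINT64_MAX);
    if (result != VK_SUCCESS) {
        return result;
    }
    result = vkMapMemory(device, memory, allocOffset, VK_WHOLE_SIZE, 0, mappedPtr);
    if (result != VK_SUCCESS) {
        return result;
    }
    // Make the device writes visible to the host before dereferencing the pointer.
    VkMappedMemoryRange range = {};
    range.sType  = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range.memory = memory;
    range.offset = allocOffset;      // assumed atom-size aligned
    range.size   = VK_WHOLE_SIZE;
    return vkInvalidateMappedMemoryRanges(device, 1, &range);
}
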
View File

@@ -298,32 +298,53 @@ VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
     return flags;
 }
 
-void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize size) {
+void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+                                  VkDeviceSize size) {
     if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
+#ifdef SK_DEBUG
+        SkASSERT(offset >= alloc.fOffset);
+        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
+        SkASSERT(0 == (offset & (alignment-1)));
+        if (size != VK_WHOLE_SIZE) {
+            SkASSERT(size > 0);
+            SkASSERT(0 == (size & (alignment-1)) ||
+                     (offset + size) == (alloc.fOffset + alloc.fSize));
+            SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
+        }
+#endif
+
         VkMappedMemoryRange mappedMemoryRange;
         memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
         mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
         mappedMemoryRange.memory = alloc.fMemory;
-        mappedMemoryRange.offset = alloc.fOffset;
-        if (gpu->vkCaps().canUseWholeSizeOnFlushMappedMemory()) {
-            mappedMemoryRange.size = VK_WHOLE_SIZE; // Size of what we mapped
-        } else {
-            SkASSERT(size > 0);
-            mappedMemoryRange.size = size;
-        }
+        mappedMemoryRange.offset = offset;
+        mappedMemoryRange.size = size;
         GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(),
                                                                1, &mappedMemoryRange));
     }
 }
 
-void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
+void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
+                                       VkDeviceSize offset, VkDeviceSize size) {
     if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
+#ifdef SK_DEBUG
+        SkASSERT(offset >= alloc.fOffset);
+        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
+        SkASSERT(0 == (offset & (alignment-1)));
+        if (size != VK_WHOLE_SIZE) {
+            SkASSERT(size > 0);
+            SkASSERT(0 == (size & (alignment-1)) ||
+                     (offset + size) == (alloc.fOffset + alloc.fSize));
+            SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
+        }
+#endif
+
         VkMappedMemoryRange mappedMemoryRange;
         memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
         mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
         mappedMemoryRange.memory = alloc.fMemory;
-        mappedMemoryRange.offset = alloc.fOffset;
-        mappedMemoryRange.size = VK_WHOLE_SIZE; // Size of what we mapped
+        mappedMemoryRange.offset = offset;
+        mappedMemoryRange.size = size;
         GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(),
                                                                     1, &mappedMemoryRange));
     }

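The asserts in FlushMappedAlloc and InvalidateMappedAlloc above only verify that the caller already passed a legal range; they do not round it. A caller that has dirtied only part of a mapping could expand the range to legal boundaries roughly as sketched below (illustrative only, not part of this change; assumes the atom size is a power of two, as the (alignment-1) masks above also do, and that memorySize is the size of the whole VkDeviceMemory object):

#include <vulkan/vulkan.h>
#include <algorithm>

// Expand [dirtyOffset, dirtyOffset + dirtySize) to nonCoherentAtomSize
// boundaries, clamped to the end of the VkDeviceMemory object. Assumes the
// suballocation being flushed starts on an atom boundary, so rounding down
// cannot reach into a neighboring suballocation.
static void alignFlushRange(VkDeviceSize dirtyOffset, VkDeviceSize dirtySize,
                            VkDeviceSize memorySize, VkDeviceSize atomSize,
                            VkDeviceSize* flushOffset, VkDeviceSize* flushSize) {
    VkDeviceSize begin = dirtyOffset & ~(atomSize - 1);                              // round down
    VkDeviceSize end   = (dirtyOffset + dirtySize + atomSize - 1) & ~(atomSize - 1); // round up
    // A range that reaches the end of the memory object need not be an atom multiple.
    end = std::min(end, memorySize);
    *flushOffset = begin;
    *flushSize   = end - begin;
}
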
View File

@@ -38,8 +38,10 @@ namespace GrVkMemory {
     VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
 
-    void FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize size);
-    void InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc);
+    void FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+                          VkDeviceSize size);
+    void InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+                               VkDeviceSize size);
 }
 
 class GrVkFreeListAlloc {