Remove ARC from Metal backend

Change-Id: I5ab28f6eda3b37d1b82c94c7cc6eaa2ce59157da
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/311113
Reviewed-by: Adlai Holler <adlai@google.com>
Reviewed-by: John Stiles <johnstiles@google.com>
Commit-Queue: Jim Van Verth <jvanverth@google.com>
Jim Van Verth 2020-08-24 10:47:22 -04:00 committed by Skia Commit-Bot
parent 06eda4b358
commit 6541013b53
40 changed files with 653 additions and 659 deletions


@ -791,7 +791,6 @@ optional("gpu") {
frameworks += [ "Metal.framework" ]
frameworks += [ "MetalKit.framework" ]
frameworks += [ "Foundation.framework" ]
cflags_objcc += [ "-fobjc-arc" ]
}
if (is_debug) {
@ -1594,8 +1593,6 @@ if (skia_enable_tools) {
libs +=
[ "${fuchsia_sdk_path}/arch/${target_cpu}/sysroot/lib/libzircon.so" ]
}
cflags_objcc = [ "-fobjc-arc" ]
} # test_lib("gpu_tool_utils")
test_lib("flags") {
@ -1756,7 +1753,6 @@ if (skia_enable_tools) {
sources = tests_sources + pathops_tests_sources
if (skia_use_metal) {
sources += metal_tests_sources
cflags_objcc = [ "-fobjc-arc" ]
}
if (skia_use_gl) {
sources += gl_tests_sources


@ -7,7 +7,9 @@ This file includes a list of high level updates for each milestone release.
Milestone 87
------------
* <insert new release notes here>
* Remove use of ARC from the Metal backend. Update sk_cf_obj to add more
functionality, matching other internal smart pointer types.
https://review.skia.org/311113
* New YUVA planar interface in SkCodec. Chroma subsampling is specified in a more
structured way, and 8-bit planar values are no longer assumed.


@ -34,7 +34,8 @@ template <typename T> class sk_cf_obj {
public:
using element_type = T;
constexpr sk_cf_obj() : fObject(nullptr) {}
constexpr sk_cf_obj() {}
constexpr sk_cf_obj(std::nullptr_t) {}
/**
* Shares the underlying object by calling CFRetain(), so that both the argument and the newly
@ -62,9 +63,11 @@ public:
*/
~sk_cf_obj() {
SkCFSafeRelease(fObject);
SkDEBUGCODE(fObject = nullptr);
SkDEBUGCODE(fObject = nil);
}
sk_cf_obj<T>& operator=(std::nullptr_t) { this->reset(); return *this; }
/**
* Shares the underlying object referenced by the argument by calling CFRetain() on it. If this
* sk_cf_obj previously had a reference to an object (i.e. not null) it will call CFRelease()
@ -87,13 +90,22 @@ public:
return *this;
}
explicit operator bool() const { return this->get() != nil; }
T get() const { return fObject; }
T operator*() const {
SkASSERT(fObject);
return fObject;
}
/**
* Adopt the new object, and call CFRelease() on any previously held object (if not null).
* No call to CFRetain() will be made.
*/
void reset(T object = nullptr) {
void reset(T object = nil) {
// Need to unref after assigning, see
// http://wg21.cmeerw.net/lwg/issue998
// http://wg21.cmeerw.net/lwg/issue2262
T oldObject = fObject;
fObject = object;
SkCFSafeRelease(oldObject);
@ -104,7 +116,7 @@ public:
* reference to an object (i.e. not null) it will call CFRelease() on that object.
*/
void retain(T object) {
if (this->fObject != object) {
if (fObject != object) {
this->reset(SkCFSafeRetain(object));
}
}
@ -116,23 +128,49 @@ public:
*/
T SK_WARN_UNUSED_RESULT release() {
T obj = fObject;
fObject = nullptr;
fObject = nil;
return obj;
}
private:
T fObject;
T fObject = nil;
};
template <typename T> inline bool operator==(const sk_cf_obj<T>& a,
const sk_cf_obj<T>& b) {
return a.get() == b.get();
}
template <typename T> inline bool operator==(const sk_cf_obj<T>& a,
std::nullptr_t) {
return !a;
}
template <typename T> inline bool operator==(std::nullptr_t,
const sk_cf_obj<T>& b) {
return !b;
}
template <typename T> inline bool operator!=(const sk_cf_obj<T>& a,
const sk_cf_obj<T>& b) {
return a.get() != b.get();
}
template <typename T> inline bool operator!=(const sk_cf_obj<T>& a,
std::nullptr_t) {
return static_cast<bool>(a);
}
template <typename T> inline bool operator!=(std::nullptr_t,
const sk_cf_obj<T>& b) {
return static_cast<bool>(b);
}
/*
* Returns a sk_cf_obj wrapping the provided object AND calls ref on it (if not null).
*
* This is different than the semantics of the constructor for sk_cf_obj, which just wraps the
* object, effectively "adopting" it.
*/
template <typename T> sk_cf_obj<T> sk_ref_cf_obj(T obj) {
return sk_cf_obj<T>(SkCFSafeRetain(obj));
}
#endif // SK_BUILD_FOR_MAC || SK_BUILD_FOR_IOS
#endif // SkCFObject_DEFINED
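
A minimal usage sketch of the updated wrapper (not part of the CL; the device creation is only illustrative). The constructor and reset() adopt a +1 reference, sk_ref_cf_obj() takes an extra retain, and the new nullptr operators make empty checks explicit:

#import <Metal/Metal.h>
#include "include/core/SkTypes.h"
#include "include/ports/SkCFObject.h"

static void sk_cf_obj_sketch() {
    // MTLCreateSystemDefaultDevice() follows the Create rule and returns a
    // +1 reference, so the adopting constructor takes it without a retain.
    sk_cf_obj<id<MTLDevice>> device(MTLCreateSystemDefaultDevice());

    // sk_ref_cf_obj() calls CFRetain() first, so both wrappers own a reference.
    sk_cf_obj<id<MTLDevice>> shared = sk_ref_cf_obj(device.get());
    SkASSERT(device == shared);   // operator== compares the underlying pointers

    shared.reset();               // CFRelease() on shared's reference
    SkASSERT(shared == nullptr);  // uses the new nullptr comparison operators
}   // ~sk_cf_obj releases the reference that device adopted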


@ -8,6 +8,7 @@
#ifndef GrMtlBuffer_DEFINED
#define GrMtlBuffer_DEFINED
#include "include/ports/SkCFObject.h"
#include "src/gpu/GrGpuBuffer.h"
#include "src/gpu/mtl/GrMtlUniformHandler.h"
@ -23,7 +24,7 @@ public:
~GrMtlBuffer() override;
id<MTLBuffer> mtlBuffer() const { return fMtlBuffer; }
id<MTLBuffer> mtlBuffer() const { return fMtlBuffer.get(); }
size_t offset() const { return fOffset; }
protected:
@ -47,9 +48,9 @@ private:
#endif
bool fIsDynamic;
id<MTLBuffer> fMtlBuffer;
size_t fOffset; // offset into shared buffer for dynamic buffers
id<MTLBuffer> fMappedBuffer; // buffer used by static buffers for uploads
sk_cf_obj<id<MTLBuffer>> fMtlBuffer;
size_t fOffset; // offset into shared buffer for dynamic buffers
sk_cf_obj<id<MTLBuffer>> fMappedBuffer; // buffer used by static buffers for uploads
typedef GrGpuBuffer INHERITED;
};


@ -11,10 +11,6 @@
#include "src/gpu/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/mtl/GrMtlGpu.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
@ -52,25 +48,26 @@ GrMtlBuffer::GrMtlBuffer(GrMtlGpu* gpu, size_t size, GrGpuBufferType intendedTyp
// to ensure we have space for the extra data
size = SkAlign4(size);
#endif
fMtlBuffer = size == 0 ? nil :
[gpu->device() newBufferWithLength: size
options: options];
if (size != 0) {
fMtlBuffer.reset([gpu->device() newBufferWithLength: size
options: options]);
}
this->registerWithCache(SkBudgeted::kYes);
VALIDATE();
}
GrMtlBuffer::~GrMtlBuffer() {
SkASSERT(fMtlBuffer == nil);
SkASSERT(fMappedBuffer == nil);
SkASSERT(!fMtlBuffer);
SkASSERT(!fMappedBuffer);
SkASSERT(fMapPtr == nullptr);
}
bool GrMtlBuffer::onUpdateData(const void* src, size_t srcInBytes) {
if (!fIsDynamic) {
if (fMtlBuffer == nil) {
if (!fMtlBuffer) {
return false;
}
if (srcInBytes > fMtlBuffer.length) {
if (srcInBytes > (*fMtlBuffer).length) {
return false;
}
}
@ -82,7 +79,7 @@ bool GrMtlBuffer::onUpdateData(const void* src, size_t srcInBytes) {
}
SkASSERT(fMappedBuffer);
if (!fIsDynamic) {
SkASSERT(SkAlign4(srcInBytes) == fMappedBuffer.length);
SkASSERT(SkAlign4(srcInBytes) == (*fMappedBuffer).length);
}
memcpy(fMapPtr, src, srcInBytes);
this->internalUnmap(srcInBytes);
@ -123,7 +120,7 @@ void GrMtlBuffer::internalMap(size_t sizeInBytes) {
SkASSERT(!this->isMapped());
if (fIsDynamic) {
fMappedBuffer = fMtlBuffer;
fMapPtr = static_cast<char*>(fMtlBuffer.contents) + fOffset;
fMapPtr = static_cast<char*>((*fMtlBuffer).contents) + fOffset;
} else {
SkASSERT(fMtlBuffer);
SkASSERT(fMappedBuffer == nil);
@ -135,10 +132,9 @@ void GrMtlBuffer::internalMap(size_t sizeInBytes) {
// Mac requires 4-byte alignment for copies so we pad this out
sizeInBytes = SkAlign4(sizeInBytes);
#endif
fMappedBuffer =
[this->mtlGpu()->device() newBufferWithLength: sizeInBytes
options: options];
fMapPtr = fMappedBuffer.contents;
fMappedBuffer.reset([this->mtlGpu()->device() newBufferWithLength: sizeInBytes
options: options]);
fMapPtr = (*fMappedBuffer).contents;
}
VALIDATE();
}
@ -162,14 +158,14 @@ void GrMtlBuffer::internalUnmap(size_t sizeInBytes) {
if (fIsDynamic) {
#ifdef SK_BUILD_FOR_MAC
SkASSERT(0 == (fOffset & 0x3)); // should be 4-byte aligned
[fMtlBuffer didModifyRange: NSMakeRange(fOffset, sizeInBytes)];
[*fMtlBuffer didModifyRange:NSMakeRange(fOffset, sizeInBytes)];
#endif
} else {
GrMtlCommandBuffer* cmdBuffer = this->mtlGpu()->commandBuffer();
id<MTLBlitCommandEncoder> blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
[blitCmdEncoder copyFromBuffer: fMappedBuffer
[blitCmdEncoder copyFromBuffer: fMappedBuffer.get()
sourceOffset: 0
toBuffer: fMtlBuffer
toBuffer: fMtlBuffer.get()
destinationOffset: 0
size: sizeInBytes];
}
@ -193,6 +189,6 @@ void GrMtlBuffer::validate() const {
this->intendedType() == GrGpuBufferType::kXferCpuToGpu ||
this->intendedType() == GrGpuBufferType::kXferGpuToCpu);
SkASSERT(fMappedBuffer == nil || fMtlBuffer == nil ||
fMappedBuffer.length <= fMtlBuffer.length);
(*fMappedBuffer).length <= (*fMtlBuffer).length);
}
#endif
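
Without ARC, the +1 reference that Metal's alloc/new/copy-family methods return has to be adopted explicitly. A sketch of the pattern used above (the helper name and storage mode are assumptions, not from the CL):

#import <Metal/Metal.h>
#include "include/ports/SkCFObject.h"

static sk_cf_obj<id<MTLBuffer>> make_transfer_buffer(id<MTLDevice> device, size_t size) {
    // -newBufferWithLength:options: returns a +1 reference under manual
    // reference counting; reset() adopts it and pairs it with exactly one
    // CFRelease() in the wrapper's destructor (or a later reset()).
    sk_cf_obj<id<MTLBuffer>> buffer;
    if (size != 0) {
        buffer.reset([device newBufferWithLength: size
                                         options: MTLResourceStorageModeShared]);
    }
    return buffer;
}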


@ -20,10 +20,6 @@
#include "src/gpu/GrSurfaceProxy.h"
#include "src/gpu/mtl/GrMtlUtil.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
GrMtlCaps::GrMtlCaps(const GrContextOptions& contextOptions, const id<MTLDevice> device,
MTLFeatureSet featureSet)
: INHERITED(contextOptions) {


@ -37,7 +37,7 @@ public:
GrMtlOpsRenderPass* opsRenderPass);
void addCompletedHandler(MTLCommandBufferHandler block) {
[fCmdBuffer addCompletedHandler:block];
[*fCmdBuffer addCompletedHandler:block];
}
void addGrBuffer(sk_sp<const GrBuffer> buffer) {
@ -48,25 +48,24 @@ public:
void encodeWaitForEvent(id<MTLEvent>, uint64_t value) SK_API_AVAILABLE(macos(10.14), ios(12.0));
void waitUntilCompleted() {
[fCmdBuffer waitUntilCompleted];
[*fCmdBuffer waitUntilCompleted];
}
void callFinishedCallbacks() { fFinishedCallbacks.reset(); }
private:
static const int kInitialTrackedResourcesCount = 32;
GrMtlCommandBuffer(id<MTLCommandBuffer> cmdBuffer)
: fCmdBuffer(cmdBuffer)
, fPreviousRenderPassDescriptor(nil)
GrMtlCommandBuffer(sk_cf_obj<id<MTLCommandBuffer>> cmdBuffer)
: fCmdBuffer(std::move(cmdBuffer))
, fHasWork(false) {}
void endAllEncoding();
id<MTLCommandBuffer> fCmdBuffer;
id<MTLBlitCommandEncoder> fActiveBlitCommandEncoder;
id<MTLRenderCommandEncoder> fActiveRenderCommandEncoder;
MTLRenderPassDescriptor* fPreviousRenderPassDescriptor;
bool fHasWork;
sk_cf_obj<id<MTLCommandBuffer>> fCmdBuffer;
sk_cf_obj<id<MTLBlitCommandEncoder>> fActiveBlitCommandEncoder;
sk_cf_obj<id<MTLRenderCommandEncoder>> fActiveRenderCommandEncoder;
sk_cf_obj<MTLRenderPassDescriptor*> fPreviousRenderPassDescriptor;
bool fHasWork;
SkTArray<sk_sp<GrRefCntedCallback>> fFinishedCallbacks;


@ -11,18 +11,13 @@
#include "src/gpu/mtl/GrMtlOpsRenderPass.h"
#include "src/gpu/mtl/GrMtlPipelineState.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
sk_sp<GrMtlCommandBuffer> GrMtlCommandBuffer::Make(id<MTLCommandQueue> queue) {
id<MTLCommandBuffer> mtlCommandBuffer;
mtlCommandBuffer = [queue commandBuffer];
sk_cf_obj<id<MTLCommandBuffer>> mtlCommandBuffer(sk_ref_cf_obj([queue commandBuffer]));
if (nil == mtlCommandBuffer) {
return nullptr;
}
mtlCommandBuffer.label = @"GrMtlCommandBuffer::Create";
[*mtlCommandBuffer setLabel: @"GrMtlCommandBuffer::Create"];
return sk_sp<GrMtlCommandBuffer>(new GrMtlCommandBuffer(mtlCommandBuffer));
}
@ -31,23 +26,21 @@ GrMtlCommandBuffer::~GrMtlCommandBuffer() {
this->endAllEncoding();
fTrackedGrBuffers.reset();
this->callFinishedCallbacks();
fCmdBuffer = nil;
}
id<MTLBlitCommandEncoder> GrMtlCommandBuffer::getBlitCommandEncoder() {
if (nil != fActiveRenderCommandEncoder) {
[fActiveRenderCommandEncoder endEncoding];
if (fActiveRenderCommandEncoder) {
[*fActiveRenderCommandEncoder endEncoding];
fActiveRenderCommandEncoder = nil;
}
if (nil == fActiveBlitCommandEncoder) {
fActiveBlitCommandEncoder = [fCmdBuffer blitCommandEncoder];
if (!fActiveBlitCommandEncoder) {
fActiveBlitCommandEncoder.retain([*fCmdBuffer blitCommandEncoder]);
}
fPreviousRenderPassDescriptor = nil;
fHasWork = true;
return fActiveBlitCommandEncoder;
return fActiveBlitCommandEncoder.get();
}
static bool compatible(const MTLRenderPassAttachmentDescriptor* first,
@ -77,50 +70,51 @@ id<MTLRenderCommandEncoder> GrMtlCommandBuffer::getRenderCommandEncoder(
MTLRenderPassDescriptor* descriptor, const GrMtlPipelineState* pipelineState,
GrMtlOpsRenderPass* opsRenderPass) {
if (nil != fPreviousRenderPassDescriptor) {
if (compatible(fPreviousRenderPassDescriptor.colorAttachments[0],
if (compatible((*fPreviousRenderPassDescriptor).colorAttachments[0],
descriptor.colorAttachments[0], pipelineState) &&
compatible(fPreviousRenderPassDescriptor.stencilAttachment,
compatible((*fPreviousRenderPassDescriptor).stencilAttachment,
descriptor.stencilAttachment, pipelineState)) {
return fActiveRenderCommandEncoder;
return fActiveRenderCommandEncoder.get();
}
}
this->endAllEncoding();
fActiveRenderCommandEncoder = [fCmdBuffer renderCommandEncoderWithDescriptor:descriptor];
fActiveRenderCommandEncoder.retain(
[*fCmdBuffer renderCommandEncoderWithDescriptor:descriptor]);
if (opsRenderPass) {
opsRenderPass->initRenderState(fActiveRenderCommandEncoder);
opsRenderPass->initRenderState(*fActiveRenderCommandEncoder);
}
fPreviousRenderPassDescriptor = descriptor;
fPreviousRenderPassDescriptor.retain(descriptor);
fHasWork = true;
return fActiveRenderCommandEncoder;
return fActiveRenderCommandEncoder.get();
}
bool GrMtlCommandBuffer::commit(bool waitUntilCompleted) {
this->endAllEncoding();
[fCmdBuffer commit];
[*fCmdBuffer commit];
if (waitUntilCompleted) {
this->waitUntilCompleted();
}
if (fCmdBuffer.status == MTLCommandBufferStatusError) {
NSString* description = fCmdBuffer.error.localizedDescription;
if ((*fCmdBuffer).status == MTLCommandBufferStatusError) {
NSString* description = (*fCmdBuffer).error.localizedDescription;
const char* errorString = [description UTF8String];
SkDebugf("Error submitting command buffer: %s\n", errorString);
}
return (fCmdBuffer.status != MTLCommandBufferStatusError);
return ((*fCmdBuffer).status != MTLCommandBufferStatusError);
}
void GrMtlCommandBuffer::endAllEncoding() {
if (fActiveRenderCommandEncoder) {
[fActiveRenderCommandEncoder endEncoding];
fActiveRenderCommandEncoder = nil;
fPreviousRenderPassDescriptor = nil;
[*fActiveRenderCommandEncoder endEncoding];
fActiveRenderCommandEncoder.reset();
fPreviousRenderPassDescriptor.reset();
}
if (fActiveBlitCommandEncoder) {
[fActiveBlitCommandEncoder endEncoding];
fActiveBlitCommandEncoder = nil;
[*fActiveBlitCommandEncoder endEncoding];
fActiveBlitCommandEncoder.reset();
}
}
@ -128,7 +122,7 @@ void GrMtlCommandBuffer::encodeSignalEvent(id<MTLEvent> event, uint64_t eventVal
SkASSERT(fCmdBuffer);
this->endAllEncoding(); // ensure we don't have any active command encoders
if (@available(macOS 10.14, iOS 12.0, *)) {
[fCmdBuffer encodeSignalEvent:event value:eventValue];
[*fCmdBuffer encodeSignalEvent:event value:eventValue];
}
fHasWork = true;
}
@ -138,7 +132,7 @@ void GrMtlCommandBuffer::encodeWaitForEvent(id<MTLEvent> event, uint64_t eventVa
this->endAllEncoding(); // ensure we don't have any active command encoders
// TODO: not sure if needed but probably
if (@available(macOS 10.14, iOS 12.0, *)) {
[fCmdBuffer encodeWaitForEvent:event value:eventValue];
[*fCmdBuffer encodeWaitForEvent:event value:eventValue];
}
fHasWork = true;
}
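
The convention this file leans on (inferred from the diff, not spelled out in the CL): alloc/new/copy results are adopted with reset(), while plain accessors such as -blitCommandEncoder return autoreleased objects and must go through retain():

#import <Metal/Metal.h>
#include "include/ports/SkCFObject.h"

static void encoder_sketch(sk_cf_obj<id<MTLCommandBuffer>>& cmdBuffer) {
    sk_cf_obj<id<MTLBlitCommandEncoder>> encoder;
    // -blitCommandEncoder returns an autoreleased object we don't own, so
    // take our own +1 reference with retain(); adopting it via reset()
    // would over-release once the autorelease pool drains.
    encoder.retain([*cmdBuffer blitCommandEncoder]);
    [*encoder endEncoding];
}   // encoder's destructor balances the retain with CFRelease()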


@ -11,6 +11,7 @@
#import <Metal/Metal.h>
#include "include/gpu/GrTypes.h"
#include "include/ports/SkCFObject.h"
#include "src/core/SkOpts.h"
#include <atomic>
@ -22,9 +23,9 @@ class GrMtlDepthStencil : public SkRefCnt {
public:
static GrMtlDepthStencil* Create(const GrMtlGpu*, const GrStencilSettings&, GrSurfaceOrigin);
~GrMtlDepthStencil() override { fMtlDepthStencilState = nil; }
~GrMtlDepthStencil() override {}
id<MTLDepthStencilState> mtlDepthStencil() const { return fMtlDepthStencilState; }
id<MTLDepthStencilState> mtlDepthStencil() const { return fMtlDepthStencilState.get(); }
struct Key {
struct Face {
@ -54,12 +55,12 @@ public:
}
private:
GrMtlDepthStencil(id<MTLDepthStencilState> mtlDepthStencilState, Key key)
: fMtlDepthStencilState(mtlDepthStencilState)
GrMtlDepthStencil(sk_cf_obj<id<MTLDepthStencilState>> mtlDepthStencilState, Key key)
: fMtlDepthStencilState(std::move(mtlDepthStencilState))
, fKey(key) {}
id<MTLDepthStencilState> fMtlDepthStencilState;
Key fKey;
sk_cf_obj<id<MTLDepthStencilState>> fMtlDepthStencilState;
Key fKey;
};
#endif


@ -9,10 +9,6 @@
#include "src/gpu/mtl/GrMtlDepthStencil.h"
#include "src/gpu/mtl/GrMtlGpu.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
MTLStencilOperation skia_stencil_op_to_mtl(GrStencilOp op) {
switch (op) {
case GrStencilOp::kKeep:
@ -34,7 +30,7 @@ MTLStencilOperation skia_stencil_op_to_mtl(GrStencilOp op) {
}
}
MTLStencilDescriptor* skia_stencil_to_mtl(GrStencilSettings::Face face) {
sk_cf_obj<MTLStencilDescriptor*> skia_stencil_to_mtl(GrStencilSettings::Face face) {
MTLStencilDescriptor* result = [[MTLStencilDescriptor alloc] init];
switch (face.fTest) {
case GrStencilTest::kAlways:
@ -66,25 +62,27 @@ MTLStencilDescriptor* skia_stencil_to_mtl(GrStencilSettings::Face face) {
result.writeMask = face.fWriteMask;
result.depthStencilPassOperation = skia_stencil_op_to_mtl(face.fPassOp);
result.stencilFailureOperation = skia_stencil_op_to_mtl(face.fFailOp);
return result;
return sk_cf_obj<MTLStencilDescriptor*>(result);
}
GrMtlDepthStencil* GrMtlDepthStencil::Create(const GrMtlGpu* gpu,
const GrStencilSettings& stencil,
GrSurfaceOrigin origin) {
MTLDepthStencilDescriptor* desc = [[MTLDepthStencilDescriptor alloc] init];
sk_cf_obj<MTLDepthStencilDescriptor*> desc([[MTLDepthStencilDescriptor alloc] init]);
if (!stencil.isDisabled()) {
if (stencil.isTwoSided()) {
desc.frontFaceStencil = skia_stencil_to_mtl(stencil.postOriginCCWFace(origin));
desc.backFaceStencil = skia_stencil_to_mtl(stencil.postOriginCWFace(origin));
(*desc).frontFaceStencil = skia_stencil_to_mtl(stencil.postOriginCCWFace(origin)).get();
(*desc).backFaceStencil = skia_stencil_to_mtl(stencil.postOriginCWFace(origin)).get();
}
else {
desc.frontFaceStencil = skia_stencil_to_mtl(stencil.singleSidedFace());
desc.backFaceStencil = desc.frontFaceStencil;
(*desc).frontFaceStencil = skia_stencil_to_mtl(stencil.singleSidedFace()).get();
(*desc).backFaceStencil = (*desc).frontFaceStencil;
}
}
return new GrMtlDepthStencil([gpu->device() newDepthStencilStateWithDescriptor: desc],
sk_cf_obj<id<MTLDepthStencilState>> stencilState(
[gpu->device() newDepthStencilStateWithDescriptor:desc.get()]);
return new GrMtlDepthStencil(std::move(stencilState),
GenerateKey(stencil, origin));
}
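
Dot syntax doesn't apply to the wrapper itself, which is why the code above dereferences with operator* before touching Objective-C properties. A small sketch of the two equivalent spellings (the property chosen is only illustrative):

#import <Metal/Metal.h>
#include "include/ports/SkCFObject.h"

static void descriptor_sketch() {
    // [[... alloc] init] returns a +1 reference, adopted by the constructor.
    sk_cf_obj<MTLDepthStencilDescriptor*> desc([[MTLDepthStencilDescriptor alloc] init]);
    (*desc).depthCompareFunction = MTLCompareFunctionAlways;        // via operator*
    [desc.get() setDepthCompareFunction: MTLCompareFunctionAlways]; // message send
}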


@ -24,27 +24,33 @@
#import <Metal/Metal.h>
#include "include/ports/SkCFObject.h"
class GrMtlOpsRenderPass;
class GrMtlTexture;
class GrSemaphore;
struct GrMtlBackendContext;
class GrMtlCommandBuffer;
namespace SkSL {
class Compiler;
}
// TODO: move this to a public interface
struct GrMtlBackendContext {
sk_cf_obj<id<MTLDevice>> fDevice;
sk_cf_obj<id<MTLCommandQueue>> fQueue;
};
class GrMtlGpu : public GrGpu {
public:
static sk_sp<GrGpu> Make(GrDirectContext*, const GrContextOptions&,
id<MTLDevice>, id<MTLCommandQueue>);
static sk_sp<GrGpu> Make(GrDirectContext*, const GrContextOptions&, const GrMtlBackendContext&);
~GrMtlGpu() override;
void disconnect(DisconnectType) override;
const GrMtlCaps& mtlCaps() const { return *fMtlCaps.get(); }
id<MTLDevice> device() const { return fDevice; }
id<MTLDevice> device() const { return fDevice.get(); }
GrMtlResourceProvider& resourceProvider() { return fResourceProvider; }
@ -119,8 +125,7 @@ public:
}
private:
GrMtlGpu(GrDirectContext*, const GrContextOptions&, id<MTLDevice>,
id<MTLCommandQueue>, MTLFeatureSet);
GrMtlGpu(GrDirectContext*, const GrContextOptions&, const GrMtlBackendContext&, MTLFeatureSet);
void destroyResources();
@ -257,8 +262,8 @@ private:
sk_sp<GrMtlCaps> fMtlCaps;
id<MTLDevice> fDevice;
id<MTLCommandQueue> fQueue;
sk_cf_obj<id<MTLDevice>> fDevice;
sk_cf_obj<id<MTLCommandQueue>> fQueue;
sk_sp<GrMtlCommandBuffer> fCurrentCmdBuffer;
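
A minimal sketch of filling in the new GrMtlBackendContext (device and queue creation are illustrative; both calls return +1 references, which the sk_cf_obj fields adopt):

#import <Metal/Metal.h>

static GrMtlBackendContext make_backend_context() {
    GrMtlBackendContext context;
    context.fDevice.reset(MTLCreateSystemDefaultDevice());
    context.fQueue.reset([*context.fDevice newCommandQueue]);  // -new* is +1
    return context;
}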


@ -26,10 +26,6 @@
#import <simd/simd.h>
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
static bool get_feature_set(id<MTLDevice> device, MTLFeatureSet* featureSet) {
// Mac OSX
#ifdef SK_BUILD_FOR_MAC
@ -101,15 +97,15 @@ static bool get_feature_set(id<MTLDevice> device, MTLFeatureSet* featureSet) {
}
sk_sp<GrGpu> GrMtlGpu::Make(GrDirectContext* direct, const GrContextOptions& options,
id<MTLDevice> device, id<MTLCommandQueue> queue) {
if (!device || !queue) {
const GrMtlBackendContext& context) {
if (!context.fDevice || !context.fQueue) {
return nullptr;
}
MTLFeatureSet featureSet;
if (!get_feature_set(device, &featureSet)) {
if (!get_feature_set(context.fDevice.get(), &featureSet)) {
return nullptr;
}
return sk_sp<GrGpu>(new GrMtlGpu(direct, options, device, queue, featureSet));
return sk_sp<GrGpu>(new GrMtlGpu(direct, options, context, featureSet));
}
// This constant determines how many OutstandingCommandBuffers are allocated together as a block in
@ -119,18 +115,18 @@ sk_sp<GrGpu> GrMtlGpu::Make(GrDirectContext* direct, const GrContextOptions& opt
static const int kDefaultOutstandingAllocCnt = 8;
GrMtlGpu::GrMtlGpu(GrDirectContext* direct, const GrContextOptions& options,
id<MTLDevice> device, id<MTLCommandQueue> queue, MTLFeatureSet featureSet)
const GrMtlBackendContext& context, MTLFeatureSet featureSet)
: INHERITED(direct)
, fDevice(device)
, fQueue(queue)
, fDevice(context.fDevice)
, fQueue(context.fQueue)
, fOutstandingCommandBuffers(sizeof(OutstandingCommandBuffer), kDefaultOutstandingAllocCnt)
, fCompiler(new SkSL::Compiler())
, fResourceProvider(this)
, fStagingBufferManager(this)
, fDisconnected(false) {
fMtlCaps.reset(new GrMtlCaps(options, fDevice, featureSet));
fMtlCaps.reset(new GrMtlCaps(options, fDevice.get(), featureSet));
fCaps = fMtlCaps;
fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue.get());
}
GrMtlGpu::~GrMtlGpu() {
@ -218,7 +214,7 @@ bool GrMtlGpu::submitCommandBuffer(SyncQueue sync) {
}
// Create a new command buffer for the next submit
fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue.get());
// This should be done after we have a new command buffer in case the freeing of any
// resources held by a finished command buffer causes us to send a new command to the gpu
@ -441,8 +437,8 @@ bool GrMtlGpu::clearTexture(GrMtlTexture* tex, size_t bpp, uint32_t levelMask) {
if (@available(macOS 10.11, iOS 9.0, *)) {
options |= MTLResourceStorageModePrivate;
}
id<MTLBuffer> transferBuffer = [fDevice newBufferWithLength: combinedBufferSize
options: options];
sk_cf_obj<id<MTLBuffer>> transferBuffer([*fDevice newBufferWithLength: combinedBufferSize
options: options]);
if (nil == transferBuffer) {
return false;
}
@ -452,7 +448,7 @@ bool GrMtlGpu::clearTexture(GrMtlTexture* tex, size_t bpp, uint32_t levelMask) {
NSRange clearRange;
clearRange.location = 0;
clearRange.length = combinedBufferSize;
[blitCmdEncoder fillBuffer: transferBuffer
[blitCmdEncoder fillBuffer: transferBuffer.get()
range: clearRange
value: 0];
@ -464,7 +460,7 @@ bool GrMtlGpu::clearTexture(GrMtlTexture* tex, size_t bpp, uint32_t levelMask) {
if (levelMask & (1 << currentMipLevel)) {
const size_t rowBytes = currentWidth * bpp;
[blitCmdEncoder copyFromBuffer: transferBuffer
[blitCmdEncoder copyFromBuffer: transferBuffer.get()
sourceOffset: individualMipOffsets[currentMipLevel]
sourceBytesPerRow: rowBytes
sourceBytesPerImage: rowBytes * currentHeight
@ -526,30 +522,30 @@ sk_sp<GrTexture> GrMtlGpu::onCreateTexture(SkISize dimensions,
// This TexDesc refers to the texture that will be read by the client. Thus even if msaa is
// requested, this TexDesc describes the resolved texture. Therefore we always have samples
// set to 1.
MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
texDesc.textureType = MTLTextureType2D;
texDesc.pixelFormat = mtlPixelFormat;
texDesc.width = dimensions.fWidth;
texDesc.height = dimensions.fHeight;
texDesc.depth = 1;
texDesc.mipmapLevelCount = mipLevelCount;
texDesc.sampleCount = 1;
texDesc.arrayLength = 1;
sk_cf_obj<MTLTextureDescriptor*> texDesc([[MTLTextureDescriptor alloc] init]);
(*texDesc).textureType = MTLTextureType2D;
(*texDesc).pixelFormat = mtlPixelFormat;
(*texDesc).width = dimensions.fWidth;
(*texDesc).height = dimensions.fHeight;
(*texDesc).depth = 1;
(*texDesc).mipmapLevelCount = mipLevelCount;
(*texDesc).sampleCount = 1;
(*texDesc).arrayLength = 1;
// Make all textures have private gpu only access. We can use transfer buffers or textures
// to copy to them.
if (@available(macOS 10.11, iOS 9.0, *)) {
texDesc.storageMode = MTLStorageModePrivate;
texDesc.usage = MTLTextureUsageShaderRead;
texDesc.usage |= (renderable == GrRenderable::kYes) ? MTLTextureUsageRenderTarget : 0;
(*texDesc).storageMode = MTLStorageModePrivate;
(*texDesc).usage = MTLTextureUsageShaderRead;
(*texDesc).usage |= (renderable == GrRenderable::kYes) ? MTLTextureUsageRenderTarget : 0;
}
GrMipmapStatus mipmapStatus =
mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
if (renderable == GrRenderable::kYes) {
tex = GrMtlTextureRenderTarget::MakeNewTextureRenderTarget(
this, budgeted, dimensions, renderTargetSampleCnt, texDesc, mipmapStatus);
this, budgeted, dimensions, renderTargetSampleCnt, texDesc.get(), mipmapStatus);
} else {
tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, texDesc, mipmapStatus);
tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, texDesc.get(), mipmapStatus);
}
if (!tex) {
@ -594,27 +590,28 @@ sk_sp<GrTexture> GrMtlGpu::onCreateCompressedTexture(SkISize dimensions,
// requested, this TexDesc describes the resolved texture. Therefore we always have samples
// set to 1.
// Compressed textures with MIP levels or multiple samples are not supported as of now.
MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
texDesc.textureType = MTLTextureType2D;
texDesc.pixelFormat = mtlPixelFormat;
texDesc.width = dimensions.width();
texDesc.height = dimensions.height();
texDesc.depth = 1;
texDesc.mipmapLevelCount = numMipLevels;
texDesc.sampleCount = 1;
texDesc.arrayLength = 1;
sk_cf_obj<MTLTextureDescriptor*> texDesc([[MTLTextureDescriptor alloc] init]);
(*texDesc).textureType = MTLTextureType2D;
(*texDesc).pixelFormat = mtlPixelFormat;
(*texDesc).width = dimensions.width();
(*texDesc).height = dimensions.height();
(*texDesc).depth = 1;
(*texDesc).mipmapLevelCount = numMipLevels;
(*texDesc).sampleCount = 1;
(*texDesc).arrayLength = 1;
// Make all textures have private gpu only access. We can use transfer buffers or textures
// to copy to them.
if (@available(macOS 10.11, iOS 9.0, *)) {
texDesc.storageMode = MTLStorageModePrivate;
texDesc.usage = MTLTextureUsageShaderRead;
(*texDesc).storageMode = MTLStorageModePrivate;
(*texDesc).usage = MTLTextureUsageShaderRead;
}
GrMipmapStatus mipmapStatus = (mipMapped == GrMipmapped::kYes)
? GrMipmapStatus::kValid
: GrMipmapStatus::kNotAllocated;
auto tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, texDesc, mipmapStatus);
auto tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, texDesc.get(),
mipmapStatus);
if (!tex) {
return nullptr;
}
@ -678,72 +675,72 @@ sk_sp<GrTexture> GrMtlGpu::onCreateCompressedTexture(SkISize dimensions,
return std::move(tex);
}
static id<MTLTexture> get_texture_from_backend(const GrBackendTexture& backendTex) {
static sk_cf_obj<id<MTLTexture>> get_texture_from_backend(const GrBackendTexture& backendTex) {
GrMtlTextureInfo textureInfo;
if (!backendTex.getMtlTextureInfo(&textureInfo)) {
return nil;
}
return GrGetMTLTexture(textureInfo.fTexture.get());
return GrRetainMTLTexture(textureInfo.fTexture.get());
}
static id<MTLTexture> get_texture_from_backend(const GrBackendRenderTarget& backendRT) {
static sk_cf_obj<id<MTLTexture>> get_texture_from_backend(const GrBackendRenderTarget& backendRT) {
GrMtlTextureInfo textureInfo;
if (!backendRT.getMtlTextureInfo(&textureInfo)) {
return nil;
}
return GrGetMTLTexture(textureInfo.fTexture.get());
return GrRetainMTLTexture(textureInfo.fTexture.get());
}
sk_sp<GrTexture> GrMtlGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
GrWrapOwnership,
GrWrapCacheable cacheable,
GrIOType ioType) {
id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
sk_cf_obj<id<MTLTexture>> mtlTexture = get_texture_from_backend(backendTex);
if (!mtlTexture) {
return nullptr;
}
return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
ioType);
return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), std::move(mtlTexture),
cacheable, ioType);
}
sk_sp<GrTexture> GrMtlGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
GrWrapOwnership,
GrWrapCacheable cacheable) {
id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
sk_cf_obj<id<MTLTexture>> mtlTexture = get_texture_from_backend(backendTex);
if (!mtlTexture) {
return nullptr;
}
return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
kRead_GrIOType);
return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), std::move(mtlTexture),
cacheable, kRead_GrIOType);
}
sk_sp<GrTexture> GrMtlGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
int sampleCnt,
GrWrapOwnership,
GrWrapCacheable cacheable) {
id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
sk_cf_obj<id<MTLTexture>> mtlTexture = get_texture_from_backend(backendTex);
if (!mtlTexture) {
return nullptr;
}
const GrMtlCaps& caps = this->mtlCaps();
MTLPixelFormat format = mtlTexture.pixelFormat;
MTLPixelFormat format = (*mtlTexture).pixelFormat;
if (!caps.isFormatRenderable(format, sampleCnt)) {
return nullptr;
}
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
SkASSERT(MTLTextureUsageRenderTarget & (*mtlTexture).usage);
}
sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, format);
SkASSERT(sampleCnt);
return GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
this, backendTex.dimensions(), sampleCnt, mtlTexture, cacheable);
this, backendTex.dimensions(), sampleCnt, std::move(mtlTexture), cacheable);
}
sk_sp<GrRenderTarget> GrMtlGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
@ -751,33 +748,33 @@ sk_sp<GrRenderTarget> GrMtlGpu::onWrapBackendRenderTarget(const GrBackendRenderT
if (backendRT.sampleCnt() > 1) {
return nullptr;
}
id<MTLTexture> mtlTexture = get_texture_from_backend(backendRT);
sk_cf_obj<id<MTLTexture>> mtlTexture = get_texture_from_backend(backendRT);
if (!mtlTexture) {
return nullptr;
}
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
SkASSERT(MTLTextureUsageRenderTarget & (*mtlTexture).usage);
}
return GrMtlRenderTarget::MakeWrappedRenderTarget(this, backendRT.dimensions(),
backendRT.sampleCnt(), mtlTexture);
backendRT.sampleCnt(), std::move(mtlTexture));
}
sk_sp<GrRenderTarget> GrMtlGpu::onWrapBackendTextureAsRenderTarget(
const GrBackendTexture& backendTex, int sampleCnt) {
id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
sk_cf_obj<id<MTLTexture>> mtlTexture = get_texture_from_backend(backendTex);
if (!mtlTexture) {
return nullptr;
}
MTLPixelFormat format = mtlTexture.pixelFormat;
MTLPixelFormat format = (*mtlTexture).pixelFormat;
if (!this->mtlCaps().isFormatRenderable(format, sampleCnt)) {
return nullptr;
}
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
SkASSERT(MTLTextureUsageRenderTarget & (*mtlTexture).usage);
}
sampleCnt = this->mtlCaps().getRenderTargetSampleCount(sampleCnt, format);
@ -786,7 +783,7 @@ sk_sp<GrRenderTarget> GrMtlGpu::onWrapBackendTextureAsRenderTarget(
}
return GrMtlRenderTarget::MakeWrappedRenderTarget(this, backendTex.dimensions(), sampleCnt,
mtlTexture);
std::move(mtlTexture));
}
bool GrMtlGpu::onRegenerateMipMapLevels(GrTexture* texture) {
@ -870,18 +867,18 @@ bool GrMtlGpu::createMtlTextureForBackendSurface(MTLPixelFormat mtlFormat,
return false;
}
bool useMips = mipMapped == GrMipmapped::kYes;
MTLTextureDescriptor* desc =
[MTLTextureDescriptor texture2DDescriptorWithPixelFormat: mtlFormat
width: dimensions.width()
height: dimensions.height()
mipmapped: mipMapped == GrMipmapped::kYes];
[MTLTextureDescriptor texture2DDescriptorWithPixelFormat: mtlFormat
width: dimensions.width()
height: dimensions.height()
mipmapped: useMips];
if (@available(macOS 10.11, iOS 9.0, *)) {
desc.storageMode = MTLStorageModePrivate;
desc.usage = texturable == GrTexturable::kYes ? MTLTextureUsageShaderRead : 0;
desc.usage |= renderable == GrRenderable::kYes ? MTLTextureUsageRenderTarget : 0;
}
id<MTLTexture> testTexture = [fDevice newTextureWithDescriptor: desc];
info->fTexture.reset(GrRetainPtrFromId(testTexture));
info->fTexture.reset([*fDevice newTextureWithDescriptor:desc]);
return true;
}
@ -908,7 +905,7 @@ bool GrMtlGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
GrMtlTextureInfo info;
SkAssertResult(backendTexture.getMtlTextureInfo(&info));
id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());
id<MTLTexture> mtlTexture = (id<MTLTexture>)(info.fTexture.get());
const MTLPixelFormat mtlFormat = mtlTexture.pixelFormat;
@ -1065,7 +1062,7 @@ bool GrMtlGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
if (!tex.getMtlTextureInfo(&info)) {
return false;
}
id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());
id<MTLTexture> mtlTexture = (id<MTLTexture>)(info.fTexture.get());
if (!mtlTexture) {
return false;
}
@ -1229,16 +1226,17 @@ bool GrMtlGpu::onReadPixels(GrSurface* surface, int left, int top, int width, in
#endif
}
id<MTLBuffer> transferBuffer = [fDevice newBufferWithLength: transBufferImageBytes
options: options];
sk_cf_obj<id<MTLBuffer>> transferBuffer([*fDevice newBufferWithLength: transBufferImageBytes
options: options]);
if (!this->readOrTransferPixels(surface, left, top, width, height, dstColorType, transferBuffer,
0, transBufferImageBytes, transBufferRowBytes)) {
if (!this->readOrTransferPixels(surface, left, top, width, height, dstColorType,
transferBuffer.get(), 0, transBufferImageBytes,
transBufferRowBytes)) {
return false;
}
this->submitCommandBuffer(kForce_SyncQueue);
const void* mappedMemory = transferBuffer.contents;
const void* mappedMemory = (*transferBuffer).contents;
SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, transBufferRowBytes, height);
@ -1322,7 +1320,7 @@ bool GrMtlGpu::readOrTransferPixels(GrSurface* surface, int left, int top, int w
return false;
}
id<MTLTexture> mtlTexture;
id<MTLTexture> mtlTexture = nil;
if (GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(surface->asRenderTarget())) {
if (rt->numSamples() > 1) {
SkASSERT(rt->requiresManualMSAAResolve()); // msaa-render-to-texture not yet supported.
@ -1350,7 +1348,7 @@ bool GrMtlGpu::readOrTransferPixels(GrSurface* surface, int left, int top, int w
destinationBytesPerImage: imageBytes];
#ifdef SK_BUILD_FOR_MAC
// Sync GPU data back to the CPU
[blitCmdEncoder synchronizeResource: transferBuffer];
[blitCmdEncoder synchronizeResource:transferBuffer];
#endif
return true;
@ -1360,18 +1358,18 @@ GrFence SK_WARN_UNUSED_RESULT GrMtlGpu::insertFence() {
GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
// We create a semaphore and signal it within the current
// command buffer's completion handler.
dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
sk_cf_obj<dispatch_semaphore_t> semaphore(dispatch_semaphore_create(0));
cmdBuffer->addCompletedHandler(^(id <MTLCommandBuffer>commandBuffer) {
dispatch_semaphore_signal(semaphore);
dispatch_semaphore_signal(semaphore.get());
});
const void* cfFence = (__bridge_retained const void*) semaphore;
CFTypeRef cfFence = (CFTypeRef)semaphore.release();
return (GrFence) cfFence;
}
bool GrMtlGpu::waitFence(GrFence fence) {
const void* cfFence = (const void*) fence;
dispatch_semaphore_t semaphore = (__bridge dispatch_semaphore_t)cfFence;
CFTypeRef cfFence = (CFTypeRef) fence;
dispatch_semaphore_t semaphore = (dispatch_semaphore_t)cfFence;
long result = dispatch_semaphore_wait(semaphore, 0);
@ -1379,14 +1377,18 @@ bool GrMtlGpu::waitFence(GrFence fence) {
}
void GrMtlGpu::deleteFence(GrFence fence) const {
const void* cfFence = (const void*) fence;
// In this case it's easier to release in CoreFoundation than depend on ARC
CFTypeRef cfFence = (CFTypeRef) fence;
// Just release in CoreFoundation
CFRelease(cfFence);
}
std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrMtlGpu::makeSemaphore(bool /*isOwned*/) {
SkASSERT(this->caps()->semaphoreSupport());
return GrMtlSemaphore::Make(this);
if (@available(macOS 10.14, iOS 12.0, *)) {
return GrMtlSemaphore::Make(this);
} else {
return nullptr;
}
}
std::unique_ptr<GrSemaphore> GrMtlGpu::wrapBackendSemaphore(
@ -1394,7 +1396,11 @@ std::unique_ptr<GrSemaphore> GrMtlGpu::wrapBackendSemaphore(
GrResourceProvider::SemaphoreWrapType wrapType,
GrWrapOwnership /*ownership*/) {
SkASSERT(this->caps()->semaphoreSupport());
return GrMtlSemaphore::MakeWrapped(semaphore.mtlSemaphore(), semaphore.mtlValue());
if (@available(macOS 10.14, iOS 12.0, *)) {
return GrMtlSemaphore::MakeWrapped(semaphore.mtlSemaphore(), semaphore.mtlValue());
} else {
return nullptr;
}
}
void GrMtlGpu::insertSemaphore(GrSemaphore* semaphore) {
@ -1421,15 +1427,12 @@ void GrMtlGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect&) {
}
void GrMtlGpu::resolveTexture(id<MTLTexture> resolveTexture, id<MTLTexture> colorTexture) {
auto renderPassDesc = [MTLRenderPassDescriptor renderPassDescriptor];
renderPassDesc.colorAttachments[0].texture = colorTexture;
renderPassDesc.colorAttachments[0].slice = 0;
renderPassDesc.colorAttachments[0].level = 0;
renderPassDesc.colorAttachments[0].resolveTexture = resolveTexture;
renderPassDesc.colorAttachments[0].slice = 0;
renderPassDesc.colorAttachments[0].level = 0;
renderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
renderPassDesc.colorAttachments[0].storeAction = MTLStoreActionMultisampleResolve;
MTLRenderPassDescriptor* renderPassDesc = [MTLRenderPassDescriptor renderPassDescriptor];
MTLRenderPassColorAttachmentDescriptor* colorAttachment = renderPassDesc.colorAttachments[0];
colorAttachment.texture = colorTexture;
colorAttachment.resolveTexture = resolveTexture;
colorAttachment.loadAction = MTLLoadActionLoad;
colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
id<MTLRenderCommandEncoder> cmdEncoder =
this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr, nullptr);
@ -1442,7 +1445,7 @@ void GrMtlGpu::testingOnly_startCapture() {
if (@available(macOS 10.13, iOS 11.0, *)) {
// TODO: add Metal 3 interface as well
MTLCaptureManager* captureManager = [MTLCaptureManager sharedCaptureManager];
[captureManager startCaptureWithDevice: fDevice];
[captureManager startCaptureWithDevice:fDevice.get()];
}
}
@ -1462,22 +1465,22 @@ void GrMtlGpu::onDumpJSON(SkJSONWriter* writer) const {
writer->beginObject("Metal GPU");
writer->beginObject("Device");
writer->appendString("name", fDevice.name.UTF8String);
writer->appendString("name", (*fDevice).name.UTF8String);
#ifdef SK_BUILD_FOR_MAC
if (@available(macOS 10.11, *)) {
writer->appendBool("isHeadless", fDevice.isHeadless);
writer->appendBool("isLowPower", fDevice.isLowPower);
writer->appendBool("isHeadless", (*fDevice).isHeadless);
writer->appendBool("isLowPower", (*fDevice).isLowPower);
}
if (@available(macOS 10.13, *)) {
writer->appendBool("isRemovable", fDevice.isRemovable);
writer->appendBool("isRemovable", (*fDevice).isRemovable);
}
#endif
if (@available(macOS 10.13, iOS 11.0, *)) {
writer->appendU64("registryID", fDevice.registryID);
writer->appendU64("registryID", (*fDevice).registryID);
}
#if defined(SK_BUILD_FOR_MAC) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
if (@available(macOS 10.15, *)) {
switch (fDevice.location) {
switch ((*fDevice).location) {
case MTLDeviceLocationBuiltIn:
writer->appendString("location", "builtIn");
break;
@ -1494,66 +1497,66 @@ void GrMtlGpu::onDumpJSON(SkJSONWriter* writer) const {
writer->appendString("location", "unknown");
break;
}
writer->appendU64("locationNumber", fDevice.locationNumber);
writer->appendU64("maxTransferRate", fDevice.maxTransferRate);
writer->appendU64("locationNumber", (*fDevice).locationNumber);
writer->appendU64("maxTransferRate", (*fDevice).maxTransferRate);
}
#endif // SK_BUILD_FOR_MAC
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500 || __IPHONE_OS_VERSION_MAX_ALLOWED >= 130000
if (@available(macOS 10.15, iOS 13.0, *)) {
writer->appendBool("hasUnifiedMemory", fDevice.hasUnifiedMemory);
writer->appendBool("hasUnifiedMemory", (*fDevice).hasUnifiedMemory);
}
#endif
#ifdef SK_BUILD_FOR_MAC
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
if (@available(macOS 10.15, *)) {
writer->appendU64("peerGroupID", fDevice.peerGroupID);
writer->appendU32("peerCount", fDevice.peerCount);
writer->appendU32("peerIndex", fDevice.peerIndex);
writer->appendU64("peerGroupID", (*fDevice).peerGroupID);
writer->appendU32("peerCount", (*fDevice).peerCount);
writer->appendU32("peerIndex", (*fDevice).peerIndex);
}
#endif
if (@available(macOS 10.12, *)) {
writer->appendU64("recommendedMaxWorkingSetSize", fDevice.recommendedMaxWorkingSetSize);
writer->appendU64("recommendedMaxWorkingSetSize", (*fDevice).recommendedMaxWorkingSetSize);
}
#endif // SK_BUILD_FOR_MAC
if (@available(macOS 10.13, iOS 11.0, *)) {
writer->appendU64("currentAllocatedSize", fDevice.currentAllocatedSize);
writer->appendU64("maxThreadgroupMemoryLength", fDevice.maxThreadgroupMemoryLength);
writer->appendU64("currentAllocatedSize", (*fDevice).currentAllocatedSize);
writer->appendU64("maxThreadgroupMemoryLength", (*fDevice).maxThreadgroupMemoryLength);
}
if (@available(macOS 10.11, iOS 9.0, *)) {
writer->beginObject("maxThreadsPerThreadgroup");
writer->appendU64("width", fDevice.maxThreadsPerThreadgroup.width);
writer->appendU64("height", fDevice.maxThreadsPerThreadgroup.height);
writer->appendU64("depth", fDevice.maxThreadsPerThreadgroup.depth);
writer->appendU64("width", (*fDevice).maxThreadsPerThreadgroup.width);
writer->appendU64("height", (*fDevice).maxThreadsPerThreadgroup.height);
writer->appendU64("depth", (*fDevice).maxThreadsPerThreadgroup.depth);
writer->endObject();
}
if (@available(macOS 10.13, iOS 11.0, *)) {
writer->appendBool("areProgrammableSamplePositionsSupported",
fDevice.areProgrammableSamplePositionsSupported);
(*fDevice).areProgrammableSamplePositionsSupported);
writer->appendBool("areRasterOrderGroupsSupported",
fDevice.areRasterOrderGroupsSupported);
(*fDevice).areRasterOrderGroupsSupported);
}
#ifdef SK_BUILD_FOR_MAC
if (@available(macOS 10.11, *)) {
writer->appendBool("isDepth24Stencil8PixelFormatSupported",
fDevice.isDepth24Stencil8PixelFormatSupported);
(*fDevice).isDepth24Stencil8PixelFormatSupported);
}
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
if (@available(macOS 10.15, *)) {
writer->appendBool("areBarycentricCoordsSupported",
fDevice.areBarycentricCoordsSupported);
(*fDevice).areBarycentricCoordsSupported);
writer->appendBool("supportsShaderBarycentricCoordinates",
fDevice.supportsShaderBarycentricCoordinates);
(*fDevice).supportsShaderBarycentricCoordinates);
}
#endif
#endif // SK_BUILD_FOR_MAC
if (@available(macOS 10.14, iOS 12.0, *)) {
writer->appendU64("maxBufferLength", fDevice.maxBufferLength);
writer->appendU64("maxBufferLength", (*fDevice).maxBufferLength);
}
if (@available(macOS 10.13, iOS 11.0, *)) {
switch (fDevice.readWriteTextureSupport) {
switch ((*fDevice).readWriteTextureSupport) {
case MTLReadWriteTextureTier1:
writer->appendString("readWriteTextureSupport", "tier1");
break;
@ -1567,7 +1570,7 @@ void GrMtlGpu::onDumpJSON(SkJSONWriter* writer) const {
writer->appendString("readWriteTextureSupport", "unknown");
break;
}
switch (fDevice.argumentBuffersSupport) {
switch ((*fDevice).argumentBuffersSupport) {
case MTLArgumentBuffersTier1:
writer->appendString("argumentBuffersSupport", "tier1");
break;
@ -1580,16 +1583,17 @@ void GrMtlGpu::onDumpJSON(SkJSONWriter* writer) const {
}
}
if (@available(macOS 10.14, iOS 12.0, *)) {
writer->appendU64("maxArgumentBufferSamplerCount", fDevice.maxArgumentBufferSamplerCount);
writer->appendU64("maxArgumentBufferSamplerCount",
(*fDevice).maxArgumentBufferSamplerCount);
}
#ifdef SK_BUILD_FOR_IOS
if (@available(iOS 13.0, *)) {
writer->appendU64("sparseTileSizeInBytes", fDevice.sparseTileSizeInBytes);
writer->appendU64("sparseTileSizeInBytes", (*fDevice).sparseTileSizeInBytes);
}
#endif
writer->endObject();
writer->appendString("queue", fQueue.label.UTF8String);
writer->appendString("queue", (*fQueue).label.UTF8String);
writer->appendBool("disconnected", fDisconnected);
writer->endObject();
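
The fence plumbing earlier in this file hands a +1 reference across the C-style GrFence boundary: release() gives up ownership without a CFRelease(), and deleteFence() balances it later. A condensed sketch of that round trip (a reading of the diff, not a separate API):

#include <CoreFoundation/CoreFoundation.h>
#include <dispatch/dispatch.h>
#include "include/gpu/GrTypes.h"
#include "include/ports/SkCFObject.h"

static GrFence make_fence_sketch() {
    sk_cf_obj<dispatch_semaphore_t> semaphore(dispatch_semaphore_create(0));
    // release() transfers the +1 reference out of the wrapper; nothing is
    // released until the matching CFRelease() below.
    return (GrFence)(CFTypeRef)semaphore.release();
}

static void delete_fence_sketch(GrFence fence) {
    CFRelease((CFTypeRef)fence);  // balances the reference handed out above
}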


@ -64,11 +64,11 @@ private:
GrMtlGpu* fGpu;
id<MTLRenderCommandEncoder> fActiveRenderCmdEncoder;
GrMtlPipelineState* fActivePipelineState = nullptr;
MTLPrimitiveType fActivePrimitiveType;
MTLRenderPassDescriptor* fRenderPassDesc;
SkRect fBounds;
size_t fCurrentVertexStride;
GrMtlPipelineState* fActivePipelineState = nullptr;
MTLPrimitiveType fActivePrimitiveType;
sk_cf_obj<MTLRenderPassDescriptor*> fRenderPassDesc;
SkRect fBounds;
size_t fCurrentVertexStride;
static constexpr size_t kNumBindings = GrMtlUniformHandler::kLastUniformBinding + 3;
struct {


@ -15,10 +15,6 @@
#include "src/gpu/mtl/GrMtlRenderTarget.h"
#include "src/gpu/mtl/GrMtlTexture.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
GrMtlOpsRenderPass::GrMtlOpsRenderPass(GrMtlGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo)
@ -34,7 +30,7 @@ void GrMtlOpsRenderPass::precreateCmdEncoder() {
// For clears, we may not have an associated draw. So we prepare a cmdEncoder that
// will be submitted whether there's a draw or not.
SkDEBUGCODE(id<MTLRenderCommandEncoder> cmdEncoder =)
fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc.get(), nullptr, this);
SkASSERT(nil != cmdEncoder);
}
@ -79,7 +75,7 @@ bool GrMtlOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo,
if (!fActiveRenderCmdEncoder) {
fActiveRenderCmdEncoder =
fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc.get(), nullptr, this);
}
[fActiveRenderCmdEncoder setRenderPipelineState:fActivePipelineState->mtlPipelineState()];
@ -128,13 +124,13 @@ void GrMtlOpsRenderPass::onClear(const GrScissorState& scissor, const SkPMColor4
// Ideally we should never end up here since all clears should either be done as draws or
// load ops in metal. However, if a client inserts a wait op we need to handle it.
fRenderPassDesc.colorAttachments[0].clearColor =
MTLClearColorMake(color[0], color[1], color[2], color[3]);
fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionClear;
(*fRenderPassDesc).colorAttachments[0].clearColor =
MTLClearColorMake(color[0], color[1], color[2], color[3]);
(*fRenderPassDesc).colorAttachments[0].loadAction = MTLLoadActionClear;
this->precreateCmdEncoder();
fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
(*fRenderPassDesc).colorAttachments[0].loadAction = MTLLoadActionLoad;
fActiveRenderCmdEncoder =
fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc.get(), nullptr, this);
}
void GrMtlOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
@ -150,16 +146,16 @@ void GrMtlOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool
// The contract with the callers does not guarantee that we preserve all bits in the stencil
// during this clear. Thus we will clear the entire stencil to the desired value.
if (insideStencilMask) {
fRenderPassDesc.stencilAttachment.clearStencil = (1 << (stencilBitCount - 1));
(*fRenderPassDesc).stencilAttachment.clearStencil = (1 << (stencilBitCount - 1));
} else {
fRenderPassDesc.stencilAttachment.clearStencil = 0;
(*fRenderPassDesc).stencilAttachment.clearStencil = 0;
}
fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionClear;
(*fRenderPassDesc).stencilAttachment.loadAction = MTLLoadActionClear;
this->precreateCmdEncoder();
fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
(*fRenderPassDesc).stencilAttachment.loadAction = MTLLoadActionLoad;
fActiveRenderCmdEncoder =
fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc.get(), nullptr, this);
}
void GrMtlOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
@ -167,7 +163,7 @@ void GrMtlOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUp
state->doUpload(upload);
// doUpload() creates a blitCommandEncoder, so we need to recreate a renderCommandEncoder
fActiveRenderCmdEncoder =
fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc.get(), nullptr, this);
}
void GrMtlOpsRenderPass::initRenderState(id<MTLRenderCommandEncoder> encoder) {
@ -229,7 +225,7 @@ void GrMtlOpsRenderPass::setupRenderPass(
renderPassDesc.stencilAttachment.storeAction =
mtlStoreAction[static_cast<int>(stencilInfo.fStoreOp)];
fRenderPassDesc = renderPassDesc;
fRenderPassDesc.retain(renderPassDesc);
// Manage initial clears
if (colorInfo.fLoadOp == GrLoadOp::kClear || stencilInfo.fLoadOp == GrLoadOp::kClear) {
@ -237,10 +233,10 @@ void GrMtlOpsRenderPass::setupRenderPass(
fRenderTarget->height());
this->precreateCmdEncoder();
if (colorInfo.fLoadOp == GrLoadOp::kClear) {
fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
(*fRenderPassDesc).colorAttachments[0].loadAction = MTLLoadActionLoad;
}
if (stencilInfo.fLoadOp == GrLoadOp::kClear) {
fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
(*fRenderPassDesc).stencilAttachment.loadAction = MTLLoadActionLoad;
}
} else {
fBounds.setEmpty();


@ -33,7 +33,7 @@ public:
GrMtlPipelineState(
GrMtlGpu* gpu,
id<MTLRenderPipelineState> pipelineState,
sk_cf_obj<id<MTLRenderPipelineState>> pipelineState,
MTLPixelFormat pixelFormat,
const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
const UniformInfoArray& uniforms,
@ -43,7 +43,7 @@ public:
std::unique_ptr<GrGLSLXferProcessor> xferPRocessor,
std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors);
id<MTLRenderPipelineState> mtlPipelineState() { return fPipelineState; }
id<MTLRenderPipelineState> mtlPipelineState() { return fPipelineState.get(); }
void setData(const GrRenderTarget*, const GrProgramInfo&);
@ -114,8 +114,8 @@ private:
};
GrMtlGpu* fGpu;
id<MTLRenderPipelineState> fPipelineState;
MTLPixelFormat fPixelFormat;
sk_cf_obj<id<MTLRenderPipelineState>> fPipelineState;
MTLPixelFormat fPixelFormat;
RenderTargetState fRenderTargetState;
GrGLSLBuiltinUniformHandles fBuiltinUniformHandles;


@ -17,10 +17,6 @@
#include "src/gpu/mtl/GrMtlGpu.h"
#include "src/gpu/mtl/GrMtlTexture.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
GrMtlPipelineState::SamplerBindings::SamplerBindings(GrSamplerState state,
GrTexture* texture,
GrMtlGpu* gpu)
@ -30,7 +26,7 @@ GrMtlPipelineState::SamplerBindings::SamplerBindings(GrSamplerState state,
GrMtlPipelineState::GrMtlPipelineState(
GrMtlGpu* gpu,
id<MTLRenderPipelineState> pipelineState,
sk_cf_obj<id<MTLRenderPipelineState>> pipelineState,
MTLPixelFormat pixelFormat,
const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
const UniformInfoArray& uniforms,
@ -40,7 +36,7 @@ GrMtlPipelineState::GrMtlPipelineState(
std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors)
: fGpu(gpu)
, fPipelineState(pipelineState)
, fPipelineState(std::move(pipelineState))
, fPixelFormat(pixelFormat)
, fBuiltinUniformHandles(builtinUniformHandles)
, fNumSamplers(numSamplers)


@ -16,6 +16,8 @@
#import <Metal/Metal.h>
#include "include/ports/SkCFObject.h"
class GrProgramDesc;
class GrProgramInfo;
class GrMtlCaps;
@ -49,16 +51,16 @@ private:
void finalizeFragmentSecondaryColor(GrShaderVar& outputColor) override;
id<MTLLibrary> generateMtlShaderLibrary(const SkSL::String& sksl,
SkSL::Program::Kind kind,
const SkSL::Program::Settings& settings,
SkSL::String* msl,
SkSL::Program::Inputs* inputs);
id<MTLLibrary> compileMtlShaderLibrary(const SkSL::String& shader,
SkSL::Program::Inputs inputs);
sk_cf_obj<id<MTLLibrary>> generateMtlShaderLibrary(const SkSL::String& sksl,
SkSL::Program::Kind kind,
const SkSL::Program::Settings& settings,
SkSL::String* msl,
SkSL::Program::Inputs* inputs);
sk_cf_obj<id<MTLLibrary>> compileMtlShaderLibrary(const SkSL::String& shader,
SkSL::Program::Inputs inputs);
void storeShadersInCache(const SkSL::String shaders[], const SkSL::Program::Inputs inputs[],
bool isSkSL);
bool loadShadersFromCache(SkReadBuffer* cached, __strong id<MTLLibrary> outLibraries[]);
bool loadShadersFromCache(SkReadBuffer* cached, sk_cf_obj<id<MTLLibrary>> outLibraries[]);
GrGLSLUniformHandler* uniformHandler() override { return &fUniformHandler; }
const GrGLSLUniformHandler* uniformHandler() const override { return &fUniformHandler; }


@ -22,10 +22,6 @@
#import <simd/simd.h>
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
GrMtlPipelineState* GrMtlPipelineStateBuilder::CreatePipelineState(
GrMtlGpu* gpu,
GrRenderTarget* renderTarget,
@ -67,7 +63,7 @@ static constexpr SkFourByteTag kSKSL_Tag = SkSetFourByteTag('S', 'K', 'S', 'L');
bool GrMtlPipelineStateBuilder::loadShadersFromCache(SkReadBuffer* cached,
__strong id<MTLLibrary> outLibraries[]) {
sk_cf_obj<id<MTLLibrary>> outLibraries[]) {
SkSL::String shaders[kGrShaderTypeCount];
SkSL::Program::Inputs inputs[kGrShaderTypeCount];
@ -101,23 +97,23 @@ void GrMtlPipelineStateBuilder::storeShadersInCache(const SkSL::String shaders[]
fGpu->getContext()->priv().getPersistentCache()->store(*key, *data);
}
id<MTLLibrary> GrMtlPipelineStateBuilder::generateMtlShaderLibrary(
sk_cf_obj<id<MTLLibrary>> GrMtlPipelineStateBuilder::generateMtlShaderLibrary(
const SkSL::String& shader,
SkSL::Program::Kind kind,
const SkSL::Program::Settings& settings,
SkSL::String* msl,
SkSL::Program::Inputs* inputs) {
id<MTLLibrary> shaderLibrary = GrGenerateMtlShaderLibrary(fGpu, shader,
kind, settings, msl, inputs);
sk_cf_obj<id<MTLLibrary>> shaderLibrary = GrGenerateMtlShaderLibrary(fGpu, shader, kind,
settings, msl, inputs);
if (shaderLibrary != nil && inputs->fRTHeight) {
this->addRTHeightUniform(SKSL_RTHEIGHT_NAME);
}
return shaderLibrary;
}
id<MTLLibrary> GrMtlPipelineStateBuilder::compileMtlShaderLibrary(const SkSL::String& shader,
SkSL::Program::Inputs inputs) {
id<MTLLibrary> shaderLibrary = GrCompileMtlShaderLibrary(fGpu, shader);
sk_cf_obj<id<MTLLibrary>> GrMtlPipelineStateBuilder::compileMtlShaderLibrary(
const SkSL::String& shader, SkSL::Program::Inputs inputs) {
sk_cf_obj<id<MTLLibrary>> shaderLibrary = GrCompileMtlShaderLibrary(fGpu, shader);
if (shaderLibrary != nil && inputs.fRTHeight) {
this->addRTHeightUniform(SKSL_RTHEIGHT_NAME);
}
@ -202,7 +198,8 @@ static inline MTLVertexFormat attribute_type_to_mtlformat(GrVertexAttribType typ
SK_ABORT("Unknown vertex attribute type");
}
static MTLVertexDescriptor* create_vertex_descriptor(const GrPrimitiveProcessor& primProc) {
static sk_cf_obj<MTLVertexDescriptor*> create_vertex_descriptor(
const GrPrimitiveProcessor& primProc) {
uint32_t vertexBinding = 0, instanceBinding = 0;
int nextBinding = GrMtlUniformHandler::kLastUniformBinding + 1;
@ -214,13 +211,13 @@ static MTLVertexDescriptor* create_vertex_descriptor(const GrPrimitiveProcessor&
instanceBinding = nextBinding;
}
auto vertexDescriptor = [[MTLVertexDescriptor alloc] init];
sk_cf_obj<MTLVertexDescriptor*> vertexDescriptor([[MTLVertexDescriptor alloc] init]);
int attributeIndex = 0;
int vertexAttributeCount = primProc.numVertexAttributes();
size_t vertexAttributeOffset = 0;
for (const auto& attribute : primProc.vertexAttributes()) {
MTLVertexAttributeDescriptor* mtlAttribute = vertexDescriptor.attributes[attributeIndex];
MTLVertexAttributeDescriptor* mtlAttribute = (*vertexDescriptor).attributes[attributeIndex];
mtlAttribute.format = attribute_type_to_mtlformat(attribute.cpuType());
SkASSERT(MTLVertexFormatInvalid != mtlAttribute.format);
mtlAttribute.offset = vertexAttributeOffset;
@ -233,7 +230,7 @@ static MTLVertexDescriptor* create_vertex_descriptor(const GrPrimitiveProcessor&
if (vertexAttributeCount) {
MTLVertexBufferLayoutDescriptor* vertexBufferLayout =
vertexDescriptor.layouts[vertexBinding];
(*vertexDescriptor).layouts[vertexBinding];
vertexBufferLayout.stepFunction = MTLVertexStepFunctionPerVertex;
vertexBufferLayout.stepRate = 1;
vertexBufferLayout.stride = vertexAttributeOffset;
@ -242,7 +239,7 @@ static MTLVertexDescriptor* create_vertex_descriptor(const GrPrimitiveProcessor&
int instanceAttributeCount = primProc.numInstanceAttributes();
size_t instanceAttributeOffset = 0;
for (const auto& attribute : primProc.instanceAttributes()) {
MTLVertexAttributeDescriptor* mtlAttribute = vertexDescriptor.attributes[attributeIndex];
MTLVertexAttributeDescriptor* mtlAttribute = (*vertexDescriptor).attributes[attributeIndex];
mtlAttribute.format = attribute_type_to_mtlformat(attribute.cpuType());
mtlAttribute.offset = instanceAttributeOffset;
mtlAttribute.bufferIndex = instanceBinding;
@ -254,7 +251,7 @@ static MTLVertexDescriptor* create_vertex_descriptor(const GrPrimitiveProcessor&
if (instanceAttributeCount) {
MTLVertexBufferLayoutDescriptor* instanceBufferLayout =
vertexDescriptor.layouts[instanceBinding];
(*vertexDescriptor).layouts[instanceBinding];
instanceBufferLayout.stepFunction = MTLVertexStepFunctionPerInstance;
instanceBufferLayout.stepRate = 1;
instanceBufferLayout.stride = instanceAttributeOffset;
@ -334,12 +331,13 @@ static MTLBlendOperation blend_equation_to_mtl_blend_op(GrBlendEquation equation
return gTable[equation];
}
static MTLRenderPipelineColorAttachmentDescriptor* create_color_attachment(
static sk_cf_obj<MTLRenderPipelineColorAttachmentDescriptor*> create_color_attachment(
MTLPixelFormat format, const GrPipeline& pipeline) {
auto mtlColorAttachment = [[MTLRenderPipelineColorAttachmentDescriptor alloc] init];
sk_cf_obj<MTLRenderPipelineColorAttachmentDescriptor*> mtlColorAttachment(
[[MTLRenderPipelineColorAttachmentDescriptor alloc] init]);
// pixel format
mtlColorAttachment.pixelFormat = format;
[*mtlColorAttachment setPixelFormat: format];
// blending
const GrXferProcessor::BlendInfo& blendInfo = pipeline.getXferProcessor().getBlendInfo();
@ -349,20 +347,20 @@ static MTLRenderPipelineColorAttachmentDescriptor* create_color_attachment(
GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
bool blendOff = GrBlendShouldDisable(equation, srcCoeff, dstCoeff);
mtlColorAttachment.blendingEnabled = !blendOff;
(*mtlColorAttachment).blendingEnabled = !blendOff;
if (!blendOff) {
mtlColorAttachment.sourceRGBBlendFactor = blend_coeff_to_mtl_blend(srcCoeff);
mtlColorAttachment.destinationRGBBlendFactor = blend_coeff_to_mtl_blend(dstCoeff);
mtlColorAttachment.rgbBlendOperation = blend_equation_to_mtl_blend_op(equation);
mtlColorAttachment.sourceAlphaBlendFactor = blend_coeff_to_mtl_blend(srcCoeff);
mtlColorAttachment.destinationAlphaBlendFactor = blend_coeff_to_mtl_blend(dstCoeff);
mtlColorAttachment.alphaBlendOperation = blend_equation_to_mtl_blend_op(equation);
(*mtlColorAttachment).sourceRGBBlendFactor = blend_coeff_to_mtl_blend(srcCoeff);
(*mtlColorAttachment).destinationRGBBlendFactor = blend_coeff_to_mtl_blend(dstCoeff);
(*mtlColorAttachment).rgbBlendOperation = blend_equation_to_mtl_blend_op(equation);
(*mtlColorAttachment).sourceAlphaBlendFactor = blend_coeff_to_mtl_blend(srcCoeff);
(*mtlColorAttachment).destinationAlphaBlendFactor = blend_coeff_to_mtl_blend(dstCoeff);
(*mtlColorAttachment).alphaBlendOperation = blend_equation_to_mtl_blend_op(equation);
}
if (!blendInfo.fWriteColor) {
mtlColorAttachment.writeMask = MTLColorWriteMaskNone;
(*mtlColorAttachment).writeMask = MTLColorWriteMaskNone;
} else {
mtlColorAttachment.writeMask = MTLColorWriteMaskAll;
(*mtlColorAttachment).writeMask = MTLColorWriteMaskAll;
}
return mtlColorAttachment;
}
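
Because the descriptor now lives in a C++ wrapper rather than a raw Objective-C pointer, property dot syntax cannot be applied to the wrapper itself; the code dereferences it first. The three spellings below are equivalent ways to write the same property (a standalone sketch, not part of the commit):

sk_cf_obj<MTLRenderPipelineColorAttachmentDescriptor*> attachment(
        [[MTLRenderPipelineColorAttachmentDescriptor alloc] init]);
(*attachment).pixelFormat = MTLPixelFormatBGRA8Unorm;         // dereference, then dot syntax
[*attachment setPixelFormat: MTLPixelFormatBGRA8Unorm];       // explicit setter message
[attachment.get() setPixelFormat: MTLPixelFormatBGRA8Unorm];  // same, via get()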
@ -382,8 +380,9 @@ GrMtlPipelineState* GrMtlPipelineStateBuilder::finalize(GrRenderTarget* renderTa
const GrProgramInfo& programInfo) {
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
auto pipelineDescriptor = [[MTLRenderPipelineDescriptor alloc] init];
id<MTLLibrary> shaderLibraries[kGrShaderTypeCount];
sk_cf_obj<MTLRenderPipelineDescriptor*> pipelineDescriptor(
[[MTLRenderPipelineDescriptor alloc] init]);
sk_cf_obj<id<MTLLibrary>> shaderLibraries[kGrShaderTypeCount];
fVS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
fFS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
@ -470,10 +469,10 @@ GrMtlPipelineState* GrMtlPipelineStateBuilder::finalize(GrRenderTarget* renderTa
}
}
id<MTLFunction> vertexFunction =
[shaderLibraries[kVertex_GrShaderType] newFunctionWithName: @"vertexMain"];
id<MTLFunction> fragmentFunction =
[shaderLibraries[kFragment_GrShaderType] newFunctionWithName: @"fragmentMain"];
sk_cf_obj<id<MTLFunction>> vertexFunction(
[*(shaderLibraries[kVertex_GrShaderType]) newFunctionWithName: @"vertexMain"]);
sk_cf_obj<id<MTLFunction>> fragmentFunction(
[*(shaderLibraries[kFragment_GrShaderType]) newFunctionWithName: @"fragmentMain"]);
if (vertexFunction == nil) {
SkDebugf("Couldn't find vertexMain() in library\n");
@ -484,37 +483,39 @@ GrMtlPipelineState* GrMtlPipelineStateBuilder::finalize(GrRenderTarget* renderTa
return nullptr;
}
pipelineDescriptor.vertexFunction = vertexFunction;
pipelineDescriptor.fragmentFunction = fragmentFunction;
pipelineDescriptor.vertexDescriptor = create_vertex_descriptor(programInfo.primProc());
(*pipelineDescriptor).vertexFunction = vertexFunction.get();
(*pipelineDescriptor).fragmentFunction = fragmentFunction.get();
auto vertexDescriptor = create_vertex_descriptor(programInfo.primProc());
(*pipelineDescriptor).vertexDescriptor = *vertexDescriptor;
MTLPixelFormat pixelFormat = GrBackendFormatAsMTLPixelFormat(renderTarget->backendFormat());
if (pixelFormat == MTLPixelFormatInvalid) {
return nullptr;
}
pipelineDescriptor.colorAttachments[0] = create_color_attachment(pixelFormat,
programInfo.pipeline());
pipelineDescriptor.sampleCount = programInfo.numRasterSamples();
auto colorAttachment = create_color_attachment(pixelFormat, programInfo.pipeline());
(*pipelineDescriptor).colorAttachments[0] = *colorAttachment;
(*pipelineDescriptor).sampleCount = programInfo.numRasterSamples();
bool hasStencilAttachment = SkToBool(renderTarget->getStencilAttachment());
GrMtlCaps* mtlCaps = (GrMtlCaps*)this->caps();
pipelineDescriptor.stencilAttachmentPixelFormat =
(*pipelineDescriptor).stencilAttachmentPixelFormat =
hasStencilAttachment ? mtlCaps->preferredStencilFormat().fInternalFormat
: MTLPixelFormatInvalid;
SkASSERT(pipelineDescriptor.vertexFunction);
SkASSERT(pipelineDescriptor.fragmentFunction);
SkASSERT(pipelineDescriptor.vertexDescriptor);
SkASSERT(pipelineDescriptor.colorAttachments[0]);
SkASSERT((*pipelineDescriptor).vertexFunction);
SkASSERT((*pipelineDescriptor).fragmentFunction);
SkASSERT((*pipelineDescriptor).vertexDescriptor);
SkASSERT((*pipelineDescriptor).colorAttachments[0]);
NSError* error = nil;
#if defined(SK_BUILD_FOR_MAC)
id<MTLRenderPipelineState> pipelineState = GrMtlNewRenderPipelineStateWithDescriptor(
fGpu->device(), pipelineDescriptor, &error);
sk_cf_obj<id<MTLRenderPipelineState>> pipelineState(
GrMtlNewRenderPipelineStateWithDescriptor(fGpu->device(), pipelineDescriptor.get(),
&error));
#else
id<MTLRenderPipelineState> pipelineState =
[fGpu->device() newRenderPipelineStateWithDescriptor: pipelineDescriptor
error: &error];
sk_cf_obj<id<MTLRenderPipelineState>> pipelineState(
[fGpu->device() newRenderPipelineStateWithDescriptor: pipelineDescriptor.get()
error: &error]);
#endif
if (error) {
SkDebugf("Error creating pipeline: %s\n",
@ -528,8 +529,8 @@ GrMtlPipelineState* GrMtlPipelineStateBuilder::finalize(GrRenderTarget* renderTa
uint32_t bufferSize = buffer_size(fUniformHandler.fCurrentUBOOffset,
fUniformHandler.fCurrentUBOMaxAlignment);
return new GrMtlPipelineState(fGpu,
pipelineState,
pipelineDescriptor.colorAttachments[0].pixelFormat,
std::move(pipelineState),
(*pipelineDescriptor).colorAttachments[0].pixelFormat,
fUniformHandles,
fUniformHandler.fUniforms,
bufferSize,
View File
@ -10,10 +10,6 @@
#include "src/gpu/mtl/GrMtlBuffer.h"
#include "src/gpu/mtl/GrMtlGpu.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
GrMtlPipelineStateDataManager::GrMtlPipelineStateDataManager(const UniformInfoArray& uniforms,
uint32_t uniformSize)
: fUniformSize(uniformSize)
View File
@ -11,6 +11,7 @@
#include "src/gpu/GrRenderTarget.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/ports/SkCFObject.h"
#include "src/gpu/GrGpu.h"
#import <Metal/Metal.h>
@ -22,7 +23,7 @@ public:
static sk_sp<GrMtlRenderTarget> MakeWrappedRenderTarget(GrMtlGpu*,
SkISize,
int sampleCnt,
id<MTLTexture>);
sk_cf_obj<id<MTLTexture>>);
~GrMtlRenderTarget() override;
@ -30,8 +31,8 @@ public:
return true;
}
id<MTLTexture> mtlColorTexture() const { return fColorTexture; }
id<MTLTexture> mtlResolveTexture() const { return fResolveTexture; }
id<MTLTexture> mtlColorTexture() const { return fColorTexture.get(); }
id<MTLTexture> mtlResolveTexture() const { return fResolveTexture.get(); }
GrBackendRenderTarget getBackendRenderTarget() const override;
@ -41,10 +42,10 @@ protected:
GrMtlRenderTarget(GrMtlGpu* gpu,
SkISize,
int sampleCnt,
id<MTLTexture> colorTexture,
id<MTLTexture> resolveTexture);
sk_cf_obj<id<MTLTexture>> colorTexture,
sk_cf_obj<id<MTLTexture>> resolveTexture);
GrMtlRenderTarget(GrMtlGpu* gpu, SkISize, id<MTLTexture> colorTexture);
GrMtlRenderTarget(GrMtlGpu* gpu, SkISize, sk_cf_obj<id<MTLTexture>> colorTexture);
GrMtlGpu* getMtlGpu() const;
@ -65,8 +66,8 @@ protected:
numColorSamples, GrMipmapped::kNo);
}
id<MTLTexture> fColorTexture;
id<MTLTexture> fResolveTexture;
sk_cf_obj<id<MTLTexture>> fColorTexture;
sk_cf_obj<id<MTLTexture>> fResolveTexture;
private:
// Extra param to disambiguate from constructor used by subclasses.
@ -74,10 +75,10 @@ private:
GrMtlRenderTarget(GrMtlGpu* gpu,
SkISize,
int sampleCnt,
id<MTLTexture> colorTexture,
id<MTLTexture> resolveTexture,
sk_cf_obj<id<MTLTexture>> colorTexture,
sk_cf_obj<id<MTLTexture>> resolveTexture,
Wrapped);
GrMtlRenderTarget(GrMtlGpu* gpu, SkISize, id<MTLTexture> colorTexture, Wrapped);
GrMtlRenderTarget(GrMtlGpu* gpu, SkISize, sk_cf_obj<id<MTLTexture>> colorTexture, Wrapped);
bool completeStencilAttachment() override;
View File
@ -10,32 +10,28 @@
#include "src/gpu/mtl/GrMtlGpu.h"
#include "src/gpu/mtl/GrMtlUtil.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
// Called for wrapped non-texture render targets.
GrMtlRenderTarget::GrMtlRenderTarget(GrMtlGpu* gpu,
SkISize dimensions,
int sampleCnt,
id<MTLTexture> colorTexture,
id<MTLTexture> resolveTexture,
sk_cf_obj<id<MTLTexture>> colorTexture,
sk_cf_obj<id<MTLTexture>> resolveTexture,
Wrapped)
: GrSurface(gpu, dimensions, GrProtected::kNo)
, GrRenderTarget(gpu, dimensions, sampleCnt, GrProtected::kNo)
, fColorTexture(colorTexture)
, fResolveTexture(resolveTexture) {
, fColorTexture(std::move(colorTexture))
, fResolveTexture(std::move(resolveTexture)) {
SkASSERT(sampleCnt > 1);
this->registerWithCacheWrapped(GrWrapCacheable::kNo);
}
GrMtlRenderTarget::GrMtlRenderTarget(GrMtlGpu* gpu,
SkISize dimensions,
id<MTLTexture> colorTexture,
sk_cf_obj<id<MTLTexture>> colorTexture,
Wrapped)
: GrSurface(gpu, dimensions, GrProtected::kNo)
, GrRenderTarget(gpu, dimensions, 1, GrProtected::kNo)
, fColorTexture(colorTexture)
, fColorTexture(std::move(colorTexture))
, fResolveTexture(nil) {
this->registerWithCacheWrapped(GrWrapCacheable::kNo);
}
@ -44,61 +40,64 @@ GrMtlRenderTarget::GrMtlRenderTarget(GrMtlGpu* gpu,
GrMtlRenderTarget::GrMtlRenderTarget(GrMtlGpu* gpu,
SkISize dimensions,
int sampleCnt,
id<MTLTexture> colorTexture,
id<MTLTexture> resolveTexture)
sk_cf_obj<id<MTLTexture>> colorTexture,
sk_cf_obj<id<MTLTexture>> resolveTexture)
: GrSurface(gpu, dimensions, GrProtected::kNo)
, GrRenderTarget(gpu, dimensions, sampleCnt, GrProtected::kNo)
, fColorTexture(colorTexture)
, fResolveTexture(resolveTexture) {
, fColorTexture(std::move(colorTexture))
, fResolveTexture(std::move(resolveTexture)) {
SkASSERT(sampleCnt > 1);
}
GrMtlRenderTarget::GrMtlRenderTarget(GrMtlGpu* gpu, SkISize dimensions, id<MTLTexture> colorTexture)
GrMtlRenderTarget::GrMtlRenderTarget(GrMtlGpu* gpu, SkISize dimensions,
sk_cf_obj<id<MTLTexture>> colorTexture)
: GrSurface(gpu, dimensions, GrProtected::kNo)
, GrRenderTarget(gpu, dimensions, 1, GrProtected::kNo)
, fColorTexture(colorTexture)
, fColorTexture(std::move(colorTexture))
, fResolveTexture(nil) {}
sk_sp<GrMtlRenderTarget> GrMtlRenderTarget::MakeWrappedRenderTarget(GrMtlGpu* gpu,
SkISize dimensions,
int sampleCnt,
id<MTLTexture> texture) {
SkASSERT(nil != texture);
SkASSERT(1 == texture.mipmapLevelCount);
sk_sp<GrMtlRenderTarget> GrMtlRenderTarget::MakeWrappedRenderTarget(
GrMtlGpu* gpu, SkISize dimensions, int sampleCnt, sk_cf_obj<id<MTLTexture>> texture) {
SkASSERT(texture);
SkASSERT(1 == (*texture).mipmapLevelCount);
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT(MTLTextureUsageRenderTarget & texture.usage);
SkASSERT(MTLTextureUsageRenderTarget & (*texture).usage);
}
GrMtlRenderTarget* mtlRT;
if (sampleCnt > 1) {
MTLPixelFormat format = texture.pixelFormat;
// TODO: share with routine in GrMtlTextureRenderTarget
MTLPixelFormat format = (*texture).pixelFormat;
if (!gpu->mtlCaps().isFormatRenderable(format, sampleCnt)) {
return nullptr;
}
MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
texDesc.textureType = MTLTextureType2DMultisample;
texDesc.pixelFormat = format;
texDesc.width = dimensions.fWidth;
texDesc.height = dimensions.fHeight;
texDesc.depth = 1;
texDesc.mipmapLevelCount = 1;
texDesc.sampleCount = sampleCnt;
texDesc.arrayLength = 1;
sk_cf_obj<MTLTextureDescriptor*> texDesc([[MTLTextureDescriptor alloc] init]);
(*texDesc).textureType = MTLTextureType2DMultisample;
(*texDesc).pixelFormat = format;
(*texDesc).width = dimensions.fWidth;
(*texDesc).height = dimensions.fHeight;
(*texDesc).depth = 1;
(*texDesc).mipmapLevelCount = 1;
(*texDesc).sampleCount = sampleCnt;
(*texDesc).arrayLength = 1;
if (@available(macOS 10.11, iOS 9.0, *)) {
texDesc.storageMode = MTLStorageModePrivate;
texDesc.usage = MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget;
(*texDesc).storageMode = MTLStorageModePrivate;
(*texDesc).usage = MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget;
}
id<MTLTexture> colorTexture = [gpu->device() newTextureWithDescriptor:texDesc];
sk_cf_obj<id<MTLTexture>> colorTexture(
[gpu->device() newTextureWithDescriptor:texDesc.get()]);
if (!colorTexture) {
return nullptr;
}
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT((MTLTextureUsageShaderRead|MTLTextureUsageRenderTarget) & colorTexture.usage);
SkASSERT((MTLTextureUsageShaderRead|MTLTextureUsageRenderTarget) &
(*colorTexture).usage);
}
mtlRT = new GrMtlRenderTarget(gpu, dimensions, sampleCnt, colorTexture, texture, kWrapped);
mtlRT = new GrMtlRenderTarget(gpu, dimensions, sampleCnt, std::move(colorTexture),
std::move(texture), kWrapped);
} else {
mtlRT = new GrMtlRenderTarget(gpu, dimensions, texture, kWrapped);
mtlRT = new GrMtlRenderTarget(gpu, dimensions, std::move(texture), kWrapped);
}
return sk_sp<GrMtlRenderTarget>(mtlRT);
@ -111,12 +110,12 @@ GrMtlRenderTarget::~GrMtlRenderTarget() {
GrBackendRenderTarget GrMtlRenderTarget::getBackendRenderTarget() const {
GrMtlTextureInfo info;
info.fTexture.reset(GrRetainPtrFromId(fColorTexture));
return GrBackendRenderTarget(this->width(), this->height(), fColorTexture.sampleCount, info);
info.fTexture.retain(fColorTexture.get());
return GrBackendRenderTarget(this->width(), this->height(), (*fColorTexture).sampleCount, info);
}
GrBackendFormat GrMtlRenderTarget::backendFormat() const {
return GrBackendFormat::MakeMtl(fColorTexture.pixelFormat);
return GrBackendFormat::MakeMtl((*fColorTexture).pixelFormat);
}
GrMtlGpu* GrMtlRenderTarget::getMtlGpu() const {
View File
@ -17,10 +17,6 @@
#include "src/sksl/SkSLCompiler.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
GrMtlResourceProvider::GrMtlResourceProvider(GrMtlGpu* gpu)
: fGpu(gpu) {
fPipelineStateCache.reset(new PipelineStateCache(gpu));
View File
@ -8,6 +8,8 @@
#ifndef GrMtlSampler_DEFINED
#define GrMtlSampler_DEFINED
#include "include/gpu/mtl/GrMtlTypes.h"
#import <Metal/Metal.h>
#include "src/core/SkOpts.h"
@ -20,9 +22,9 @@ class GrMtlGpu;
class GrMtlSampler : public SkRefCnt {
public:
static GrMtlSampler* Create(const GrMtlGpu* gpu, GrSamplerState);
~GrMtlSampler() override { fMtlSamplerState = nil; }
~GrMtlSampler() override {}
id<MTLSamplerState> mtlSampler() const { return fMtlSamplerState; }
id<MTLSamplerState> mtlSampler() const { return fMtlSamplerState.get(); }
typedef uint32_t Key;
@ -35,12 +37,12 @@ public:
}
private:
GrMtlSampler(id<MTLSamplerState> mtlSamplerState, Key key)
: fMtlSamplerState(mtlSamplerState)
GrMtlSampler(sk_cf_obj<id<MTLSamplerState>> mtlSamplerState, Key key)
: fMtlSamplerState(std::move(mtlSamplerState))
, fKey(key) {}
id<MTLSamplerState> fMtlSamplerState;
Key fKey;
sk_cf_obj<id<MTLSamplerState>> fMtlSamplerState;
Key fKey;
};
#endif
View File
@ -9,10 +9,6 @@
#include "src/gpu/mtl/GrMtlGpu.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
static inline MTLSamplerAddressMode wrap_mode_to_mtl_sampler_address(
GrSamplerState::WrapMode wrapMode, const GrCaps& caps) {
switch (wrapMode) {
@ -58,25 +54,25 @@ GrMtlSampler* GrMtlSampler::Create(const GrMtlGpu* gpu, GrSamplerState samplerSt
SkUNREACHABLE;
}();
auto samplerDesc = [[MTLSamplerDescriptor alloc] init];
samplerDesc.rAddressMode = MTLSamplerAddressModeClampToEdge;
samplerDesc.sAddressMode = wrap_mode_to_mtl_sampler_address(samplerState.wrapModeX(),
gpu->mtlCaps());
samplerDesc.tAddressMode = wrap_mode_to_mtl_sampler_address(samplerState.wrapModeY(),
gpu->mtlCaps());
samplerDesc.magFilter = minMagFilter;
samplerDesc.minFilter = minMagFilter;
samplerDesc.mipFilter = mipFilter;
samplerDesc.lodMinClamp = 0.0f;
samplerDesc.lodMaxClamp = FLT_MAX; // default value according to docs.
samplerDesc.maxAnisotropy = 1.0f;
samplerDesc.normalizedCoordinates = true;
sk_cf_obj<MTLSamplerDescriptor*> samplerDesc([[MTLSamplerDescriptor alloc] init]);
(*samplerDesc).rAddressMode = MTLSamplerAddressModeClampToEdge;
(*samplerDesc).sAddressMode = wrap_mode_to_mtl_sampler_address(samplerState.wrapModeX(),
gpu->mtlCaps());
(*samplerDesc).tAddressMode = wrap_mode_to_mtl_sampler_address(samplerState.wrapModeY(),
gpu->mtlCaps());
(*samplerDesc).magFilter = minMagFilter;
(*samplerDesc).minFilter = minMagFilter;
(*samplerDesc).mipFilter = mipFilter;
(*samplerDesc).lodMinClamp = 0.0f;
(*samplerDesc).maxAnisotropy = 1.0f;
(*samplerDesc).normalizedCoordinates = true;
if (@available(macOS 10.11, iOS 9.0, *)) {
samplerDesc.compareFunction = MTLCompareFunctionNever;
(*samplerDesc).compareFunction = MTLCompareFunctionNever;
}
return new GrMtlSampler([gpu->device() newSamplerStateWithDescriptor: samplerDesc],
GenerateKey(samplerState));
sk_cf_obj<id<MTLSamplerState>> sampler(
[gpu->device() newSamplerStateWithDescriptor:samplerDesc.get()]);
return new GrMtlSampler(std::move(sampler), GenerateKey(samplerState));
}
GrMtlSampler::Key GrMtlSampler::GenerateKey(GrSamplerState samplerState) {
View File
@ -25,20 +25,23 @@ public:
~GrMtlSemaphore() override {}
id<MTLEvent> event() const SK_API_AVAILABLE(macos(10.14), ios(12.0)) { return fEvent; }
id<MTLEvent> event() const SK_API_AVAILABLE(macos(10.14), ios(12.0)) {
return static_cast<id<MTLEvent>>(fEvent.get());
}
uint64_t value() const { return fValue; }
GrBackendSemaphore backendSemaphore() const override;
private:
GrMtlSemaphore(id<MTLEvent> event, uint64_t value) SK_API_AVAILABLE(macos(10.14), ios(12.0));
GrMtlSemaphore(sk_cf_obj<GrMTLHandle> event,
uint64_t value);
void setIsOwned() override {}
id<MTLEvent> fEvent SK_API_AVAILABLE(macos(10.14), ios(12.0));
uint64_t fValue;
sk_cf_obj<GrMTLHandle> fEvent;
uint64_t fValue;
typedef GrSemaphore INHERITED;
};
} SK_API_AVAILABLE(macos(10.14), ios(12.0));
#endif
View File
@ -9,15 +9,11 @@
#include "src/gpu/mtl/GrMtlGpu.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
std::unique_ptr<GrMtlSemaphore> GrMtlSemaphore::Make(GrMtlGpu* gpu) {
if (@available(macOS 10.14, iOS 12.0, *)) {
id<MTLEvent> event = [gpu->device() newEvent];
sk_cf_obj<GrMTLHandle> event(static_cast<GrMTLHandle>([gpu->device() newEvent]));
uint64_t value = 1; // seems like a reasonable starting point
return std::unique_ptr<GrMtlSemaphore>(new GrMtlSemaphore(event, value));
return std::unique_ptr<GrMtlSemaphore>(new GrMtlSemaphore(std::move(event), value));
} else {
return nullptr;
}
@ -25,18 +21,19 @@ std::unique_ptr<GrMtlSemaphore> GrMtlSemaphore::Make(GrMtlGpu* gpu) {
std::unique_ptr<GrMtlSemaphore> GrMtlSemaphore::MakeWrapped(GrMTLHandle event,
uint64_t value) {
// The GrMtlSemaphore will have strong ownership at this point.
// The GrMTLHandle will subsequently only have weak ownership.
// Implicitly the GrMtlSemaphore will take ownership at this point.
// The incoming GrMTLHandle will subsequently only have weak ownership.
// TODO: Can we manage shared ownership now?
if (@available(macOS 10.14, iOS 12.0, *)) {
id<MTLEvent> mtlEvent = (__bridge_transfer id<MTLEvent>)event;
return std::unique_ptr<GrMtlSemaphore>(new GrMtlSemaphore(mtlEvent, value));
sk_cf_obj<GrMTLHandle> mtlEvent(event);
return std::unique_ptr<GrMtlSemaphore>(new GrMtlSemaphore(std::move(mtlEvent), value));
} else {
return nullptr;
}
}
GrMtlSemaphore::GrMtlSemaphore(id<MTLEvent> event, uint64_t value)
: fEvent(event), fValue(value) {
GrMtlSemaphore::GrMtlSemaphore(sk_cf_obj<GrMTLHandle> event, uint64_t value)
: fEvent(std::move(event)), fValue(value) {
}
GrBackendSemaphore GrMtlSemaphore::backendSemaphore() const {
@ -44,7 +41,8 @@ GrBackendSemaphore GrMtlSemaphore::backendSemaphore() const {
// The GrMtlSemaphore and the GrBackendSemaphore will have strong ownership at this point.
// Whoever uses the GrBackendSemaphore will subsequently steal this ref (see MakeWrapped, above).
if (@available(macOS 10.14, iOS 12.0, *)) {
GrMTLHandle handle = (__bridge_retained GrMTLHandle)(fEvent);
SkASSERT(fEvent);
GrMTLHandle handle = CFRetain(fEvent.get());
backendSemaphore.initMetal(handle, fValue);
}
return backendSemaphore;
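
Taken together, backendSemaphore() hands out a +1 reference and MakeWrapped() adopts one, so a round trip through a GrBackendSemaphore stays balanced. A usage sketch, assuming gpu is a valid GrMtlGpu* and that GrBackendSemaphore exposes the stored handle through its usual mtlSemaphore() accessor:

std::unique_ptr<GrMtlSemaphore> semaphore = GrMtlSemaphore::Make(gpu);
GrBackendSemaphore backend = semaphore->backendSemaphore();  // CFRetain inside
std::unique_ptr<GrMtlSemaphore> wrapped = GrMtlSemaphore::MakeWrapped(
        backend.mtlSemaphore(), semaphore->value());         // adopts that +1 reference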
View File
@ -12,6 +12,8 @@
#import <Metal/Metal.h>
#include "include/ports/SkCFObject.h"
class GrMtlImageView;
class GrMtlGpu;
@ -31,7 +33,7 @@ public:
MTLPixelFormat mtlFormat() const { return fFormat.fInternalFormat; }
id<MTLTexture> stencilView() const { return fStencilView; }
id<MTLTexture> stencilView() const { return fStencilView.get(); }
protected:
void onRelease() override;
@ -48,7 +50,7 @@ private:
Format fFormat;
id<MTLTexture> fStencilView;
sk_cf_obj<id<MTLTexture>> fStencilView;
};
#endif
View File
@ -8,10 +8,6 @@
#include "src/gpu/mtl/GrMtlGpu.h"
#include "src/gpu/mtl/GrMtlUtil.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
GrMtlStencilAttachment::GrMtlStencilAttachment(GrMtlGpu* gpu,
const Format& format,
const id<MTLTexture> stencilView)
@ -28,10 +24,10 @@ GrMtlStencilAttachment* GrMtlStencilAttachment::Create(GrMtlGpu* gpu,
int sampleCnt,
const Format& format) {
MTLTextureDescriptor* desc =
[MTLTextureDescriptor texture2DDescriptorWithPixelFormat:format.fInternalFormat
width:width
height:height
mipmapped:NO];
[MTLTextureDescriptor texture2DDescriptorWithPixelFormat:format.fInternalFormat
width:width
height:height
mipmapped:NO];
if (@available(macOS 10.11, iOS 9.0, *)) {
desc.storageMode = MTLStorageModePrivate;
desc.usage = MTLTextureUsageRenderTarget;
@ -40,7 +36,8 @@ GrMtlStencilAttachment* GrMtlStencilAttachment::Create(GrMtlGpu* gpu,
if (sampleCnt > 1) {
desc.textureType = MTLTextureType2DMultisample;
}
return new GrMtlStencilAttachment(gpu, format, [gpu->device() newTextureWithDescriptor:desc]);
return new GrMtlStencilAttachment(gpu, format,
[gpu->device() newTextureWithDescriptor:desc]);
}
GrMtlStencilAttachment::~GrMtlStencilAttachment() {
@ -57,12 +54,12 @@ size_t GrMtlStencilAttachment::onGpuMemorySize() const {
}
void GrMtlStencilAttachment::onRelease() {
fStencilView = nullptr;
fStencilView.reset();
GrStencilAttachment::onRelease();
}
void GrMtlStencilAttachment::onAbandon() {
fStencilView = nullptr;
fStencilView.reset();
GrStencilAttachment::onAbandon();
}
View File
@ -11,6 +11,7 @@
#include "src/gpu/GrTexture.h"
#import <Metal/Metal.h>
#include "include/ports/SkCFObject.h"
class GrMtlGpu;
@ -24,13 +25,13 @@ public:
static sk_sp<GrMtlTexture> MakeWrappedTexture(GrMtlGpu*,
SkISize,
id<MTLTexture>,
sk_cf_obj<id<MTLTexture>>,
GrWrapCacheable,
GrIOType);
~GrMtlTexture() override;
id<MTLTexture> mtlTexture() const { return fTexture; }
id<MTLTexture> mtlTexture() const { return fTexture.get(); }
GrBackendTexture getBackendTexture() const override;
@ -41,7 +42,7 @@ public:
bool reallocForMipmap(GrMtlGpu* gpu, uint32_t mipLevels);
protected:
GrMtlTexture(GrMtlGpu*, SkISize, id<MTLTexture>, GrMipmapStatus);
GrMtlTexture(GrMtlGpu*, SkISize, sk_cf_obj<id<MTLTexture>>, GrMipmapStatus);
GrMtlGpu* getMtlGpu() const;
@ -61,17 +62,17 @@ protected:
private:
enum Wrapped { kWrapped };
GrMtlTexture(GrMtlGpu*, SkBudgeted, SkISize, id<MTLTexture>, GrMipmapStatus);
GrMtlTexture(GrMtlGpu*, SkBudgeted, SkISize, sk_cf_obj<id<MTLTexture>>, GrMipmapStatus);
GrMtlTexture(GrMtlGpu*,
Wrapped,
SkISize,
id<MTLTexture>,
sk_cf_obj<id<MTLTexture>>,
GrMipmapStatus,
GrWrapCacheable,
GrIOType);
id<MTLTexture> fTexture;
sk_cf_obj<id<MTLTexture>> fTexture;
typedef GrTexture INHERITED;
};
View File
@ -11,25 +11,22 @@
#include "src/gpu/mtl/GrMtlGpu.h"
#include "src/gpu/mtl/GrMtlUtil.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
GrMtlTexture::GrMtlTexture(GrMtlGpu* gpu,
SkBudgeted budgeted,
SkISize dimensions,
id<MTLTexture> texture,
sk_cf_obj<id<MTLTexture>> texture,
GrMipmapStatus mipmapStatus)
: GrSurface(gpu, dimensions, GrProtected::kNo)
, INHERITED(gpu, dimensions, GrProtected::kNo, GrTextureType::k2D, mipmapStatus)
, fTexture(texture) {
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) == (1 == texture.mipmapLevelCount));
, fTexture(std::move(texture)) {
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) ==
(1 == (*fTexture).mipmapLevelCount));
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT(SkToBool(texture.usage & MTLTextureUsageShaderRead));
SkASSERT(SkToBool((*fTexture).usage & MTLTextureUsageShaderRead));
}
SkASSERT(!texture.framebufferOnly);
SkASSERT(!(*fTexture).framebufferOnly);
this->registerWithCache(budgeted);
if (GrMtlFormatIsCompressed(texture.pixelFormat)) {
if (GrMtlFormatIsCompressed((*fTexture).pixelFormat)) {
this->setReadOnly();
}
}
@ -37,18 +34,19 @@ GrMtlTexture::GrMtlTexture(GrMtlGpu* gpu,
GrMtlTexture::GrMtlTexture(GrMtlGpu* gpu,
Wrapped,
SkISize dimensions,
id<MTLTexture> texture,
sk_cf_obj<id<MTLTexture>> texture,
GrMipmapStatus mipmapStatus,
GrWrapCacheable cacheable,
GrIOType ioType)
: GrSurface(gpu, dimensions, GrProtected::kNo)
, INHERITED(gpu, dimensions, GrProtected::kNo, GrTextureType::k2D, mipmapStatus)
, fTexture(texture) {
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) == (1 == texture.mipmapLevelCount));
, fTexture(std::move(texture)) {
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) ==
(1 == (*fTexture).mipmapLevelCount));
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT(SkToBool(texture.usage & MTLTextureUsageShaderRead));
SkASSERT(SkToBool((*fTexture).usage & MTLTextureUsageShaderRead));
}
SkASSERT(!texture.framebufferOnly);
SkASSERT(!(*fTexture).framebufferOnly);
if (ioType == kRead_GrIOType) {
this->setReadOnly();
}
@ -57,16 +55,17 @@ GrMtlTexture::GrMtlTexture(GrMtlGpu* gpu,
GrMtlTexture::GrMtlTexture(GrMtlGpu* gpu,
SkISize dimensions,
id<MTLTexture> texture,
sk_cf_obj<id<MTLTexture>> texture,
GrMipmapStatus mipmapStatus)
: GrSurface(gpu, dimensions, GrProtected::kNo)
, INHERITED(gpu, dimensions, GrProtected::kNo, GrTextureType::k2D, mipmapStatus)
, fTexture(texture) {
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) == (1 == texture.mipmapLevelCount));
, fTexture(std::move(texture)) {
SkASSERT((GrMipmapStatus::kNotAllocated == mipmapStatus) ==
(1 == (*fTexture).mipmapLevelCount));
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT(SkToBool(texture.usage & MTLTextureUsageShaderRead));
SkASSERT(SkToBool((*fTexture).usage & MTLTextureUsageShaderRead));
}
SkASSERT(!texture.framebufferOnly);
SkASSERT(!(*fTexture).framebufferOnly);
}
sk_sp<GrMtlTexture> GrMtlTexture::MakeNewTexture(GrMtlGpu* gpu,
@ -74,29 +73,31 @@ sk_sp<GrMtlTexture> GrMtlTexture::MakeNewTexture(GrMtlGpu* gpu,
SkISize dimensions,
MTLTextureDescriptor* texDesc,
GrMipmapStatus mipmapStatus) {
id<MTLTexture> texture = [gpu->device() newTextureWithDescriptor:texDesc];
sk_cf_obj<id<MTLTexture>> texture([gpu->device() newTextureWithDescriptor:texDesc]);
if (!texture) {
return nullptr;
}
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT(SkToBool(texture.usage & MTLTextureUsageShaderRead));
SkASSERT(SkToBool((*texture).usage & MTLTextureUsageShaderRead));
}
return sk_sp<GrMtlTexture>(new GrMtlTexture(gpu, budgeted, dimensions, texture, mipmapStatus));
return sk_sp<GrMtlTexture>(new GrMtlTexture(gpu, budgeted, dimensions, std::move(texture),
mipmapStatus));
}
sk_sp<GrMtlTexture> GrMtlTexture::MakeWrappedTexture(GrMtlGpu* gpu,
SkISize dimensions,
id<MTLTexture> texture,
sk_cf_obj<id<MTLTexture>> texture,
GrWrapCacheable cacheable,
GrIOType ioType) {
SkASSERT(nil != texture);
SkASSERT(texture);
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT(SkToBool(texture.usage & MTLTextureUsageShaderRead));
SkASSERT(SkToBool((*texture).usage & MTLTextureUsageShaderRead));
}
GrMipmapStatus mipmapStatus = texture.mipmapLevelCount > 1 ? GrMipmapStatus::kValid
: GrMipmapStatus::kNotAllocated;
GrMipmapStatus mipmapStatus = (*texture).mipmapLevelCount > 1 ? GrMipmapStatus::kValid
: GrMipmapStatus::kNotAllocated;
return sk_sp<GrMtlTexture>(
new GrMtlTexture(gpu, kWrapped, dimensions, texture, mipmapStatus, cacheable, ioType));
new GrMtlTexture(gpu, kWrapped, dimensions, std::move(texture), mipmapStatus, cacheable,
ioType));
}
GrMtlTexture::~GrMtlTexture() {
@ -109,14 +110,13 @@ GrMtlGpu* GrMtlTexture::getMtlGpu() const {
}
GrBackendTexture GrMtlTexture::getBackendTexture() const {
GrMipmapped mipMapped = fTexture.mipmapLevelCount > 1 ? GrMipmapped::kYes
: GrMipmapped::kNo;
GrMipmapped mipMapped = (*fTexture).mipmapLevelCount > 1 ? GrMipmapped::kYes
: GrMipmapped::kNo;
GrMtlTextureInfo info;
info.fTexture.reset(GrRetainPtrFromId(fTexture));
info.fTexture.retain(fTexture.get());
return GrBackendTexture(this->width(), this->height(), mipMapped, info);
}
GrBackendFormat GrMtlTexture::backendFormat() const {
return GrBackendFormat::MakeMtl(fTexture.pixelFormat);
return GrBackendFormat::MakeMtl((*fTexture).pixelFormat);
}
View File
@ -13,17 +13,14 @@
class GrMtlTextureRenderTarget: public GrMtlTexture, public GrMtlRenderTarget {
public:
static sk_sp<GrMtlTextureRenderTarget> MakeNewTextureRenderTarget(GrMtlGpu*,
SkBudgeted,
SkISize,
int sampleCnt,
MTLTextureDescriptor*,
GrMipmapStatus);
static sk_sp<GrMtlTextureRenderTarget> MakeNewTextureRenderTarget(
GrMtlGpu*, SkBudgeted, SkISize, int sampleCnt, MTLTextureDescriptor*,
GrMipmapStatus);
static sk_sp<GrMtlTextureRenderTarget> MakeWrappedTextureRenderTarget(GrMtlGpu*,
SkISize,
int sampleCnt,
id<MTLTexture>,
sk_cf_obj<id<MTLTexture>>,
GrWrapCacheable);
GrBackendFormat backendFormat() const override {
return GrMtlTexture::backendFormat();
@ -45,27 +42,27 @@ private:
SkBudgeted budgeted,
SkISize,
int sampleCnt,
id<MTLTexture> colorTexture,
id<MTLTexture> resolveTexture,
sk_cf_obj<id<MTLTexture>> colorTexture,
sk_cf_obj<id<MTLTexture>> resolveTexture,
GrMipmapStatus);
GrMtlTextureRenderTarget(GrMtlGpu* gpu,
SkBudgeted budgeted,
SkISize,
id<MTLTexture> colorTexture,
sk_cf_obj<id<MTLTexture>> colorTexture,
GrMipmapStatus);
GrMtlTextureRenderTarget(GrMtlGpu* gpu,
SkISize,
int sampleCnt,
id<MTLTexture> colorTexture,
id<MTLTexture> resolveTexture,
sk_cf_obj<id<MTLTexture>> colorTexture,
sk_cf_obj<id<MTLTexture>> resolveTexture,
GrMipmapStatus,
GrWrapCacheable cacheable);
GrMtlTextureRenderTarget(GrMtlGpu* gpu,
SkISize,
id<MTLTexture> colorTexture,
sk_cf_obj<id<MTLTexture>> colorTexture,
GrMipmapStatus,
GrWrapCacheable cacheable);
View File
@ -9,78 +9,76 @@
#include "src/gpu/mtl/GrMtlTextureRenderTarget.h"
#include "src/gpu/mtl/GrMtlUtil.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
GrMtlTextureRenderTarget::GrMtlTextureRenderTarget(GrMtlGpu* gpu,
SkBudgeted budgeted,
SkISize dimensions,
int sampleCnt,
id<MTLTexture> colorTexture,
id<MTLTexture> resolveTexture,
sk_cf_obj<id<MTLTexture>> colorTexture,
sk_cf_obj<id<MTLTexture>> resolveTexture,
GrMipmapStatus mipmapStatus)
: GrSurface(gpu, dimensions, GrProtected::kNo)
, GrMtlTexture(gpu, dimensions, resolveTexture, mipmapStatus)
, GrMtlRenderTarget(gpu, dimensions, sampleCnt, colorTexture, resolveTexture) {
, GrMtlRenderTarget(gpu, dimensions, sampleCnt, std::move(colorTexture),
std::move(resolveTexture)) {
this->registerWithCache(budgeted);
}
GrMtlTextureRenderTarget::GrMtlTextureRenderTarget(GrMtlGpu* gpu,
SkBudgeted budgeted,
SkISize dimensions,
id<MTLTexture> colorTexture,
sk_cf_obj<id<MTLTexture>> colorTexture,
GrMipmapStatus mipmapStatus)
: GrSurface(gpu, dimensions, GrProtected::kNo)
, GrMtlTexture(gpu, dimensions, colorTexture, mipmapStatus)
, GrMtlRenderTarget(gpu, dimensions, colorTexture) {
, GrMtlRenderTarget(gpu, dimensions, std::move(colorTexture)) {
this->registerWithCache(budgeted);
}
GrMtlTextureRenderTarget::GrMtlTextureRenderTarget(GrMtlGpu* gpu,
SkISize dimensions,
int sampleCnt,
id<MTLTexture> colorTexture,
id<MTLTexture> resolveTexture,
sk_cf_obj<id<MTLTexture>> colorTexture,
sk_cf_obj<id<MTLTexture>> resolveTexture,
GrMipmapStatus mipmapStatus,
GrWrapCacheable cacheable)
: GrSurface(gpu, dimensions, GrProtected::kNo)
, GrMtlTexture(gpu, dimensions, resolveTexture, mipmapStatus)
, GrMtlRenderTarget(gpu, dimensions, sampleCnt, colorTexture, resolveTexture) {
, GrMtlRenderTarget(gpu, dimensions, sampleCnt, std::move(colorTexture),
std::move(resolveTexture)) {
this->registerWithCacheWrapped(cacheable);
}
GrMtlTextureRenderTarget::GrMtlTextureRenderTarget(GrMtlGpu* gpu,
SkISize dimensions,
id<MTLTexture> colorTexture,
sk_cf_obj<id<MTLTexture>> colorTexture,
GrMipmapStatus mipmapStatus,
GrWrapCacheable cacheable)
: GrSurface(gpu, dimensions, GrProtected::kNo)
, GrMtlTexture(gpu, dimensions, colorTexture, mipmapStatus)
, GrMtlRenderTarget(gpu, dimensions, colorTexture) {
, GrMtlRenderTarget(gpu, dimensions, std::move(colorTexture)) {
this->registerWithCacheWrapped(cacheable);
}
id<MTLTexture> create_msaa_texture(GrMtlGpu* gpu, SkISize dimensions, MTLPixelFormat format,
int sampleCnt) {
sk_cf_obj<id<MTLTexture>> create_msaa_texture(GrMtlGpu* gpu, SkISize dimensions,
MTLPixelFormat format, int sampleCnt) {
if (!gpu->mtlCaps().isFormatRenderable(format, sampleCnt)) {
return nullptr;
}
MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
texDesc.textureType = MTLTextureType2DMultisample;
texDesc.pixelFormat = format;
texDesc.width = dimensions.fWidth;
texDesc.height = dimensions.fHeight;
texDesc.depth = 1;
texDesc.mipmapLevelCount = 1;
texDesc.sampleCount = sampleCnt;
texDesc.arrayLength = 1;
sk_cf_obj<MTLTextureDescriptor*> texDesc([[MTLTextureDescriptor alloc] init]);
(*texDesc).textureType = MTLTextureType2DMultisample;
(*texDesc).pixelFormat = format;
(*texDesc).width = dimensions.fWidth;
(*texDesc).height = dimensions.fHeight;
(*texDesc).depth = 1;
(*texDesc).mipmapLevelCount = 1;
(*texDesc).sampleCount = sampleCnt;
(*texDesc).arrayLength = 1;
if (@available(macOS 10.11, iOS 9.0, *)) {
texDesc.storageMode = MTLStorageModePrivate;
texDesc.usage = MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget;
(*texDesc).storageMode = MTLStorageModePrivate;
(*texDesc).usage = MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget;
}
return [gpu->device() newTextureWithDescriptor:texDesc];
return sk_cf_obj<id<MTLTexture>>([gpu->device() newTextureWithDescriptor:texDesc.get()]);
}
sk_sp<GrMtlTextureRenderTarget> GrMtlTextureRenderTarget::MakeNewTextureRenderTarget(
@ -90,28 +88,31 @@ sk_sp<GrMtlTextureRenderTarget> GrMtlTextureRenderTarget::MakeNewTextureRenderTa
int sampleCnt,
MTLTextureDescriptor* texDesc,
GrMipmapStatus mipmapStatus) {
id<MTLTexture> texture = [gpu->device() newTextureWithDescriptor:texDesc];
sk_cf_obj<id<MTLTexture>> texture([gpu->device() newTextureWithDescriptor:texDesc]);
if (!texture) {
return nullptr;
}
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT((MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget) & texture.usage);
SkASSERT((MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget) & (*texture).usage);
}
if (sampleCnt > 1) {
id<MTLTexture> colorTexture =
create_msaa_texture(gpu, dimensions, texture.pixelFormat, sampleCnt);
sk_cf_obj<id<MTLTexture>> colorTexture =
create_msaa_texture(gpu, dimensions, (*texture).pixelFormat, sampleCnt);
if (!colorTexture) {
return nullptr;
}
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT((MTLTextureUsageShaderRead|MTLTextureUsageRenderTarget) & colorTexture.usage);
SkASSERT((MTLTextureUsageShaderRead|MTLTextureUsageRenderTarget) &
(*colorTexture).usage);
}
return sk_sp<GrMtlTextureRenderTarget>(new GrMtlTextureRenderTarget(
gpu, budgeted, dimensions, sampleCnt, colorTexture, texture, mipmapStatus));
gpu, budgeted, dimensions, sampleCnt, std::move(colorTexture), std::move(texture),
mipmapStatus));
} else {
return sk_sp<GrMtlTextureRenderTarget>(
new GrMtlTextureRenderTarget(gpu, budgeted, dimensions, texture, mipmapStatus));
new GrMtlTextureRenderTarget(gpu, budgeted, dimensions, std::move(texture),
mipmapStatus));
}
}
@ -119,28 +120,31 @@ sk_sp<GrMtlTextureRenderTarget> GrMtlTextureRenderTarget::MakeWrappedTextureRend
GrMtlGpu* gpu,
SkISize dimensions,
int sampleCnt,
id<MTLTexture> texture,
sk_cf_obj<id<MTLTexture>> texture,
GrWrapCacheable cacheable) {
SkASSERT(nil != texture);
SkASSERT(texture);
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT((MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget) & texture.usage);
SkASSERT((MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget) & (*texture).usage);
}
GrMipmapStatus mipmapStatus = texture.mipmapLevelCount > 1
GrMipmapStatus mipmapStatus = (*texture).mipmapLevelCount > 1
? GrMipmapStatus::kDirty
: GrMipmapStatus::kNotAllocated;
if (sampleCnt > 1) {
id<MTLTexture> colorTexture =
create_msaa_texture(gpu, dimensions, texture.pixelFormat, sampleCnt);
sk_cf_obj<id<MTLTexture>> colorTexture =
create_msaa_texture(gpu, dimensions, (*texture).pixelFormat, sampleCnt);
if (!colorTexture) {
return nullptr;
}
if (@available(macOS 10.11, iOS 9.0, *)) {
SkASSERT((MTLTextureUsageShaderRead|MTLTextureUsageRenderTarget) & colorTexture.usage);
SkASSERT((MTLTextureUsageShaderRead|MTLTextureUsageRenderTarget) &
(*colorTexture).usage);
}
return sk_sp<GrMtlTextureRenderTarget>(new GrMtlTextureRenderTarget(
gpu, dimensions, sampleCnt, colorTexture, texture, mipmapStatus, cacheable));
gpu, dimensions, sampleCnt, std::move(colorTexture), std::move(texture),
mipmapStatus, cacheable));
} else {
return sk_sp<GrMtlTextureRenderTarget>(
new GrMtlTextureRenderTarget(gpu, dimensions, texture, mipmapStatus, cacheable));
new GrMtlTextureRenderTarget(gpu, dimensions, std::move(texture), mipmapStatus,
cacheable));
}
}
View File
@ -9,17 +9,15 @@
#include "src/gpu/mtl/GrMtlGpu.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
sk_sp<GrGpu> GrMtlTrampoline::MakeGpu(GrDirectContext* direct,
const GrContextOptions& options,
void* device,
void* queue) {
return GrMtlGpu::Make(direct,
options,
(__bridge id<MTLDevice>)device,
(__bridge id<MTLCommandQueue>)queue);
GrMtlBackendContext context;
// For now we retain these to make sure we don't remove the client's ref when we go out of
// scope. In the future the client will set this struct up.
context.fDevice.retain((id<MTLDevice>)device);
context.fQueue.retain((id<MTLCommandQueue>)queue);
return GrMtlGpu::Make(direct, options, context);
}
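
Once callers build the struct themselves, as the comment anticipates, the retain() calls here disappear and the backend context owns its references from the start. A sketch of that future setup, assuming fDevice and fQueue keep the sk_cf_obj types implied by the casts above, with direct and options in scope:

GrMtlBackendContext context;
context.fDevice.reset(MTLCreateSystemDefaultDevice());          // adopt the +1 reference
context.fQueue.reset([context.fDevice.get() newCommandQueue]);  // adopt the +1 reference
sk_sp<GrGpu> gpu = GrMtlGpu::Make(direct, options, context);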
View File
@ -9,10 +9,6 @@
#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
#include "src/gpu/mtl/GrMtlUniformHandler.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
// TODO: this class is basically copy and pasted from GrVkUniformHandler so that we can have
// some shaders working. The SkSL Metal code generator was written to work with GLSL generated for
// the Ganesh Vulkan backend, so it should all work. There might be better ways to do things in
View File
@ -11,13 +11,10 @@
#import <Metal/Metal.h>
#include "include/gpu/GrBackendSurface.h"
#include "include/ports/SkCFObject.h"
#include "include/private/GrTypesPriv.h"
#include "src/sksl/ir/SkSLProgram.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
#if defined(SK_BUILD_FOR_MAC)
#if __MAC_OS_X_VERSION_MAX_ALLOWED < 101400
#error Must use at least 10.14 SDK to build Metal backend for MacOS
@ -32,25 +29,12 @@ class GrMtlGpu;
class GrSurface;
/**
* Returns a id<MTLTexture> to the MTLTexture pointed at by the const void*.
* Returns a sk_cf_obj<id<MTLTexture>> to the MTLTexture pointed at by the const void*.
* Will retain the MTLTexture.
*/
SK_ALWAYS_INLINE id<MTLTexture> GrGetMTLTexture(const void* mtlTexture) {
return (__bridge id<MTLTexture>)mtlTexture;
}
/**
* Returns a const void* to whatever the id object is pointing to.
*/
SK_ALWAYS_INLINE const void* GrGetPtrFromId(id idObject) {
return (__bridge const void*)idObject;
}
/**
* Returns a const void* to whatever the id object is pointing to.
* Will call CFRetain on the object.
*/
SK_ALWAYS_INLINE const void* GrRetainPtrFromId(id idObject) {
return (__bridge_retained const void*)idObject;
SK_ALWAYS_INLINE sk_cf_obj<id<MTLTexture>> GrRetainMTLTexture(const void* mtlTexture) {
sk_cf_obj<id<MTLTexture>> texture(sk_ref_cf_obj((id<MTLTexture>)mtlTexture));
return texture;
}
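
A usage sketch for the helper above: wrapping a texture handle received as a const void* (for example, the pointer stored in a GrMtlTextureInfo) without stealing the caller's reference. mtlTextureHandle is assumed to be supplied by client code:

sk_cf_obj<id<MTLTexture>> texture = GrRetainMTLTexture(mtlTextureHandle);
if (texture) {
    MTLPixelFormat format = (*texture).pixelFormat;  // safe: we hold our own reference
    (void)format;
}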
enum class GrMtlErrorCode {
@ -63,29 +47,29 @@ NSError* GrCreateMtlError(NSString* description, GrMtlErrorCode errorCode);
* Returns a MTLTextureDescriptor which describes the MTLTexture. Useful when creating a duplicate
* MTLTexture without the same storage allocation.
*/
MTLTextureDescriptor* GrGetMTLTextureDescriptor(id<MTLTexture> mtlTexture);
sk_cf_obj<MTLTextureDescriptor*> GrGetMTLTextureDescriptor(id<MTLTexture> mtlTexture);
/**
* Returns a compiled MTLLibrary created from MSL code generated by SkSLC
*/
id<MTLLibrary> GrGenerateMtlShaderLibrary(const GrMtlGpu* gpu,
const SkSL::String& shaderString,
SkSL::Program::Kind kind,
const SkSL::Program::Settings& settings,
SkSL::String* mslShader,
SkSL::Program::Inputs* outInputs);
sk_cf_obj<id<MTLLibrary>> GrGenerateMtlShaderLibrary(const GrMtlGpu* gpu,
const SkSL::String& shaderString,
SkSL::Program::Kind kind,
const SkSL::Program::Settings& settings,
SkSL::String* mslShader,
SkSL::Program::Inputs* outInputs);
/**
* Returns a compiled MTLLibrary created from MSL code
*/
id<MTLLibrary> GrCompileMtlShaderLibrary(const GrMtlGpu* gpu,
const SkSL::String& shaderString);
sk_cf_obj<id<MTLLibrary>> GrCompileMtlShaderLibrary(const GrMtlGpu* gpu,
const SkSL::String& shaderString);
/**
* Replacement for newLibraryWithSource:options:error that has a timeout.
*/
id<MTLLibrary> GrMtlNewLibraryWithSource(id<MTLDevice>, NSString* mslCode,
MTLCompileOptions*, NSError**);
MTLCompileOptions*, NSError**);
/**
* Replacement for newRenderPipelineStateWithDescriptor:error that has a timeout.
View File
@ -18,10 +18,6 @@
#import <Metal/Metal.h>
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
#define PRINT_MSL 0 // print out the MSL code generated
NSError* GrCreateMtlError(NSString* description, GrMtlErrorCode errorCode) {
@ -32,18 +28,18 @@ NSError* GrCreateMtlError(NSString* description, GrMtlErrorCode errorCode) {
userInfo:userInfo];
}
MTLTextureDescriptor* GrGetMTLTextureDescriptor(id<MTLTexture> mtlTexture) {
MTLTextureDescriptor* texDesc = [[MTLTextureDescriptor alloc] init];
texDesc.textureType = mtlTexture.textureType;
texDesc.pixelFormat = mtlTexture.pixelFormat;
texDesc.width = mtlTexture.width;
texDesc.height = mtlTexture.height;
texDesc.depth = mtlTexture.depth;
texDesc.mipmapLevelCount = mtlTexture.mipmapLevelCount;
texDesc.arrayLength = mtlTexture.arrayLength;
texDesc.sampleCount = mtlTexture.sampleCount;
sk_cf_obj<MTLTextureDescriptor*> GrGetMTLTextureDescriptor(id<MTLTexture> mtlTexture) {
sk_cf_obj<MTLTextureDescriptor*> texDesc([[MTLTextureDescriptor alloc] init]);
(*texDesc).textureType = mtlTexture.textureType;
(*texDesc).pixelFormat = mtlTexture.pixelFormat;
(*texDesc).width = mtlTexture.width;
(*texDesc).height = mtlTexture.height;
(*texDesc).depth = mtlTexture.depth;
(*texDesc).mipmapLevelCount = mtlTexture.mipmapLevelCount;
(*texDesc).arrayLength = mtlTexture.arrayLength;
(*texDesc).sampleCount = mtlTexture.sampleCount;
if (@available(macOS 10.11, iOS 9.0, *)) {
texDesc.usage = mtlTexture.usage;
(*texDesc).usage = mtlTexture.usage;
}
return texDesc;
}
@ -60,12 +56,12 @@ void print_msl(const char* source) {
}
#endif
id<MTLLibrary> GrGenerateMtlShaderLibrary(const GrMtlGpu* gpu,
const SkSL::String& shaderString,
SkSL::Program::Kind kind,
const SkSL::Program::Settings& settings,
SkSL::String* mslShader,
SkSL::Program::Inputs* outInputs) {
sk_cf_obj<id<MTLLibrary>> GrGenerateMtlShaderLibrary(const GrMtlGpu* gpu,
const SkSL::String& shaderString,
SkSL::Program::Kind kind,
const SkSL::Program::Settings& settings,
SkSL::String* mslShader,
SkSL::Program::Inputs* outInputs) {
std::unique_ptr<SkSL::Program> program =
gpu->shaderCompiler()->convertProgram(kind,
shaderString,
@ -87,22 +83,22 @@ id<MTLLibrary> GrGenerateMtlShaderLibrary(const GrMtlGpu* gpu,
return GrCompileMtlShaderLibrary(gpu, *mslShader);
}
id<MTLLibrary> GrCompileMtlShaderLibrary(const GrMtlGpu* gpu,
const SkSL::String& shaderString) {
NSString* mtlCode = [[NSString alloc] initWithCString: shaderString.c_str()
encoding: NSASCIIStringEncoding];
sk_cf_obj<id<MTLLibrary>> GrCompileMtlShaderLibrary(const GrMtlGpu* gpu,
const SkSL::String& shaderString) {
sk_cf_obj<NSString*> mtlCode([[NSString alloc] initWithCString: shaderString.c_str()
encoding: NSASCIIStringEncoding]);
#if PRINT_MSL
print_msl([mtlCode cStringUsingEncoding: NSASCIIStringEncoding]);
#endif
MTLCompileOptions* defaultOptions = [[MTLCompileOptions alloc] init];
sk_cf_obj<MTLCompileOptions*> defaultOptions([[MTLCompileOptions alloc] init]);
NSError* error = nil;
#if defined(SK_BUILD_FOR_MAC)
id<MTLLibrary> compiledLibrary = GrMtlNewLibraryWithSource(gpu->device(), mtlCode,
defaultOptions, &error);
id<MTLLibrary> compiledLibrary = GrMtlNewLibraryWithSource(gpu->device(), *mtlCode,
*defaultOptions, &error);
#else
id<MTLLibrary> compiledLibrary = [gpu->device() newLibraryWithSource: mtlCode
options: defaultOptions
id<MTLLibrary> compiledLibrary = [gpu->device() newLibraryWithSource: mtlCode.get()
options: defaultOptions.get()
error: &error];
#endif
if (!compiledLibrary) {
@ -112,7 +108,7 @@ id<MTLLibrary> GrCompileMtlShaderLibrary(const GrMtlGpu* gpu,
return nil;
}
return compiledLibrary;
return sk_cf_obj<id<MTLLibrary>>(compiledLibrary);
}
// Wrapper to get atomic assignment for compiles and pipeline creation
@ -121,7 +117,12 @@ public:
MtlCompileResult() : fCompiledObject(nil), fError(nil) {}
void set(id compiledObject, NSError* error) {
SkAutoMutexExclusive automutex(fMutex);
fCompiledObject = compiledObject;
// we need to retain ownership here -- otherwise when we leave the
// scope of the block it will be deleted.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wobjc-messaging-id"
fCompiledObject = [compiledObject retain];
#pragma clang diagnostic pop
fError = error;
}
std::pair<id, NSError*> get() {
@ -231,7 +232,7 @@ id<MTLTexture> GrGetMTLTextureFromSurface(GrSurface* surface) {
// CPP Utils
GrMTLPixelFormat GrGetMTLPixelFormatFromMtlTextureInfo(const GrMtlTextureInfo& info) {
id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());
id<MTLTexture> mtlTexture = (id<MTLTexture>)(info.fTexture.get());
return static_cast<GrMTLPixelFormat>(mtlTexture.pixelFormat);
}
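
The retain inside MtlCompileResult::set above reflects a rule that manual reference counting imposes on completion handlers: an object received as a block argument is only guaranteed to live for the duration of the block, so it must be retained before being stored for later use. A standalone sketch of the same pattern, assuming device, source, and options are in scope and no ARC:

__block id<MTLLibrary> result = nil;
[device newLibraryWithSource: source
                     options: options
           completionHandler: ^(id<MTLLibrary> library, NSError* error) {
               result = [library retain];  // keep the library alive past the block
           }];
// ... wait for the handler to run (e.g. with a semaphore or timeout) ...
[result release];  // balance the retain once the caller is done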
View File
@ -7,10 +7,6 @@
#include "src/gpu/mtl/GrMtlVaryingHandler.h"
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
static void finalize_helper(GrMtlVaryingHandler::VarArray& vars) {
int locationIndex = 0;
int componentCount = 0;
View File
@ -37,7 +37,7 @@ sk_sp<SkSurface> SkSurface::MakeFromCAMetalLayer(GrContext* context,
GrProxyProvider* proxyProvider = context->priv().proxyProvider();
const GrCaps* caps = context->priv().caps();
CAMetalLayer* metalLayer = (__bridge CAMetalLayer*)layer;
CAMetalLayer* metalLayer = (CAMetalLayer*)layer;
GrBackendFormat backendFormat = GrBackendFormat::MakeMtl(metalLayer.pixelFormat);
GrColorType grColorType = SkColorTypeToGrColorType(colorType);
@ -51,24 +51,24 @@ sk_sp<SkSurface> SkSurface::MakeFromCAMetalLayer(GrContext* context,
sk_sp<GrRenderTargetProxy> proxy = proxyProvider->createLazyRenderTargetProxy(
[layer, drawable](GrResourceProvider* resourceProvider,
const GrSurfaceProxy::LazySurfaceDesc& desc) {
CAMetalLayer* metalLayer = (__bridge CAMetalLayer*)layer;
CAMetalLayer* metalLayer = (CAMetalLayer*)layer;
id<CAMetalDrawable> currentDrawable = [metalLayer nextDrawable];
GrMtlGpu* mtlGpu = (GrMtlGpu*) resourceProvider->priv().gpu();
sk_sp<GrRenderTarget> surface;
sk_cf_obj<id<MTLTexture>> drawableTexture(sk_ref_cf_obj(currentDrawable.texture));
if (metalLayer.framebufferOnly) {
surface = GrMtlRenderTarget::MakeWrappedRenderTarget(
mtlGpu, desc.fDimensions, desc.fSampleCnt, currentDrawable.texture);
mtlGpu, desc.fDimensions, desc.fSampleCnt, std::move(drawableTexture));
} else {
surface = GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
mtlGpu, desc.fDimensions, desc.fSampleCnt, currentDrawable.texture,
mtlGpu, desc.fDimensions, desc.fSampleCnt, std::move(drawableTexture),
GrWrapCacheable::kNo);
}
if (surface && desc.fSampleCnt > 1) {
surface->setRequiresManualMSAAResolve();
}
*drawable = (__bridge_retained GrMTLHandle) currentDrawable;
*drawable = [currentDrawable retain];
return GrSurfaceProxy::LazyCallbackResult(std::move(surface));
},
backendFormat,
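
The retain stored into *drawable in the callback above means the returned GrMTLHandle carries a +1 reference that the caller must balance after presenting. A caller-side sketch with illustrative names; commandBuffer is assumed to be the client's id<MTLCommandBuffer>:

GrMTLHandle drawableHandle = nullptr;  // filled in when the proxy instantiates
// ... create the surface passing &drawableHandle, draw, and flush ...
id<CAMetalDrawable> drawable = (id<CAMetalDrawable>)drawableHandle;
[commandBuffer presentDrawable: drawable];
[drawable release];  // balance the retain taken in the callback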
@ -108,7 +108,7 @@ sk_sp<SkSurface> SkSurface::MakeFromMTKView(GrContext* context,
GrProxyProvider* proxyProvider = context->priv().proxyProvider();
const GrCaps* caps = context->priv().caps();
MTKView* mtkView = (__bridge MTKView*)view;
MTKView* mtkView = (MTKView*)view;
GrBackendFormat backendFormat = GrBackendFormat::MakeMtl(mtkView.colorPixelFormat);
GrColorType grColorType = SkColorTypeToGrColorType(colorType);
@ -122,17 +122,18 @@ sk_sp<SkSurface> SkSurface::MakeFromMTKView(GrContext* context,
sk_sp<GrRenderTargetProxy> proxy = proxyProvider->createLazyRenderTargetProxy(
[view](GrResourceProvider* resourceProvider,
const GrSurfaceProxy::LazySurfaceDesc& desc) {
MTKView* mtkView = (__bridge MTKView*)view;
MTKView* mtkView = (MTKView*)view;
id<CAMetalDrawable> currentDrawable = [mtkView currentDrawable];
GrMtlGpu* mtlGpu = (GrMtlGpu*) resourceProvider->priv().gpu();
sk_sp<GrRenderTarget> surface;
sk_cf_obj<id<MTLTexture>> drawableTexture(sk_ref_cf_obj(currentDrawable.texture));
if (mtkView.framebufferOnly) {
surface = GrMtlRenderTarget::MakeWrappedRenderTarget(
mtlGpu, desc.fDimensions, desc.fSampleCnt, currentDrawable.texture);
mtlGpu, desc.fDimensions, desc.fSampleCnt, std::move(drawableTexture));
} else {
surface = GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
mtlGpu, desc.fDimensions, desc.fSampleCnt, currentDrawable.texture,
mtlGpu, desc.fDimensions, desc.fSampleCnt, std::move(drawableTexture),
GrWrapCacheable::kNo);
}
if (surface && desc.fSampleCnt > 1) {
View File
@ -38,7 +38,7 @@ DEF_GPUTEST_FOR_METAL_CONTEXT(MtlCopySurfaceTest, reporter, ctxInfo) {
// TODO: check multisampled RT as well
GrMtlTextureInfo fbInfo;
fbInfo.fTexture.retain((__bridge const void*)(drawable.texture));
fbInfo.fTexture.retain(drawable.texture);
GrBackendRenderTarget backendRT(kWidth, kHeight, 1, fbInfo);
GrProxyProvider* proxyProvider = context->priv().proxyProvider();
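
With fTexture now a smart pointer, retain() takes the Metal object directly and the old __bridge cast disappears. The same pattern wraps any client-owned texture; a short sketch, assuming mtlTexture is an id<MTLTexture> the client continues to own:

GrMtlTextureInfo info;
info.fTexture.retain(mtlTexture);  // take our own reference; the client keeps theirs
GrBackendRenderTarget backendRT(width, height, /*sampleCnt=*/1, info);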
View File
@ -10,6 +10,7 @@
#include "include/gpu/GrContextOptions.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/mtl/GrMtlTypes.h"
#include "src/gpu/mtl/GrMtlUtil.h"
#ifdef SK_METAL
@ -20,36 +21,36 @@ namespace {
class MtlTestContextImpl : public sk_gpu_test::MtlTestContext {
public:
static MtlTestContext* Create(MtlTestContext* sharedContext) {
id<MTLDevice> device;
id<MTLCommandQueue> queue;
sk_cf_obj<id<MTLDevice>> device;
sk_cf_obj<id<MTLCommandQueue>> queue;
if (sharedContext) {
MtlTestContextImpl* sharedContextImpl = (MtlTestContextImpl*) sharedContext;
device = sharedContextImpl->device();
queue = sharedContextImpl->queue();
device.retain(sharedContextImpl->device());
queue.retain(sharedContextImpl->queue());
} else {
#ifdef SK_BUILD_FOR_MAC
NSArray<id <MTLDevice>>* availableDevices = MTLCopyAllDevices();
sk_cf_obj<NSArray<id <MTLDevice>>*> availableDevices(MTLCopyAllDevices());
// Choose the non-integrated GPU if available
for (id<MTLDevice> dev in availableDevices) {
for (id<MTLDevice> dev in availableDevices.get()) {
if (!dev.isLowPower) {
device = dev;
device.retain(dev);
break;
}
if (dev.isRemovable) {
device = dev;
device.retain(dev);
break;
}
}
if (!device) {
device = MTLCreateSystemDefaultDevice();
device.reset(MTLCreateSystemDefaultDevice());
}
#else
device = MTLCreateSystemDefaultDevice();
device.reset(MTLCreateSystemDefaultDevice());
#endif
queue = [device newCommandQueue];
queue.reset([*device newCommandQueue]);
}
return new MtlTestContextImpl(device, queue);
return new MtlTestContextImpl(std::move(device), std::move(queue));
}
~MtlTestContextImpl() override { this->teardown(); }
@ -59,17 +60,17 @@ public:
void finish() override {}
sk_sp<GrDirectContext> makeContext(const GrContextOptions& options) override {
return GrDirectContext::MakeMetal((__bridge void*)fDevice,
(__bridge void*)fQueue,
return GrDirectContext::MakeMetal((void*)fDevice.get(),
(void*)fQueue.get(),
options);
}
id<MTLDevice> device() { return fDevice; }
id<MTLCommandQueue> queue() { return fQueue; }
id<MTLDevice> device() { return fDevice.get(); }
id<MTLCommandQueue> queue() { return fQueue.get(); }
private:
MtlTestContextImpl(id<MTLDevice> device, id<MTLCommandQueue> queue)
: INHERITED(), fDevice(device), fQueue(queue) {
MtlTestContextImpl(sk_cf_obj<id<MTLDevice>> device, sk_cf_obj<id<MTLCommandQueue>> queue)
: INHERITED(), fDevice(std::move(device)), fQueue(std::move(queue)) {
fFenceSupport = true;
}
@ -77,8 +78,8 @@ private:
void onPlatformMakeCurrent() const override {}
std::function<void()> onPlatformGetAutoContextRestore() const override { return nullptr; }
id<MTLDevice> fDevice;
id<MTLCommandQueue> fQueue;
sk_cf_obj<id<MTLDevice>> fDevice;
sk_cf_obj<id<MTLCommandQueue>> fQueue;
typedef sk_gpu_test::MtlTestContext INHERITED;
};