Implemented GrMtlBuffer for the Metal GPU backend
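
Adds a GrMtlBuffer class wrapping an id<MTLBuffer> and hooks it up to
GrMtlGpu::onCreateBuffer(). All buffers are treated as static access for
now: they are allocated with private (GPU-only) storage and filled through
a managed staging buffer plus a blit copy, since CPU/GPU synchronization
for dynamic access is not implemented yet. GrMtlCopyManager now creates its
vertex attribute buffer through GrMtlBuffer instead of issuing the blit
itself.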

Bug: skia:
Change-Id: Iff10fe69ccc08672cbbb06dad623a844008404cd
Reviewed-on: https://skia-review.googlesource.com/144606
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Timothy Liang <timliang@google.com>
diff --git a/gn/gpu.gni b/gn/gpu.gni
index 1c25078..fa00509 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -624,6 +624,8 @@
 
 skia_metal_sources = [
   "$_include/gpu/mtl/GrMtlTypes.h",
+  "$_src/gpu/mtl/GrMtlBuffer.h",
+  "$_src/gpu/mtl/GrMtlBuffer.mm",
   "$_src/gpu/mtl/GrMtlCaps.h",
   "$_src/gpu/mtl/GrMtlCaps.mm",
   "$_src/gpu/mtl/GrMtlCopyManager.h",
diff --git a/src/gpu/mtl/GrMtlBuffer.h b/src/gpu/mtl/GrMtlBuffer.h
new file mode 100644
index 0000000..651cf1a
--- /dev/null
+++ b/src/gpu/mtl/GrMtlBuffer.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlBuffer_DEFINED
+#define GrMtlBuffer_DEFINED
+
+#include "GrBuffer.h"
+
+#import <Metal/Metal.h>
+
+class GrMtlCaps;
+class GrMtlGpu;
+
+class GrMtlBuffer : public GrBuffer {
+public:
+    static GrMtlBuffer* Create(GrMtlGpu*, size_t size, GrBufferType intendedType, GrAccessPattern,
+                               const void* data = nullptr);
+
+    ~GrMtlBuffer() override;
+
+    id<MTLBuffer> mtlBuffer() const { return fMtlBuffer; }
+
+protected:
+    GrMtlBuffer(GrMtlGpu*, size_t size, GrBufferType intendedType, GrAccessPattern);
+
+    void onAbandon() override;
+    void onRelease() override;
+
+private:
+    GrMtlGpu* mtlGpu() const;
+
+    void onMap() override;
+    void onUnmap() override;
+    bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+    void internalMap(size_t sizeInBytes);
+    void internalUnmap(size_t sizeInBytes);
+
+#ifdef SK_DEBUG
+    void validate() const;
+#endif
+
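+    // fMappedBuffer is the CPU-visible buffer that fMapPtr points into while mapped: fMtlBuffer
+    // itself for dynamic access, or a temporary managed staging buffer for static access.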
+    GrBufferType fIntendedType;
+    bool fIsDynamic;
+    id<MTLBuffer> fMtlBuffer;
+    id<MTLBuffer> fMappedBuffer;
+
+    typedef GrBuffer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/mtl/GrMtlBuffer.mm b/src/gpu/mtl/GrMtlBuffer.mm
new file mode 100644
index 0000000..5af363e
--- /dev/null
+++ b/src/gpu/mtl/GrMtlBuffer.mm
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrMtlBuffer.h"
+#include "GrMtlGpu.h"
+#include "GrGpuResourcePriv.h"
+#include "GrTypesPriv.h"
+
+#ifdef SK_DEBUG
+#define VALIDATE() this->validate()
+#else
+#define VALIDATE() do {} while (false)
+#endif
+
+GrMtlBuffer* GrMtlBuffer::Create(GrMtlGpu* gpu, size_t size, GrBufferType intendedType,
+                                 GrAccessPattern accessPattern, const void* data) {
+    // TODO: DrawIndirect buffers aren't actually supported yet because we don't have a way of
+    // uploading data to them.
+    SkASSERT(intendedType != kDrawIndirect_GrBufferType);
+    sk_sp<GrMtlBuffer> buffer(new GrMtlBuffer(gpu, size, intendedType, accessPattern));
+    if (data && !buffer->onUpdateData(data, size)) {
+        return nullptr;
+    }
+    return buffer.release();
+}
+
+GrMtlBuffer::GrMtlBuffer(GrMtlGpu* gpu, size_t size, GrBufferType intendedType,
+                         GrAccessPattern accessPattern)
+        : INHERITED(gpu, size, intendedType, accessPattern)
+        , fIntendedType(intendedType)
+        , fIsDynamic(accessPattern == kDynamic_GrAccessPattern) {
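+    // Managed storage lets the CPU write the buffer directly; private storage is GPU-only, so
+    // static access buffers are filled via a staging buffer in internalMap()/internalUnmap().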
+    // TODO: We are treating all buffers as static access since we don't have an implementation
+    // to synchronize GPU and CPU access to a resource yet. See the comments in
+    // GrMtlBuffer::internalMap() and internalUnmap() for more details.
+    fIsDynamic = false;
+
+    fMtlBuffer =
+            [gpu->device() newBufferWithLength: size
+                                       options: fIsDynamic ? MTLResourceStorageModeManaged
+                                                           : MTLResourceStorageModePrivate];
+    VALIDATE();
+    this->registerWithCache(SkBudgeted::kYes);
+}
+
+GrMtlBuffer::~GrMtlBuffer() {
+    SkASSERT(fMtlBuffer == nil);
+    SkASSERT(fMappedBuffer == nil);
+    SkASSERT(fMapPtr == nullptr);
+}
+
+bool GrMtlBuffer::onUpdateData(const void* src, size_t srcInBytes) {
+    if (fMtlBuffer == nil) {
+        return false;
+    }
+    if (srcInBytes > fMtlBuffer.length) {
+        return false;
+    }
+    if (fIntendedType == kDrawIndirect_GrBufferType) {
+        // TODO: implement encoding data into argument (DrawIndirect) buffers.
+        return false;
+    }
+    VALIDATE();
+
+    this->internalMap(srcInBytes);
+    if (fMapPtr == nullptr) {
+        return false;
+    }
+    SkASSERT(fMappedBuffer);
+    SkASSERT(srcInBytes == fMappedBuffer.length);
+    memcpy(fMapPtr, src, srcInBytes);
+    this->internalUnmap(srcInBytes);
+
+    VALIDATE();
+    return true;
+}
+
+inline GrMtlGpu* GrMtlBuffer::mtlGpu() const {
+    SkASSERT(!this->wasDestroyed());
+    return static_cast<GrMtlGpu*>(this->getGpu());
+}
+
+void GrMtlBuffer::onAbandon() {
+    fMtlBuffer = nil;
+    fMappedBuffer = nil;
+    fMapPtr = nullptr;
+    VALIDATE();
+    INHERITED::onAbandon();
+}
+
+void GrMtlBuffer::onRelease() {
+    if (!this->wasDestroyed()) {
+        VALIDATE();
+        fMtlBuffer = nil;
+        fMappedBuffer = nil;
+        fMapPtr = nullptr;
+        VALIDATE();
+    }
+    INHERITED::onRelease();
+}
+
+void GrMtlBuffer::internalMap(size_t sizeInBytes) {
+    SkASSERT(fMtlBuffer);
+    if (this->wasDestroyed()) {
+        return;
+    }
+    VALIDATE();
+    SkASSERT(!this->isMapped());
+    if (fIsDynamic) {
+        // TODO: We will want to decide whether to create a new buffer here in order to avoid
+        // invalidating a buffer that is still in use by the GPU.
+        fMappedBuffer = fMtlBuffer;
+        fMapPtr = fMappedBuffer.contents;
+    } else {
+        // TODO: We can't ensure that map will only be called once on static access buffers until
+        // we actually enable dynamic access.
+        // SkASSERT(fMappedBuffer == nil);
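+        // Static access: map into a temporary managed staging buffer; internalUnmap() blits its
+        // contents into the private buffer.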
+        fMappedBuffer =
+                [this->mtlGpu()->device() newBufferWithLength: sizeInBytes
+                                                      options: MTLResourceStorageModeManaged];
+        fMapPtr = fMappedBuffer.contents;
+    }
+    VALIDATE();
+}
+
+void GrMtlBuffer::internalUnmap(size_t sizeInBytes) {
+    SkASSERT(fMtlBuffer);
+    if (this->wasDestroyed()) {
+        return;
+    }
+    VALIDATE();
+    SkASSERT(this->isMapped());
+    if (fMtlBuffer == nil) {
+        fMappedBuffer = nil;
+        fMapPtr = nullptr;
+        return;
+    }
+    // TODO: Calling didModifyRange here invalidates the buffer's GPU copy. This will cause
+    // problems for dynamic access buffers if they are still in use by the GPU.
+    [fMappedBuffer didModifyRange: NSMakeRange(0, sizeInBytes)];
+
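+    // For static access buffers, copy the staging buffer into the private GPU-only buffer with
+    // a blit pass.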
+    if (!fIsDynamic) {
+        id<MTLBlitCommandEncoder> blitCmdEncoder =
+                [this->mtlGpu()->commandBuffer() blitCommandEncoder];
+        [blitCmdEncoder copyFromBuffer: fMappedBuffer
+                          sourceOffset: 0
+                              toBuffer: fMtlBuffer
+                     destinationOffset: 0
+                                  size: sizeInBytes];
+        [blitCmdEncoder endEncoding];
+    }
+    fMappedBuffer = nil;
+    fMapPtr = nullptr;
+}
+
+void GrMtlBuffer::onMap() {
+    this->internalMap(fMtlBuffer.length);
+}
+
+void GrMtlBuffer::onUnmap() {
+    this->internalUnmap(fMappedBuffer.length);
+}
+
+#ifdef SK_DEBUG
+void GrMtlBuffer::validate() const {
+    SkASSERT(fMtlBuffer == nil ||
+             fIntendedType == kVertex_GrBufferType ||
+             fIntendedType == kIndex_GrBufferType ||
+             fIntendedType == kXferCpuToGpu_GrBufferType ||
+             fIntendedType == kXferGpuToCpu_GrBufferType);
+//           fIntendedType == kDrawIndirect_GrBufferType not yet supported
+    SkASSERT(fMappedBuffer == nil || fMtlBuffer == nil ||
+             fMappedBuffer.length <= fMtlBuffer.length);
+    SkASSERT(!fIsDynamic); // TODO: implement synchronization to allow dynamic access.
+}
+#endif
diff --git a/src/gpu/mtl/GrMtlCopyManager.mm b/src/gpu/mtl/GrMtlCopyManager.mm
index 529bdc1..84f8f39 100644
--- a/src/gpu/mtl/GrMtlCopyManager.mm
+++ b/src/gpu/mtl/GrMtlCopyManager.mm
@@ -9,6 +9,7 @@
 
 #include "GrSurface.h"
 
+#include "GrMtlBuffer.h"
 #include "GrMtlCopyPipelineState.h"
 #include "GrMtlGpu.h"
 #include "GrMtlResourceProvider.h"
@@ -28,20 +29,11 @@
         {1, 0},
         {1, 1},
     };
-    fVertexAttributeBuffer = [fGpu->device() newBufferWithLength: sizeof(vdata)
-                                                         options: MTLResourceStorageModePrivate];
-    id<MTLBuffer> transferBuffer =
-            [fGpu->device() newBufferWithBytes: vdata
-                                        length: sizeof(vdata)
-                                       options: MTLResourceStorageModeManaged];
-
-    id<MTLBlitCommandEncoder> blitCmdEncoder = [fGpu->commandBuffer() blitCommandEncoder];
-    [blitCmdEncoder copyFromBuffer: transferBuffer
-                      sourceOffset: 0
-                          toBuffer: fVertexAttributeBuffer
-                 destinationOffset: 0
-                              size: sizeof(vdata)];
-    [blitCmdEncoder endEncoding];
+    sk_sp<GrMtlBuffer> mtlBuffer(GrMtlBuffer::Create(fGpu, sizeof(vdata), kVertex_GrBufferType,
+                                                     kStatic_GrAccessPattern, vdata));
+    fVertexAttributeBuffer = mtlBuffer->mtlBuffer();
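+    // fVertexAttributeBuffer holds its own reference to the underlying MTLBuffer, so it stays
+    // valid after the GrMtlBuffer wrapper is released when mtlBuffer goes out of scope.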
 }
 
 void GrMtlCopyManager::createCopyProgramShaders() {
diff --git a/src/gpu/mtl/GrMtlGpu.h b/src/gpu/mtl/GrMtlGpu.h
index 243eaa0..e49c6c2 100644
--- a/src/gpu/mtl/GrMtlGpu.h
+++ b/src/gpu/mtl/GrMtlGpu.h
@@ -47,6 +47,11 @@
         kSkip_SyncQueue
     };
 
+    // Commits the current command buffer to the queue and then creates a new command buffer. If
+    // sync is set to kForce_SyncQueue, the function will wait for all work in the committed
+    // command buffer to finish before creating a new buffer and returning.
+    void submitCommandBuffer(SyncQueue sync);
+
 #ifdef GR_TEST_UTILS
     GrBackendTexture createTestingOnlyBackendTexture(const void* pixels, int w, int h,
                                                      GrPixelConfig config, bool isRT,
@@ -126,9 +131,7 @@
     sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
                                                              int sampleCnt) override;
 
-    GrBuffer* onCreateBuffer(size_t, GrBufferType, GrAccessPattern, const void*) override {
-        return nullptr;
-    }
+    GrBuffer* onCreateBuffer(size_t, GrBufferType, GrAccessPattern, const void*) override;
 
     bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
                       void* buffer, size_t rowBytes) override;
@@ -149,11 +152,6 @@
 
     void onFinishFlush(bool insertedSemaphores) override {}
 
-    // Commits the current command buffer to the queue and then creates a new command buffer. If
-    // sync is set to kForce_SyncQueue, the function will wait for all work in the committed
-    // command buffer to finish before creating a new buffer and returning.
-    void submitCommandBuffer(SyncQueue sync);
-
     // Function that uploads data onto textures with private storage mode (GPU access only).
     bool uploadToTexture(GrMtlTexture* tex, int left, int top, int width, int height,
                          GrColorType dataColorType, const GrMipLevel texels[], int mipLevels);
diff --git a/src/gpu/mtl/GrMtlGpu.mm b/src/gpu/mtl/GrMtlGpu.mm
index 037f175..53ad690 100644
--- a/src/gpu/mtl/GrMtlGpu.mm
+++ b/src/gpu/mtl/GrMtlGpu.mm
@@ -7,6 +7,7 @@
 
 #include "GrMtlGpu.h"
 
+#include "GrMtlBuffer.h"
 #include "GrMtlGpuCommandBuffer.h"
 #include "GrMtlTexture.h"
 #include "GrMtlTextureRenderTarget.h"
@@ -125,6 +126,11 @@
     fCmdBuffer = [fQueue commandBuffer];
 }
 
+GrBuffer* GrMtlGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern,
+                                   const void* data) {
+    return GrMtlBuffer::Create(this, size, type, accessPattern, data);
+}
+
 static bool check_max_blit_width(int widthInPixels) {
     if (widthInPixels > 32767) {
         SkASSERT(false); // surfaces should not be this wide anyway