blob: 7e04577543f2922a816c7199081d740c676297a4 [file] [log] [blame]
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
#ifndef GrBuffer_DEFINED
#define GrBuffer_DEFINED

#include "GrGpuResource.h"

class GrGpu;
14
15class GrBuffer : public GrGpuResource {
16public:
17 /**
18 * Computes a scratch key for a buffer with a "dynamic" access pattern. (Buffers with "static"
19 * and "stream" access patterns are disqualified by nature from being cached and reused.)
20 */
cdaltone2e71c22016-04-07 18:13:29 -070021 static void ComputeScratchKeyForDynamicBuffer(size_t size, GrBufferType intendedType,
cdalton397536c2016-03-25 12:15:03 -070022 GrScratchKey* key) {
23 static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
24 GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
25 // TODO: There's not always reason to cache a buffer by type. In some (all?) APIs it's just
26 // a chunk of memory we can use/reuse for any type of data. We really only need to
27 // differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types.
cdaltone2e71c22016-04-07 18:13:29 -070028 builder[0] = intendedType;
cdalton397536c2016-03-25 12:15:03 -070029 builder[1] = (uint32_t)size;
30 if (sizeof(size_t) > 4) {
31 builder[2] = (uint32_t)((uint64_t)size >> 32);
32 }
33 }
34
cdalton397536c2016-03-25 12:15:03 -070035 GrAccessPattern accessPattern() const { return fAccessPattern; }
36
37 /**
38 * Returns true if the buffer is a wrapper around a CPU array. If true it
39 * indicates that map will always succeed and will be free.
40 */
41 bool isCPUBacked() const { return fCPUBacked; }
42
43 /**
44 * Maps the buffer to be written by the CPU.
45 *
46 * The previous content of the buffer is invalidated. It is an error
47 * to draw from the buffer while it is mapped. It may fail if the backend
48 * doesn't support mapping the buffer. If the buffer is CPU backed then
49 * it will always succeed and is a free operation. Once a buffer is mapped,
50 * subsequent calls to map() are ignored.
51 *
52 * Note that buffer mapping does not go through GrContext and therefore is
53 * not serialized with other operations.
54 *
55 * @return a pointer to the data or nullptr if the map fails.
56 */
57 void* map() {
58 if (!fMapPtr) {
59 this->onMap();
60 }
61 return fMapPtr;
62 }
63
64 /**
65 * Unmaps the buffer.
66 *
67 * The pointer returned by the previous map call will no longer be valid.
68 */
69 void unmap() {
70 SkASSERT(fMapPtr);
71 this->onUnmap();
72 fMapPtr = nullptr;
73 }
74
75 /**
76 * Returns the same ptr that map() returned at time of map or nullptr if the
77 * is not mapped.
78 *
79 * @return ptr to mapped buffer data or nullptr if buffer is not mapped.
80 */
81 void* mapPtr() const { return fMapPtr; }
82
83 /**
84 Queries whether the buffer has been mapped.
85
86 @return true if the buffer is mapped, false otherwise.
87 */
88 bool isMapped() const { return SkToBool(fMapPtr); }
89
90 /**
91 * Updates the buffer data.
92 *
93 * The size of the buffer will be preserved. The src data will be
94 * placed at the beginning of the buffer and any remaining contents will
95 * be undefined. srcSizeInBytes must be <= to the buffer size.
96 *
97 * The buffer must not be mapped.
98 *
99 * Note that buffer updates do not go through GrContext and therefore are
100 * not serialized with other operations.
101 *
102 * @return returns true if the update succeeds, false otherwise.
103 */
104 bool updateData(const void* src, size_t srcSizeInBytes) {
105 SkASSERT(!this->isMapped());
106 SkASSERT(srcSizeInBytes <= fGpuMemorySize);
107 return this->onUpdateData(src, srcSizeInBytes);
108 }
109
110protected:
cdaltone2e71c22016-04-07 18:13:29 -0700111 GrBuffer(GrGpu* gpu, size_t gpuMemorySize, GrBufferType intendedType,
112 GrAccessPattern accessPattern, bool cpuBacked)
cdalton397536c2016-03-25 12:15:03 -0700113 : INHERITED(gpu, kCached_LifeCycle),
114 fMapPtr(nullptr),
cdalton397536c2016-03-25 12:15:03 -0700115 fGpuMemorySize(gpuMemorySize), // TODO: Zero for cpu backed buffers?
116 fAccessPattern(accessPattern),
117 fCPUBacked(cpuBacked) {
118 if (!fCPUBacked && SkIsPow2(fGpuMemorySize) && kDynamic_GrAccessPattern == fAccessPattern) {
119 GrScratchKey key;
cdaltone2e71c22016-04-07 18:13:29 -0700120 ComputeScratchKeyForDynamicBuffer(fGpuMemorySize, intendedType, &key);
cdalton397536c2016-03-25 12:15:03 -0700121 this->setScratchKey(key);
122 }
123 }
124
125 void* fMapPtr;
126
127private:
128 virtual size_t onGpuMemorySize() const { return fGpuMemorySize; }
129
130 virtual void onMap() = 0;
131 virtual void onUnmap() = 0;
132 virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0;
133
cdalton397536c2016-03-25 12:15:03 -0700134 size_t fGpuMemorySize;
135 GrAccessPattern fAccessPattern;
136 bool fCPUBacked;
137
138 typedef GrGpuResource INHERITED;
139};
140
#endif