/*
 * Copyright (C) 2010 The Android Open Source Project
 * Copyright (c) 2011-2012 Code Aurora Forum. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17
18#include <limits.h>
19#include <unistd.h>
20#include <fcntl.h>
21#include <cutils/properties.h>
22#include <sys/mman.h>
23
24#include <genlock.h>
25
26#include "gr.h"
27#include "gpu.h"
28#include "memalloc.h"
29#include "alloc_controller.h"
30
31using namespace gralloc;
32using android::sp;
33
34gpu_context_t::gpu_context_t(const private_module_t* module,
35 sp<IAllocController> alloc_ctrl ) :
36 mAllocCtrl(alloc_ctrl)
37{
38 // Zero out the alloc_device_t
39 memset(static_cast<alloc_device_t*>(this), 0, sizeof(alloc_device_t));
40
41 char property[PROPERTY_VALUE_MAX];
42 if (property_get("debug.sf.hw", property, NULL) > 0) {
43 if(atoi(property) == 0) {
44 //debug.sf.hw = 0
45 compositionType = CPU_COMPOSITION;
46 } else { //debug.sf.hw = 1
47 // Get the composition type
48 property_get("debug.composition.type", property, NULL);
49 if (property == NULL) {
50 compositionType = GPU_COMPOSITION;
51 } else if ((strncmp(property, "mdp", 3)) == 0) {
52 compositionType = MDP_COMPOSITION;
53 } else if ((strncmp(property, "c2d", 3)) == 0) {
54 compositionType = C2D_COMPOSITION;
55 } else {
56 compositionType = GPU_COMPOSITION;
57 }
58 }
59 } else { //debug.sf.hw is not set. Use cpu composition
60 compositionType = CPU_COMPOSITION;
61 }
62
63 // Initialize the procs
64 common.tag = HARDWARE_DEVICE_TAG;
65 common.version = 0;
66 common.module = const_cast<hw_module_t*>(&module->base.common);
67 common.close = gralloc_close;
68 alloc = gralloc_alloc;
69#if 0
70 allocSize = gralloc_alloc_size;
71#endif
72 free = gralloc_free;
73
74}
75
/*
 * Allocate one buffer out of the framebuffer memory region.
 *
 * Caller must hold m->lock (see gralloc_alloc_framebuffer()).
 *
 * @param size    requested size (ignored; the fb line length * yres,
 *                rounded up to a page, is used instead)
 * @param usage   gralloc usage bits; GRALLOC_USAGE_PRIVATE_ADSP_HEAP is
 *                rejected because FB + ADSP heap cannot be combined
 * @param pHandle out: receives the new private_handle_t on success
 * @return 0 on success, -EINVAL on bad state/usage, -ENOMEM when every
 *         fb slot is already claimed
 */
int gpu_context_t::gralloc_alloc_framebuffer_locked(size_t size, int usage,
                                                    buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);

    // we don't support allocations with both the FB and PMEM_ADSP flags
    if (usage & GRALLOC_USAGE_PRIVATE_ADSP_HEAP) {
        return -EINVAL;
    }

    // The fb device must already have been mapped by the module.
    if (m->framebuffer == NULL) {
        ALOGE("%s: Invalid framebuffer", __FUNCTION__);
        return -EINVAL;
    }

    const uint32_t bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    size_t bufferSize = m->finfo.line_length * m->info.yres;

    //adreno needs FB size to be page aligned
    bufferSize = roundUpToPageSize(bufferSize);

    if (numBuffers == 1) {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        return gralloc_alloc_buffer(bufferSize, newUsage, pHandle, BUFFER_TYPE_UI,
                                    m->fbFormat, m->info.xres, m->info.yres);
    }

    // Every bit set in the mask means that fb slot is taken.
    if (bufferMask >= ((1LU<<numBuffers)-1)) {
        // We ran out of buffers.
        return -ENOMEM;
    }

    // create a "fake" handles for it
    // Set the PMEM flag as well, since adreno
    // treats the FB memory as pmem
    intptr_t vaddr = intptr_t(m->framebuffer->base);
    private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), bufferSize,
                                                 private_handle_t::PRIV_FLAGS_USES_PMEM |
                                                 private_handle_t::PRIV_FLAGS_FRAMEBUFFER,
                                                 BUFFER_TYPE_UI, m->fbFormat, m->info.xres,
                                                 m->info.yres);

    // find a free slot: vaddr advances by one (page-rounded) buffer per
    // occupied slot, so on break it points at the claimed slot's base.
    for (uint32_t i=0 ; i<numBuffers ; i++) {
        if ((bufferMask & (1LU<<i)) == 0) {
            m->bufferMask |= (1LU<<i);
            break;
        }
        vaddr += bufferSize;
    }

    hnd->base = vaddr;
    hnd->offset = vaddr - intptr_t(m->framebuffer->base);
    *pHandle = hnd;
    return 0;
}
136
137
138int gpu_context_t::gralloc_alloc_framebuffer(size_t size, int usage,
139 buffer_handle_t* pHandle)
140{
141 private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
142 pthread_mutex_lock(&m->lock);
143 int err = gralloc_alloc_framebuffer_locked(size, usage, pHandle);
144 pthread_mutex_unlock(&m->lock);
145 return err;
146}
147
/*
 * Allocate a non-framebuffer gralloc buffer through the IAllocController.
 *
 * @param size       requested size; rounded up to a whole page
 * @param usage      gralloc usage bits (drives heap selection and flags)
 * @param pHandle    out: receives the new private_handle_t on success
 * @param bufferType BUFFER_TYPE_UI or BUFFER_TYPE_VIDEO
 * @param format     HAL pixel format stored in the handle
 * @param width      aligned width stored in the handle
 * @param height     aligned height stored in the handle
 * @return 0 on success, negative errno from the allocator on failure
 */
int gpu_context_t::gralloc_alloc_buffer(size_t size, int usage,
                                        buffer_handle_t* pHandle, int bufferType,
                                        int format, int width, int height)
{
    int err = 0;
    int flags = 0;
    size = roundUpToPageSize(size);
    alloc_data data;
    data.offset = 0;
    data.fd = -1;
    data.base = 0;
    data.size = size;
    // Tiled NV12 needs 8K alignment; everything else is page aligned.
    if(format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED)
        data.align = 8192;
    else
        data.align = getpagesize();
    // NOTE(review): casting a pointer to unsigned int truncates on LP64 —
    // presumably this code only targets 32-bit builds; confirm before reuse.
    data.pHandle = (unsigned int) pHandle;
    err = mAllocCtrl->allocate(data, usage, compositionType);

    // Translate usage bits into private handle flags.
    if (usage & GRALLOC_USAGE_PRIVATE_UNSYNCHRONIZED) {
        flags |= private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED;
    }

    if (usage & GRALLOC_USAGE_EXTERNAL_ONLY) {
        flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY;
        //The EXTERNAL_BLOCK flag is always an add-on
        if (usage & GRALLOC_USAGE_EXTERNAL_BLOCK) {
            flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_BLOCK;
        }
    }

    if (err == 0) {
        // allocType records which heap actually satisfied the request.
        flags |= data.allocType;
        private_handle_t* hnd = new private_handle_t(data.fd, size, flags,
                                                     bufferType, format, width, height);

        hnd->offset = data.offset;
        hnd->base = int(data.base) + data.offset;
        *pHandle = hnd;
    }

    ALOGE_IF(err, "gralloc failed err=%s", strerror(-err));
    return err;
}
192
193void gpu_context_t::getGrallocInformationFromFormat(int inputFormat,
194 int *colorFormat,
195 int *bufferType)
196{
197 *bufferType = BUFFER_TYPE_VIDEO;
198 *colorFormat = inputFormat;
199
200 if (inputFormat == HAL_PIXEL_FORMAT_YV12) {
201 *bufferType = BUFFER_TYPE_VIDEO;
202 } else if (inputFormat & S3D_FORMAT_MASK) {
203 // S3D format
204 *colorFormat = COLOR_FORMAT(inputFormat);
205 } else if (inputFormat & INTERLACE_MASK) {
206 // Interlaced
207 *colorFormat = inputFormat ^ HAL_PIXEL_FORMAT_INTERLACE;
208 } else if (inputFormat < 0x7) {
209 // RGB formats
210 *colorFormat = inputFormat;
211 *bufferType = BUFFER_TYPE_UI;
212 } else if ((inputFormat == HAL_PIXEL_FORMAT_R_8) ||
213 (inputFormat == HAL_PIXEL_FORMAT_RG_88)) {
214 *colorFormat = inputFormat;
215 *bufferType = BUFFER_TYPE_UI;
216 }
217}
218
219int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
220 buffer_handle_t* pHandle, int* pStride, size_t bufferSize) {
221 if (!pHandle || !pStride)
222 return -EINVAL;
223
224 size_t size;
225 int alignedw, alignedh;
226 int colorFormat, bufferType;
227 getGrallocInformationFromFormat(format, &colorFormat, &bufferType);
228 size = getBufferSizeAndDimensions(w, h, colorFormat, alignedw, alignedh);
229
230 if ((ssize_t)size <= 0)
231 return -EINVAL;
232 size = (bufferSize >= size)? bufferSize : size;
233
234 // All buffers marked as protected or for external
235 // display need to go to overlay
236 if ((usage & GRALLOC_USAGE_EXTERNAL_DISP) ||
237 (usage & GRALLOC_USAGE_PROTECTED)) {
238 bufferType = BUFFER_TYPE_VIDEO;
239 }
240 int err;
241 if (usage & GRALLOC_USAGE_HW_FB) {
242 err = gralloc_alloc_framebuffer(size, usage, pHandle);
243 } else {
244 err = gralloc_alloc_buffer(size, usage, pHandle, bufferType,
245 format, alignedw, alignedh);
246 }
247
248 if (err < 0) {
249 return err;
250 }
251
252 // Create a genlock lock for this buffer handle.
253 err = genlock_create_lock((native_handle_t*)(*pHandle));
254 if (err) {
255 ALOGE("%s: genlock_create_lock failed", __FUNCTION__);
256 free_impl(reinterpret_cast<private_handle_t*>(pHandle));
257 return err;
258 }
259 *pStride = alignedw;
260 return 0;
261}
262
263int gpu_context_t::free_impl(private_handle_t const* hnd) {
264 private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
265 if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
266 // free this buffer
267 const size_t bufferSize = m->finfo.line_length * m->info.yres;
268 int index = (hnd->base - m->framebuffer->base) / bufferSize;
269 m->bufferMask &= ~(1<<index);
270 } else {
271 sp<IMemAlloc> memalloc = mAllocCtrl->getAllocator(hnd->flags);
272 int err = memalloc->free_buffer((void*)hnd->base, (size_t) hnd->size,
273 hnd->offset, hnd->fd);
274 if(err)
275 return err;
276 terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
277 }
278
279 // Release the genlock
280 int err = genlock_release_lock((native_handle_t*)hnd);
281 if (err) {
282 ALOGE("%s: genlock_release_lock failed", __FUNCTION__);
283 }
284
285 delete hnd;
286 return 0;
287}
288
289int gpu_context_t::gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
290 int usage, buffer_handle_t* pHandle, int* pStride)
291{
292 if (!dev) {
293 return -EINVAL;
294 }
295 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
296 return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, 0);
297}
298int gpu_context_t::gralloc_alloc_size(alloc_device_t* dev, int w, int h, int format,
299 int usage, buffer_handle_t* pHandle, int* pStride, int bufferSize)
300{
301 if (!dev) {
302 return -EINVAL;
303 }
304 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
305 return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, bufferSize);
306}
307
308
309int gpu_context_t::gralloc_free(alloc_device_t* dev,
310 buffer_handle_t handle)
311{
312 if (private_handle_t::validate(handle) < 0)
313 return -EINVAL;
314
315 private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
316 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
317 return gpu->free_impl(hnd);
318}
319
320/*****************************************************************************/
321
322int gpu_context_t::gralloc_close(struct hw_device_t *dev)
323{
324 gpu_context_t* ctx = reinterpret_cast<gpu_context_t*>(dev);
325 if (ctx) {
326 /* TODO: keep a list of all buffer_handle_t created, and free them
327 * all here.
328 */
329 delete ctx;
330 }
331 return 0;
332}
333