/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_EXT		0x04000000	/* memory allocated externally */
#define OMAP_BO_EXT_SYNC	0x10000000	/* externally allocated sync object */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_MEM_DMA_API
	 * flag is set and the paddr is valid.  Also if the buffer is remapped
	 * in TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_MEM_DMA_API is not set, then you
	 * should be going thru omap_gem_{get,put}_paddr() to ensure the mapping
	 * is not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA capable
	 * buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_MEM_DMA_API flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have an omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

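/*
 * Illustrative sketch (not part of the driver logic): with the geometry set
 * up in omap_gem_init() below, each usergart entry is one PAGE_SIZE-wide
 * column of a tiled container.  Using the slot-height example given in
 * fault_2d() (a height of 64 rows), a single entry spans:
 *
 *	PAGE_SIZE * height = 4096 * 64 = 256 KiB of user mapping
 *
 * and NUM_USERGART_ENTRIES (2) such regions are cycled round-robin per
 * format, so at most two tiled-buffer windows per format are mapped to
 * userspace at any one time.
 */
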
/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_MEM_DMA_API));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				% NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
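
/*
 * Usage sketch (illustrative, hedged): omap_gem_mmap_obj() is also the kind of
 * helper a dmabuf mmap path would call after the generic GEM setup, e.g.:
 *
 *	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
 *	if (!ret)
 *		ret = omap_gem_mmap_obj(obj, vma);
 *
 * The drm_gem_mmap_obj() pairing above is an assumption about the caller and
 * not required by this file; the in-tree caller here is omap_gem_mmap().
 */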

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @drm_file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
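
/*
 * Illustrative userspace sequence (a sketch, not derived from this file): a
 * KMS client typically reaches omap_gem_dumb_create() and
 * omap_gem_dumb_map_offset() through the generic dumb-buffer ioctls:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = w, .height = h, .bpp = 32,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */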

/**
 * omap_gem_dumb_map - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency via using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				dma_addr_t addr;

				addr = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

				if (dma_mapping_error(dev->dev, addr)) {
					dev_warn(dev->dev,
						"%s: failed to map page\n",
						__func__);
					break;
				}

				dirty = true;
				omap_obj->addrs[i] = addr;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!(omap_obj->flags & OMAP_BO_MEM_DMA_API) &&
			remap && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}
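
/*
 * Typical pin/unpin pattern (sketch only; 'dma_addr' is a local example
 * variable, not something defined in this file):
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_get_paddr(obj, &dma_addr, true);
 *	if (!ret) {
 *		... program DSS/DMA with dma_addr ...
 *		omap_gem_put_paddr(obj);
 *	}
 *
 * Between get and put the mapping stays in place; see the paddr_cnt comment
 * in struct omap_gem_object above.
 */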

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}
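
/*
 * Illustrative usage (a sketch, not taken from an in-tree caller): code about
 * to read a buffer on the CPU can wait for outstanding hw writes with
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);
 *
 * while code driving the hw brackets its access with
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);
 *	... kick hw, and on completion ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 * omap_gem_op_finish() (or omap_gem_op_update()) is what wakes any waiters.
 */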

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_MEM_EXT)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (!omap_obj->vaddr) {
			kfree(omap_obj);

			return NULL;
		}

		flags |= OMAP_BO_MEM_DMA_API;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_MEM_DMA_API | OMAP_BO_MEM_EXT)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);

		omap_obj->flags |= OMAP_BO_MEM_SHMEM;
	}

	return obj;

fail:
	omap_gem_free_object(obj);
	return NULL;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}