/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */


struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but it doesn't guarantee that the
	 * buffer actually is one.  Use the OMAP_BO_DMA flag to determine
	 * whether the buffer has a DMA capable physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have an omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;

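/*
 * Rough example of the scheme (with assumed numbers): if the 8-bit
 * container used 64-row slots, a userspace fault against a tiled buffer
 * would pin one page-wide, 64-row region into one of the
 * NUM_USERGART_ENTRIES reserved blocks and insert those pages into the
 * faulting mapping; a later fault needing a different region (or buffer)
 * evicts and reuses an entry round-robin (see fault_2d() below).
 */
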
static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int n = usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is larger than PAGE_SIZE, the mapping is sparse: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  Non-
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but to reduce pressure on TILER/DMM space, we don't keep
 * them pinned there all the time, even when we know at allocation time
 * that the buffer will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so we need to adjust
		 * for this in the size used to mmap and generate the mmap
		 * offset.
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the
	 * slot height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than-4k buffers.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area.  GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM.  In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do the corresponding put_pages()?.. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() of all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER.  But these are
		 * allocated write-combine.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}


/**
 * omap_gem_dumb_create - create a dumb buffer
 * @drm_file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space.  Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

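/*
 * Sketch of how userspace typically reaches the dumb-buffer path above,
 * via the generic DRM ioctls (illustrative only, error handling omitted):
 *
 *	struct drm_mode_create_dumb arg = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg);
 *
 * arg.handle, arg.pitch and arg.size are then filled in, and the handle
 * can be passed to DRM_IOCTL_MODE_MAP_DUMB, which lands in
 * omap_gem_dumb_map_offset() below.
 */
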
/**
 * omap_gem_dumb_map - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.  We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* Sync the buffer for CPU access.. note pages should already be
 * attached, i.e. via omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

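/*
 * Sketch of the cached-buffer flow implied by the two helpers above
 * (descriptive, not a new API): pages that the CPU has faulted in have a
 * zero entry in omap_obj->addrs[].  omap_gem_dma_sync() (re)maps exactly
 * those pages before hw access and then zaps the userspace mapping, so
 * the next CPU touch faults again; the fault path then calls
 * omap_gem_cpu_sync() to unmap the page for CPU use, page by page.
 */
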
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

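/*
 * Typical pin/unpin usage of the pair above (a sketch only; callers
 * elsewhere in omapdrm follow this pattern):
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (!ret) {
 *		... program scanout/DMA using paddr ...
 *		omap_gem_put_paddr(obj);
 *	}
 */
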
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that the mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

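/*
 * Sketch of the expected caller pattern for the pair above (illustrative
 * only):
 *
 *	struct page **pages;
 *	int ret = omap_gem_get_pages(obj, &pages, true);
 *	if (!ret) {
 *		... access pages[] ...
 *		omap_gem_put_pages(obj);
 *	}
 */
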
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

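/*
 * Sketch of the intended start/finish/sync flow (illustrative only):
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);
 *	... hw writes to the buffer ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 * A CPU-side reader that needs coherent contents can instead block with
 * omap_gem_op_sync(obj, OMAP_GEM_READ), which waits until all writes
 * pending at the time of the call have completed.
 */
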
static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj);  /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (!omap_obj->vaddr) {
			kfree(omap_obj);

			return NULL;
		}

		flags |= OMAP_BO_DMA;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}

/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}