/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * paddr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when paddr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
	 * the DMA address must be accessed through omap_gem_get_paddr() to
	 * ensure that the mapping won't disappear unexpectedly. References must
	 * be released with omap_gem_put_paddr().
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this need to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @drm_file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency via using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				dma_addr_t addr;

				addr = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

				if (dma_mapping_error(dev->dev, addr)) {
					dev_warn(dev->dev,
						"%s: failed to map page\n",
						__func__);
					break;
				}

				dirty = true;
				omap_obj->addrs[i] = addr;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (is_contiguous(omap_obj)) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

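/*
 * Example: CPU access to the backing pages follows the same get/put shape
 * as the paddr API above. A minimal sketch (illustrative only, error
 * handling trimmed):
 *
 *	struct page **pages;
 *
 *	if (omap_gem_get_pages(obj, &pages, true) == 0) {
 *		... access pages[0 .. (obj->size >> PAGE_SHIFT) - 1] ...
 *		omap_gem_put_pages(obj);
 *	}
 *
 * With remap=false no mutex is taken, so the call is usable from atomic
 * context, but it only succeeds if pages are already attached, and no
 * matching put is required.
 */
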
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

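/*
 * Example: a hw producer brackets its access with start/finish, while a
 * CPU-side consumer waits with omap_gem_op_sync() (defined below). A
 * minimal sketch (illustrative only; error handling trimmed):
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);
 *	... kick the DMA/hw job that writes the buffer ...
 *	... then, from its completion callback: ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 *	// elsewhere, before the CPU reads the buffer:
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);
 */
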
static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_finish().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_writecombine(dev->dev, obj->size,
				omap_obj->vaddr, omap_obj->paddr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
							 &omap_obj->paddr,
							 GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto fail;
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	omap_gem_free_object(obj);
	return NULL;
}

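/*
 * Example: a driver-internal allocation of a contiguous scanout buffer.
 * A minimal sketch (illustrative only; "w" and "h" are assumed pixel
 * dimensions for a 32bpp format):
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(w * h * 4) };
 *	struct drm_gem_object *obj =
 *		omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *
 * On platforms without DMM this takes the OMAP_BO_MEM_DMA_API path
 * above; with DMM the object stays shmem-backed and is pinned through
 * the TILER at scanout time via omap_gem_get_paddr().
 */
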
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->paddr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}