/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * paddr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when paddr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
	 * the DMA address must be accessed through omap_gem_get_paddr() to
	 * ensure that the mapping won't disappear unexpectedly. References must
	 * be released with omap_gem_put_paddr().
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;			/* height in rows */
	int height_shift;		/* ilog2(height in rows) */
	int slot_shift;			/* ilog2(width per slot) */
	int stride_pfn;			/* stride in pages */
	int last;			/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

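/*
 * A buffer is physically contiguous if it was allocated through the DMA
 * mapping API, or if it was imported from a dmabuf whose scatterlist has
 * a single entry.
 */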
static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

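/*
 * Unmap the userspace mmap'ing of the given usergart entry so the next
 * access faults again, taking the sparse layout of wider-than-page tiled
 * buffers (m > 1) into account.
 */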
static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			if (omap_obj->addrs[i])
				dma_unmap_page(obj->dev->dev,
					       omap_obj->addrs[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
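	/*
	 * In other words, each fault maps in one page-wide, n-row-tall
	 * column of the buffer through the pre-reserved usergart tiler
	 * block, giving userspace a page-aligned window into the tiled
	 * buffer.
	 */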
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this needs to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

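	/*
	 * The fault handlers insert individual pages with vm_insert_mixed(),
	 * which requires the mapping to be VM_MIXEDMAP rather than VM_PFNMAP.
	 */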
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned mmap offset for the buffer
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				dma_addr_t addr;

				addr = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

				if (dma_mapping_error(dev->dev, addr)) {
					dev_warn(dev->dev,
						"%s: failed to map page\n",
						__func__);
					break;
				}

				dirty = true;
				omap_obj->addrs[i] = addr;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

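/*
 * Pinning is refcounted through paddr_cnt: the first omap_gem_get_paddr()
 * call reserves and pins a tiler block, later calls only bump the count,
 * and omap_gem_put_paddr() releases the mapping when the count drops to
 * zero.
 */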
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (is_contiguous(omap_obj)) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

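/*
 * Update the per-buffer counters for a read and/or write operation:
 * pending counts are bumped when an operation starts and complete counts
 * when it finishes; completions also re-check the waiter list.
 */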
static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

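/*
 * Block until all read and/or write operations of the given type that
 * were pending at the time of the call have completed.
 */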
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_finish().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->paddr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->paddr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}

struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

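	/*
	 * A single-entry scatterlist means the buffer is physically
	 * contiguous and its DMA address can be used directly; otherwise
	 * build a pages array so the buffer can later be pinned through
	 * the DMM/TILER like any other non-contiguous buffer.
	 */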
	if (sgt->orig_nents == 1) {
		omap_obj->paddr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}