/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"
/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * paddr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when paddr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
	 * the DMA address must be accessed through omap_gem_get_paddr() to
	 * ensure that the mapping won't disappear unexpectedly. References must
	 * be released with omap_gem_put_paddr().
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;			/* height in rows */
	int height_shift;		/* ilog2(height in rows) */
	int slot_shift;			/* ilog2(width per slot) */
	int stride_pfn;			/* stride in pages */
	int last;			/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

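/*
 * Worked example (illustrative numbers only, not taken from real
 * hardware state): a 16-bit tiled buffer 600 pixels wide has a pixel
 * stride of 600 * 2 = 1200 bytes, but the virtual stride seen through
 * the mmap is rounded up to 4096 bytes, so tiler_vsize() reports
 * 4096 * height rather than 1200 * height. Only the valid picture
 * part of each row is actually backed by pages.
 */
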
/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

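	/*
	 * Worked example (illustrative numbers only): a 32-bit tiled
	 * buffer 1280 pixels wide has a byte stride of 1280 * 4 = 5120
	 * bytes, so m = 1 + 5120 / PAGE_SIZE = 2 virtual pages per row,
	 * and (assuming a 64-row slot height for this format) one
	 * usergart entry maps an n = 64 row high, one-page wide column
	 * of the buffer.
	 */
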
	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				% NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

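/*
 * A minimal userspace sketch of the dumb-buffer path (hypothetical
 * libdrm caller, for illustration only; not part of this driver):
 * create the buffer, look up its fake mmap offset, then mmap it
 * through the DRM fd:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1280, .height = 720, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(0, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */
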
/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %u\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency via using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				dma_addr_t addr;

				addr = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

				if (dma_mapping_error(dev->dev, addr)) {
					dev_warn(dev->dev,
						"%s: failed to map page\n",
						__func__);
					break;
				}

				dirty = true;
				omap_obj->addrs[i] = addr;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

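/*
 * Minimal usage sketch for cached buffers (hypothetical caller, for
 * illustration only):
 *
 *	omap_gem_dma_sync(obj, DMA_TO_DEVICE);
 *	... kick DSS/GPU DMA ...
 *	omap_gem_cpu_sync(obj, pgoff);
 *
 * dma_sync maps (and thereby cleans) every page the CPU has dirtied
 * through the fault path before the device accesses the buffer;
 * cpu_sync hands a single page back to the CPU, and the next
 * dma_sync will re-map it.
 */
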
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (is_contiguous(omap_obj)) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

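/*
 * Minimal pin/unpin sketch (hypothetical caller, for illustration
 * only), e.g. from a plane update that needs a DMA-able address:
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	... program the scanout/DMA engine with paddr ...
 *	omap_gem_put_paddr(obj);
 *
 * Each successful get_paddr() must be balanced by a put_paddr(); the
 * TILER mapping is only torn down when paddr_cnt drops back to zero.
 */
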
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

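/*
 * Minimal sketch (hypothetical caller, for illustration only):
 *
 *	struct page **pages;
 *	int ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	... access the buffer through pages[i] ...
 *	omap_gem_put_pages(obj);
 *
 * With remap = false no pages are attached and -ENOMEM is returned
 * unless the object is already backed, and no matching put_pages()
 * call is made on that path.
 */
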
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: " fmt "\n", \
			__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

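/*
 * Minimal synchronization sketch (hypothetical callers, for
 * illustration only). A producer brackets a hw/dma write:
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);
 *	... submit DMA that writes the buffer ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);   (e.g. from completion irq)
 *
 * and a CPU reader blocks until all writes pending at that point
 * have completed:
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);
 *	... now safe to read the buffer from the CPU ...
 */
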
/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_finish().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->paddr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->paddr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}

struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->paddr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}