/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going through omap_gem_{get,put}_paddr() to ensure the mapping
	 * is not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when the buffer is used by SGX, the sync-object needs to
	 * be allocated from a special heap of sync-objects.  This way many
	 * sync objects can be packed in a page, and not waste GPU virtual
	 * address space.  Because of this we have to have an
	 * omap_gem_set_sync_object() API to allow replacement of the syncobj
	 * after it has (potentially) already been allocated.  A bit ugly but
	 * I haven't thought of a better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
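/*
 * Worked example (illustrative, an assumption based on omap_gem_init()
 * below): each reserved region is one 4KiB page wide.  The init code starts
 * from w = PAGE_SIZE >> i and h = 1 (4096 elements for 8-bit, 2048 for
 * 16-bit, 1024 for 32-bit) and lets tiler_align() round h up to the slot
 * height, which is why usergart[i].height ends up equal to the number of
 * pages covered by one region.
 */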
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}
180
181/* -----------------------------------------------------------------------------
182 * Eviction
183 */
Rob Clarkf7f9f452011-12-05 19:19:22 -0600184
185static void evict_entry(struct drm_gem_object *obj,
Laurent Pinchartf4302742015-12-14 22:39:34 +0200186 enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
Rob Clarkf7f9f452011-12-05 19:19:22 -0600187{
David Herrmann6796cb12014-01-03 14:24:19 +0100188 struct omap_gem_object *omap_obj = to_omap_bo(obj);
Laurent Pinchartf4302742015-12-14 22:39:34 +0200189 struct omap_drm_private *priv = obj->dev->dev_private;
190 int n = priv->usergart[fmt].height;
David Herrmann6796cb12014-01-03 14:24:19 +0100191 size_t size = PAGE_SIZE * n;
192 loff_t off = mmap_offset(obj) +
193 (entry->obj_pgoff << PAGE_SHIFT);
194 const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
195
196 if (m > 1) {
197 int i;
		/* if stride is greater than PAGE_SIZE then sparse mapping: */
199 for (i = n; i > 0; i--) {
200 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
201 off, PAGE_SIZE, 1);
202 off += PAGE_SIZE * m;
Rob Clarke5598952012-03-05 10:48:40 -0600203 }
David Herrmann6796cb12014-01-03 14:24:19 +0100204 } else {
205 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
206 off, size, 1);
Rob Clarkf7f9f452011-12-05 19:19:22 -0600207 }
208
209 entry->obj = NULL;
210}
211
212/* Evict a buffer from usergart, if it is mapped there */
213static void evict(struct drm_gem_object *obj)
214{
215 struct omap_gem_object *omap_obj = to_omap_bo(obj);
Laurent Pinchartf4302742015-12-14 22:39:34 +0200216 struct omap_drm_private *priv = obj->dev->dev_private;
Rob Clarkf7f9f452011-12-05 19:19:22 -0600217
218 if (omap_obj->flags & OMAP_BO_TILED) {
219 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
220 int i;
221
Rob Clarkf7f9f452011-12-05 19:19:22 -0600222 for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
Laurent Pinchartf4302742015-12-14 22:39:34 +0200223 struct omap_drm_usergart_entry *entry =
224 &priv->usergart[fmt].entry[i];
225
Rob Clarkf7f9f452011-12-05 19:19:22 -0600226 if (entry->obj == obj)
227 evict_entry(obj, fmt, entry);
228 }
229 }
230}
231
Laurent Pinchart7ef93b02015-12-14 22:39:33 +0200232/* -----------------------------------------------------------------------------
233 * Page Management
Rob Clarkcd5351f2011-11-12 12:09:40 -0600234 */
Rob Clarkcd5351f2011-11-12 12:09:40 -0600235
236/** ensure backing pages are allocated */
237static int omap_gem_attach_pages(struct drm_gem_object *obj)
238{
Rob Clark8b6b5692012-05-17 02:37:25 -0600239 struct drm_device *dev = obj->dev;
Rob Clarkcd5351f2011-11-12 12:09:40 -0600240 struct omap_gem_object *omap_obj = to_omap_bo(obj);
241 struct page **pages;
Emil Gooded4eb23a2012-08-17 18:53:26 +0200242 int npages = obj->size >> PAGE_SHIFT;
243 int i, ret;
Rob Clark8b6b5692012-05-17 02:37:25 -0600244 dma_addr_t *addrs;
Rob Clarkcd5351f2011-11-12 12:09:40 -0600245
246 WARN_ON(omap_obj->pages);
247
David Herrmann0cdbe8a2014-05-25 12:59:47 +0200248 pages = drm_gem_get_pages(obj);
Rob Clarkcd5351f2011-11-12 12:09:40 -0600249 if (IS_ERR(pages)) {
250 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
251 return PTR_ERR(pages);
252 }
253
Rob Clarkf3bc9d22011-12-20 16:54:28 -0600254 /* for non-cached buffers, ensure the new pages are clean because
255 * DSS, GPU, etc. are not cache coherent:
256 */
257 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
Vincent Penquerc'h23d84ed2012-10-09 19:40:39 +0100258 addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
Emil Gooded4eb23a2012-08-17 18:53:26 +0200259 if (!addrs) {
260 ret = -ENOMEM;
261 goto free_pages;
262 }
263
Rob Clarkf3bc9d22011-12-20 16:54:28 -0600264 for (i = 0; i < npages; i++) {
Rob Clark8b6b5692012-05-17 02:37:25 -0600265 addrs[i] = dma_map_page(dev->dev, pages[i],
Rob Clarkf3bc9d22011-12-20 16:54:28 -0600266 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
Tomi Valkeinen579ef252016-01-05 11:43:14 +0200267
268 if (dma_mapping_error(dev->dev, addrs[i])) {
269 dev_warn(dev->dev,
270 "%s: failed to map page\n", __func__);
271
272 for (i = i - 1; i >= 0; --i) {
273 dma_unmap_page(dev->dev, addrs[i],
274 PAGE_SIZE, DMA_BIDIRECTIONAL);
275 }
276
277 ret = -ENOMEM;
278 goto free_addrs;
279 }
Rob Clarkf3bc9d22011-12-20 16:54:28 -0600280 }
Rob Clark8b6b5692012-05-17 02:37:25 -0600281 } else {
Vincent Penquerc'h23d84ed2012-10-09 19:40:39 +0100282 addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
Emil Gooded4eb23a2012-08-17 18:53:26 +0200283 if (!addrs) {
284 ret = -ENOMEM;
285 goto free_pages;
286 }
Rob Clarkf3bc9d22011-12-20 16:54:28 -0600287 }
288
Rob Clark8b6b5692012-05-17 02:37:25 -0600289 omap_obj->addrs = addrs;
Rob Clarkcd5351f2011-11-12 12:09:40 -0600290 omap_obj->pages = pages;
Rob Clark8b6b5692012-05-17 02:37:25 -0600291
Rob Clarkcd5351f2011-11-12 12:09:40 -0600292 return 0;
Emil Gooded4eb23a2012-08-17 18:53:26 +0200293
Tomi Valkeinen579ef252016-01-05 11:43:14 +0200294free_addrs:
295 kfree(addrs);
Emil Gooded4eb23a2012-08-17 18:53:26 +0200296free_pages:
Rob Clarkddcd09d2013-08-07 13:41:27 -0400297 drm_gem_put_pages(obj, pages, true, false);
Emil Gooded4eb23a2012-08-17 18:53:26 +0200298
299 return ret;
Rob Clarkcd5351f2011-11-12 12:09:40 -0600300}
301
/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
305static int get_pages(struct drm_gem_object *obj, struct page ***pages)
306{
307 struct omap_gem_object *omap_obj = to_omap_bo(obj);
308 int ret = 0;
309
310 if (is_shmem(obj) && !omap_obj->pages) {
311 ret = omap_gem_attach_pages(obj);
312 if (ret) {
313 dev_err(obj->dev->dev, "could not attach pages\n");
314 return ret;
315 }
316 }
317
318 /* TODO: even phys-contig.. we should have a list of pages? */
319 *pages = omap_obj->pages;
320
321 return 0;
322}
323
Rob Clarkcd5351f2011-11-12 12:09:40 -0600324/** release backing pages */
325static void omap_gem_detach_pages(struct drm_gem_object *obj)
326{
327 struct omap_gem_object *omap_obj = to_omap_bo(obj);
Rob Clarkf3bc9d22011-12-20 16:54:28 -0600328
	/* for non-cached buffers, unmap the DMA mappings before releasing the
	 * pages, because DSS, GPU, etc. are not cache coherent:
	 */
332 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
333 int i, npages = obj->size >> PAGE_SHIFT;
334 for (i = 0; i < npages; i++) {
335 dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
336 PAGE_SIZE, DMA_BIDIRECTIONAL);
337 }
Rob Clarkf3bc9d22011-12-20 16:54:28 -0600338 }
339
Rob Clark8b6b5692012-05-17 02:37:25 -0600340 kfree(omap_obj->addrs);
341 omap_obj->addrs = NULL;
342
Rob Clarkddcd09d2013-08-07 13:41:27 -0400343 drm_gem_put_pages(obj, omap_obj->pages, true, false);
Rob Clarkcd5351f2011-11-12 12:09:40 -0600344 omap_obj->pages = NULL;
345}
346
Rob Clark6ad11bc2012-04-10 13:19:55 -0500347/* get buffer flags */
348uint32_t omap_gem_flags(struct drm_gem_object *obj)
349{
350 return to_omap_bo(obj)->flags;
351}
352
Rob Clarkc5b12472012-01-18 18:33:02 -0600353uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
354{
355 uint64_t offset;
356 mutex_lock(&obj->dev->struct_mutex);
357 offset = mmap_offset(obj);
358 mutex_unlock(&obj->dev->struct_mutex);
359 return offset;
360}
361
Rob Clarkf7f9f452011-12-05 19:19:22 -0600362/** get mmap size */
363size_t omap_gem_mmap_size(struct drm_gem_object *obj)
364{
365 struct omap_gem_object *omap_obj = to_omap_bo(obj);
366 size_t size = obj->size;
367
368 if (omap_obj->flags & OMAP_BO_TILED) {
369 /* for tiled buffers, the virtual size has stride rounded up
370 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
371 * 32kb later!). But we don't back the entire buffer with
372 * pages, only the valid picture part.. so need to adjust for
373 * this in the size used to mmap and generate mmap offset
374 */
375 size = tiler_vsize(gem2fmt(omap_obj->flags),
376 omap_obj->width, omap_obj->height);
377 }
378
379 return size;
380}
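/* Worked example (illustrative numbers, an assumption): an 8-bit tiled
 * buffer whose rows are 1000 bytes wide gets its virtual stride rounded up
 * to PAGE_SIZE, so the size reported here is 4096 * height even though only
 * the first 1000 bytes of each virtual row are backed by real pixel data.
 */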
381
Rob Clark3c810c62012-08-15 15:18:01 -0500382/* get tiled size, returns -EINVAL if not tiled buffer */
383int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
384{
385 struct omap_gem_object *omap_obj = to_omap_bo(obj);
386 if (omap_obj->flags & OMAP_BO_TILED) {
387 *w = omap_obj->width;
388 *h = omap_obj->height;
389 return 0;
390 }
391 return -EINVAL;
392}
Rob Clarkf7f9f452011-12-05 19:19:22 -0600393
Laurent Pinchart7ef93b02015-12-14 22:39:33 +0200394/* -----------------------------------------------------------------------------
395 * Fault Handling
396 */
397
Rob Clarkf7f9f452011-12-05 19:19:22 -0600398/* Normal handling for the case of faulting in non-tiled buffers */
399static int fault_1d(struct drm_gem_object *obj,
400 struct vm_area_struct *vma, struct vm_fault *vmf)
401{
402 struct omap_gem_object *omap_obj = to_omap_bo(obj);
403 unsigned long pfn;
404 pgoff_t pgoff;
405
406 /* We don't use vmf->pgoff since that has the fake offset: */
407 pgoff = ((unsigned long)vmf->virtual_address -
408 vma->vm_start) >> PAGE_SHIFT;
409
410 if (omap_obj->pages) {
Rob Clark8b6b5692012-05-17 02:37:25 -0600411 omap_gem_cpu_sync(obj, pgoff);
Rob Clarkf7f9f452011-12-05 19:19:22 -0600412 pfn = page_to_pfn(omap_obj->pages[pgoff]);
413 } else {
414 BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
415 pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
416 }
417
418 VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
419 pfn, pfn << PAGE_SHIFT);
420
Dan Williams01c8f1c2016-01-15 16:56:40 -0800421 return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
422 __pfn_to_pfn_t(pfn, PFN_DEV));
Rob Clarkf7f9f452011-12-05 19:19:22 -0600423}
424
425/* Special handling for the case of faulting in 2d tiled buffers */
426static int fault_2d(struct drm_gem_object *obj,
427 struct vm_area_struct *vma, struct vm_fault *vmf)
428{
429 struct omap_gem_object *omap_obj = to_omap_bo(obj);
Laurent Pinchartf4302742015-12-14 22:39:34 +0200430 struct omap_drm_private *priv = obj->dev->dev_private;
431 struct omap_drm_usergart_entry *entry;
Rob Clarkf7f9f452011-12-05 19:19:22 -0600432 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
433 struct page *pages[64]; /* XXX is this too much to have on stack? */
434 unsigned long pfn;
435 pgoff_t pgoff, base_pgoff;
436 void __user *vaddr;
437 int i, ret, slots;
438
Rob Clarke5598952012-03-05 10:48:40 -0600439 /*
440 * Note the height of the slot is also equal to the number of pages
441 * that need to be mapped in to fill 4kb wide CPU page. If the slot
442 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
Rob Clarkf7f9f452011-12-05 19:19:22 -0600443 */
Laurent Pinchartf4302742015-12-14 22:39:34 +0200444 const int n = priv->usergart[fmt].height;
445 const int n_shift = priv->usergart[fmt].height_shift;
Rob Clarke5598952012-03-05 10:48:40 -0600446
	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this needs to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
453 const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
Rob Clarkf7f9f452011-12-05 19:19:22 -0600454
455 /* We don't use vmf->pgoff since that has the fake offset: */
456 pgoff = ((unsigned long)vmf->virtual_address -
457 vma->vm_start) >> PAGE_SHIFT;
458
Rob Clarke5598952012-03-05 10:48:40 -0600459 /*
460 * Actual address we start mapping at is rounded down to previous slot
Rob Clarkf7f9f452011-12-05 19:19:22 -0600461 * boundary in the y direction:
462 */
Rob Clarke5598952012-03-05 10:48:40 -0600463 base_pgoff = round_down(pgoff, m << n_shift);
Rob Clarkf7f9f452011-12-05 19:19:22 -0600464
Rob Clarke5598952012-03-05 10:48:40 -0600465 /* figure out buffer width in slots */
Laurent Pinchartf4302742015-12-14 22:39:34 +0200466 slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
Rob Clarkf7f9f452011-12-05 19:19:22 -0600467
Rob Clarke5598952012-03-05 10:48:40 -0600468 vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
469
Laurent Pinchartf4302742015-12-14 22:39:34 +0200470 entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
Rob Clarke5598952012-03-05 10:48:40 -0600471
Rob Clarkf7f9f452011-12-05 19:19:22 -0600472 /* evict previous buffer using this usergart entry, if any: */
473 if (entry->obj)
474 evict_entry(entry->obj, fmt, entry);
475
476 entry->obj = obj;
477 entry->obj_pgoff = base_pgoff;
478
Rob Clarke5598952012-03-05 10:48:40 -0600479 /* now convert base_pgoff to phys offset from virt offset: */
480 base_pgoff = (base_pgoff >> n_shift) * slots;
Rob Clarkf7f9f452011-12-05 19:19:22 -0600481
Rob Clarke5598952012-03-05 10:48:40 -0600482 /* for wider-than 4k.. figure out which part of the slot-row we want: */
483 if (m > 1) {
484 int off = pgoff % m;
485 entry->obj_pgoff += off;
486 base_pgoff /= m;
487 slots = min(slots - (off << n_shift), n);
488 base_pgoff += off << n_shift;
489 vaddr += off << PAGE_SHIFT;
490 }
491
492 /*
493 * Map in pages. Beyond the valid pixel part of the buffer, we set
494 * pages[i] to NULL to get a dummy page mapped in.. if someone
495 * reads/writes it they will get random/undefined content, but at
496 * least it won't be corrupting whatever other random page used to
497 * be mapped in, or other undefined behavior.
Rob Clarkf7f9f452011-12-05 19:19:22 -0600498 */
499 memcpy(pages, &omap_obj->pages[base_pgoff],
500 sizeof(struct page *) * slots);
501 memset(pages + slots, 0,
Rob Clarke5598952012-03-05 10:48:40 -0600502 sizeof(struct page *) * (n - slots));
Rob Clarkf7f9f452011-12-05 19:19:22 -0600503
Rob Clarka6a91822011-12-09 23:26:08 -0600504 ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
Rob Clarkf7f9f452011-12-05 19:19:22 -0600505 if (ret) {
506 dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
507 return ret;
508 }
509
Rob Clarkf7f9f452011-12-05 19:19:22 -0600510 pfn = entry->paddr >> PAGE_SHIFT;
511
512 VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
513 pfn, pfn << PAGE_SHIFT);
514
Rob Clarke5598952012-03-05 10:48:40 -0600515 for (i = n; i > 0; i--) {
Dan Williams01c8f1c2016-01-15 16:56:40 -0800516 vm_insert_mixed(vma, (unsigned long)vaddr,
517 __pfn_to_pfn_t(pfn, PFN_DEV));
Laurent Pinchartf4302742015-12-14 22:39:34 +0200518 pfn += priv->usergart[fmt].stride_pfn;
Rob Clarke5598952012-03-05 10:48:40 -0600519 vaddr += PAGE_SIZE * m;
Rob Clarkf7f9f452011-12-05 19:19:22 -0600520 }
521
522 /* simple round-robin: */
Laurent Pinchartf4302742015-12-14 22:39:34 +0200523 priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
524 % NUM_USERGART_ENTRIES;
Rob Clarkf7f9f452011-12-05 19:19:22 -0600525
526 return 0;
527}
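/* Worked example for the math above (illustrative numbers, an assumption):
 * take an 8-bit tiled buffer 6000 bytes wide with a slot height of 64 rows
 * (n = 64, n_shift = 6).  The virtual stride is m = 1 + 6000/4096 = 2 pages,
 * so base_pgoff is rounded down to a multiple of m << n_shift = 128 pages,
 * and the insert loop then maps n = 64 pages into the usergart entry,
 * advancing the CPU address by m pages per inserted page.
 */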
528
Rob Clarkcd5351f2011-11-12 12:09:40 -0600529/**
530 * omap_gem_fault - pagefault handler for GEM objects
531 * @vma: the VMA of the GEM object
532 * @vmf: fault detail
533 *
534 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
535 * does most of the work for us including the actual map/unmap calls
536 * but we need to do the actual page work.
537 *
538 * The VMA was set up by GEM. In doing so it also ensured that the
539 * vma->vm_private_data points to the GEM object that is backing this
540 * mapping.
541 */
542int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
543{
544 struct drm_gem_object *obj = vma->vm_private_data;
545 struct omap_gem_object *omap_obj = to_omap_bo(obj);
546 struct drm_device *dev = obj->dev;
547 struct page **pages;
Rob Clarkcd5351f2011-11-12 12:09:40 -0600548 int ret;
549
550 /* Make sure we don't parallel update on a fault, nor move or remove
551 * something from beneath our feet
552 */
553 mutex_lock(&dev->struct_mutex);
554
555 /* if a shmem backed object, make sure we have pages attached now */
556 ret = get_pages(obj, &pages);
YAMANE Toshiakiae053032012-11-14 19:33:17 +0900557 if (ret)
Rob Clarkcd5351f2011-11-12 12:09:40 -0600558 goto fail;
Rob Clarkcd5351f2011-11-12 12:09:40 -0600559
560 /* where should we do corresponding put_pages().. we are mapping
561 * the original page, rather than thru a GART, so we can't rely
562 * on eviction to trigger this. But munmap() or all mappings should
563 * probably trigger put_pages()?
564 */
565
Rob Clarkf7f9f452011-12-05 19:19:22 -0600566 if (omap_obj->flags & OMAP_BO_TILED)
567 ret = fault_2d(obj, vma, vmf);
568 else
569 ret = fault_1d(obj, vma, vmf);
Rob Clarkcd5351f2011-11-12 12:09:40 -0600570
Rob Clarkcd5351f2011-11-12 12:09:40 -0600571
572fail:
573 mutex_unlock(&dev->struct_mutex);
574 switch (ret) {
575 case 0:
576 case -ERESTARTSYS:
577 case -EINTR:
578 return VM_FAULT_NOPAGE;
579 case -ENOMEM:
580 return VM_FAULT_OOM;
581 default:
582 return VM_FAULT_SIGBUS;
583 }
584}
585
586/** We override mainly to fix up some of the vm mapping flags.. */
587int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
588{
Rob Clarkcd5351f2011-11-12 12:09:40 -0600589 int ret;
590
591 ret = drm_gem_mmap(filp, vma);
592 if (ret) {
593 DBG("mmap failed: %d", ret);
594 return ret;
595 }
596
Rob Clark8b6b5692012-05-17 02:37:25 -0600597 return omap_gem_mmap_obj(vma->vm_private_data, vma);
598}
599
600int omap_gem_mmap_obj(struct drm_gem_object *obj,
601 struct vm_area_struct *vma)
602{
603 struct omap_gem_object *omap_obj = to_omap_bo(obj);
Rob Clarkcd5351f2011-11-12 12:09:40 -0600604
605 vma->vm_flags &= ~VM_PFNMAP;
606 vma->vm_flags |= VM_MIXEDMAP;
607
608 if (omap_obj->flags & OMAP_BO_WC) {
609 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
610 } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
611 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
612 } else {
Rob Clark8b6b5692012-05-17 02:37:25 -0600613 /*
614 * We do have some private objects, at least for scanout buffers
615 * on hardware without DMM/TILER. But these are allocated write-
616 * combine
617 */
618 if (WARN_ON(!obj->filp))
619 return -EINVAL;
620
621 /*
622 * Shunt off cached objs to shmem file so they have their own
623 * address_space (so unmap_mapping_range does what we want,
624 * in particular in the case of mmap'd dmabufs)
625 */
626 fput(vma->vm_file);
Rob Clark8b6b5692012-05-17 02:37:25 -0600627 vma->vm_pgoff = 0;
Al Virocb0942b2012-08-27 14:48:26 -0400628 vma->vm_file = get_file(obj->filp);
Rob Clark8b6b5692012-05-17 02:37:25 -0600629
Rob Clarkcd5351f2011-11-12 12:09:40 -0600630 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
631 }
632
Rob Clark8b6b5692012-05-17 02:37:25 -0600633 return 0;
Rob Clarkcd5351f2011-11-12 12:09:40 -0600634}
635
Laurent Pinchart7ef93b02015-12-14 22:39:33 +0200636/* -----------------------------------------------------------------------------
637 * Dumb Buffers
638 */
Rob Clark8b6b5692012-05-17 02:37:25 -0600639
Rob Clarkcd5351f2011-11-12 12:09:40 -0600640/**
641 * omap_gem_dumb_create - create a dumb buffer
642 * @drm_file: our client file
643 * @dev: our device
644 * @args: the requested arguments copied from userspace
645 *
646 * Allocate a buffer suitable for use for a frame buffer of the
647 * form described by user space. Give userspace a handle by which
648 * to reference it.
649 */
650int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
651 struct drm_mode_create_dumb *args)
652{
653 union omap_gem_size gsize;
654
Thierry Redingbdb2b932014-11-03 11:57:33 +0100655 args->pitch = align_pitch(0, args->width, args->bpp);
Rob Clarkcd5351f2011-11-12 12:09:40 -0600656 args->size = PAGE_ALIGN(args->pitch * args->height);
657
658 gsize = (union omap_gem_size){
659 .bytes = args->size,
660 };
661
662 return omap_gem_new_handle(dev, file, gsize,
663 OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
664}
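/* Usage sketch (illustration only, not part of the driver): the dumb-buffer
 * path above is what a KMS-only client typically uses.  Error handling is
 * omitted and "drm_fd" is an assumed open DRM device fd.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, map.offset);
 *
 * The MAP_DUMB ioctl ends up in omap_gem_dumb_map_offset() below, and faults
 * on the mmap'd range land in omap_gem_fault().
 */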
665
666/**
Rob Clarkcd5351f2011-11-12 12:09:40 -0600667 * omap_gem_dumb_map - buffer mapping for dumb interface
668 * @file: our drm client file
669 * @dev: drm device
670 * @handle: GEM handle to the object (from dumb_create)
671 *
672 * Do the necessary setup to allow the mapping of the frame buffer
673 * into user memory. We don't have to do much here at the moment.
674 */
675int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
676 uint32_t handle, uint64_t *offset)
677{
678 struct drm_gem_object *obj;
679 int ret = 0;
680
Rob Clarkcd5351f2011-11-12 12:09:40 -0600681 /* GEM does all our handle to object mapping */
682 obj = drm_gem_object_lookup(dev, file, handle);
683 if (obj == NULL) {
684 ret = -ENOENT;
685 goto fail;
686 }
687
688 *offset = omap_gem_mmap_offset(obj);
689
690 drm_gem_object_unreference_unlocked(obj);
691
692fail:
Rob Clarkcd5351f2011-11-12 12:09:40 -0600693 return ret;
694}
695
Laurent Pincharte1c11742015-12-14 22:39:30 +0200696#ifdef CONFIG_DRM_FBDEV_EMULATION
Rob Clarka6a91822011-12-09 23:26:08 -0600697/* Set scrolling position. This allows us to implement fast scrolling
698 * for console.
Rob Clark9b55b952012-03-05 10:48:33 -0600699 *
700 * Call only from non-atomic contexts.
Rob Clarka6a91822011-12-09 23:26:08 -0600701 */
702int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
703{
704 struct omap_gem_object *omap_obj = to_omap_bo(obj);
705 uint32_t npages = obj->size >> PAGE_SHIFT;
706 int ret = 0;
707
708 if (roll > npages) {
709 dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
710 return -EINVAL;
711 }
712
Rob Clarka6a91822011-12-09 23:26:08 -0600713 omap_obj->roll = roll;
714
Rob Clarkaf695922011-12-16 11:34:34 -0600715 mutex_lock(&obj->dev->struct_mutex);
716
Rob Clarka6a91822011-12-09 23:26:08 -0600717 /* if we aren't mapped yet, we don't need to do anything */
718 if (omap_obj->block) {
719 struct page **pages;
720 ret = get_pages(obj, &pages);
721 if (ret)
722 goto fail;
723 ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
724 if (ret)
725 dev_err(obj->dev->dev, "could not repin: %d\n", ret);
726 }
727
728fail:
729 mutex_unlock(&obj->dev->struct_mutex);
730
731 return ret;
732}
Laurent Pincharte1c11742015-12-14 22:39:30 +0200733#endif
Rob Clarka6a91822011-12-09 23:26:08 -0600734
Laurent Pinchart7ef93b02015-12-14 22:39:33 +0200735/* -----------------------------------------------------------------------------
736 * Memory Management & DMA Sync
737 */
738
/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
743static inline bool is_cached_coherent(struct drm_gem_object *obj)
744{
745 struct omap_gem_object *omap_obj = to_omap_bo(obj);
746 return is_shmem(obj) &&
747 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
748}
Rob Clarkcd5351f2011-11-12 12:09:40 -0600749
Rob Clark8b6b5692012-05-17 02:37:25 -0600750/* Sync the buffer for CPU access.. note pages should already be
751 * attached, ie. omap_gem_get_pages()
752 */
753void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
754{
755 struct drm_device *dev = obj->dev;
756 struct omap_gem_object *omap_obj = to_omap_bo(obj);
757
758 if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
759 dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
760 PAGE_SIZE, DMA_BIDIRECTIONAL);
761 omap_obj->addrs[pgoff] = 0;
762 }
763}
764
765/* sync the buffer for DMA access */
766void omap_gem_dma_sync(struct drm_gem_object *obj,
767 enum dma_data_direction dir)
768{
769 struct drm_device *dev = obj->dev;
770 struct omap_gem_object *omap_obj = to_omap_bo(obj);
771
772 if (is_cached_coherent(obj)) {
773 int i, npages = obj->size >> PAGE_SHIFT;
774 struct page **pages = omap_obj->pages;
775 bool dirty = false;
776
777 for (i = 0; i < npages; i++) {
778 if (!omap_obj->addrs[i]) {
779 omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
780 PAGE_SIZE, DMA_BIDIRECTIONAL);
781 dirty = true;
782 }
783 }
784
785 if (dirty) {
786 unmap_mapping_range(obj->filp->f_mapping, 0,
787 omap_gem_mmap_size(obj), 1);
788 }
789 }
790}
791
Rob Clarkcd5351f2011-11-12 12:09:40 -0600792/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
793 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
794 * map in TILER)
795 */
796int omap_gem_get_paddr(struct drm_gem_object *obj,
797 dma_addr_t *paddr, bool remap)
798{
Rob Clarka6a91822011-12-09 23:26:08 -0600799 struct omap_drm_private *priv = obj->dev->dev_private;
Rob Clarkcd5351f2011-11-12 12:09:40 -0600800 struct omap_gem_object *omap_obj = to_omap_bo(obj);
801 int ret = 0;
802
Rob Clarkf7f9f452011-12-05 19:19:22 -0600803 mutex_lock(&obj->dev->struct_mutex);
804
Rob Clarka6a91822011-12-09 23:26:08 -0600805 if (remap && is_shmem(obj) && priv->has_dmm) {
Rob Clarkf7f9f452011-12-05 19:19:22 -0600806 if (omap_obj->paddr_cnt == 0) {
807 struct page **pages;
Rob Clarka6a91822011-12-09 23:26:08 -0600808 uint32_t npages = obj->size >> PAGE_SHIFT;
Rob Clarkf7f9f452011-12-05 19:19:22 -0600809 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
810 struct tiler_block *block;
Rob Clarka6a91822011-12-09 23:26:08 -0600811
Rob Clarkf7f9f452011-12-05 19:19:22 -0600812 BUG_ON(omap_obj->block);
813
814 ret = get_pages(obj, &pages);
815 if (ret)
816 goto fail;
817
Rob Clarkf7f9f452011-12-05 19:19:22 -0600818 if (omap_obj->flags & OMAP_BO_TILED) {
819 block = tiler_reserve_2d(fmt,
820 omap_obj->width,
821 omap_obj->height, 0);
822 } else {
823 block = tiler_reserve_1d(obj->size);
824 }
825
826 if (IS_ERR(block)) {
827 ret = PTR_ERR(block);
828 dev_err(obj->dev->dev,
829 "could not remap: %d (%d)\n", ret, fmt);
830 goto fail;
831 }
832
833 /* TODO: enable async refill.. */
Rob Clarka6a91822011-12-09 23:26:08 -0600834 ret = tiler_pin(block, pages, npages,
835 omap_obj->roll, true);
Rob Clarkf7f9f452011-12-05 19:19:22 -0600836 if (ret) {
837 tiler_release(block);
838 dev_err(obj->dev->dev,
839 "could not pin: %d\n", ret);
840 goto fail;
841 }
842
843 omap_obj->paddr = tiler_ssptr(block);
844 omap_obj->block = block;
845
Russell King2d31ca32014-07-12 10:53:41 +0100846 DBG("got paddr: %pad", &omap_obj->paddr);
Rob Clarkf7f9f452011-12-05 19:19:22 -0600847 }
848
849 omap_obj->paddr_cnt++;
850
851 *paddr = omap_obj->paddr;
852 } else if (omap_obj->flags & OMAP_BO_DMA) {
853 *paddr = omap_obj->paddr;
854 } else {
855 ret = -EINVAL;
Rob Clark8b6b5692012-05-17 02:37:25 -0600856 goto fail;
Rob Clarkcd5351f2011-11-12 12:09:40 -0600857 }
858
Rob Clarkf7f9f452011-12-05 19:19:22 -0600859fail:
860 mutex_unlock(&obj->dev->struct_mutex);
Rob Clarkcd5351f2011-11-12 12:09:40 -0600861
862 return ret;
863}
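/* Usage sketch (illustration only): the expected pin/use/unpin cycle for a
 * kernel-side DMA user of the API above; "program the hardware" stands in
 * for whatever DSS/DMA setup the caller does.
 *
 *	dma_addr_t paddr;
 *	int ret;
 *
 *	ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	... program the hardware with paddr; the buffer stays pinned ...
 *	omap_gem_put_paddr(obj);
 */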
864
865/* Release physical address, when DMA is no longer being performed.. this
866 * could potentially unpin and unmap buffers from TILER
867 */
Tomi Valkeinen393a9492015-04-28 14:01:36 +0300868void omap_gem_put_paddr(struct drm_gem_object *obj)
Rob Clarkcd5351f2011-11-12 12:09:40 -0600869{
Rob Clarkf7f9f452011-12-05 19:19:22 -0600870 struct omap_gem_object *omap_obj = to_omap_bo(obj);
Tomi Valkeinen393a9492015-04-28 14:01:36 +0300871 int ret;
Rob Clarkf7f9f452011-12-05 19:19:22 -0600872
873 mutex_lock(&obj->dev->struct_mutex);
874 if (omap_obj->paddr_cnt > 0) {
875 omap_obj->paddr_cnt--;
876 if (omap_obj->paddr_cnt == 0) {
877 ret = tiler_unpin(omap_obj->block);
878 if (ret) {
879 dev_err(obj->dev->dev,
880 "could not unpin pages: %d\n", ret);
Rob Clarkf7f9f452011-12-05 19:19:22 -0600881 }
882 ret = tiler_release(omap_obj->block);
883 if (ret) {
884 dev_err(obj->dev->dev,
885 "could not release unmap: %d\n", ret);
886 }
Tomi Valkeinen3f4d17c2014-09-03 19:25:53 +0000887 omap_obj->paddr = 0;
Rob Clarkf7f9f452011-12-05 19:19:22 -0600888 omap_obj->block = NULL;
889 }
890 }
Tomi Valkeinen393a9492015-04-28 14:01:36 +0300891
Rob Clarkf7f9f452011-12-05 19:19:22 -0600892 mutex_unlock(&obj->dev->struct_mutex);
Rob Clarkcd5351f2011-11-12 12:09:40 -0600893}
894
Rob Clark3c810c62012-08-15 15:18:01 -0500895/* Get rotated scanout address (only valid if already pinned), at the
896 * specified orientation and x,y offset from top-left corner of buffer
897 * (only valid for tiled 2d buffers)
898 */
899int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
900 int x, int y, dma_addr_t *paddr)
901{
902 struct omap_gem_object *omap_obj = to_omap_bo(obj);
903 int ret = -EINVAL;
904
905 mutex_lock(&obj->dev->struct_mutex);
906 if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
907 (omap_obj->flags & OMAP_BO_TILED)) {
908 *paddr = tiler_tsptr(omap_obj->block, orient, x, y);
909 ret = 0;
910 }
911 mutex_unlock(&obj->dev->struct_mutex);
912 return ret;
913}
914
915/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
916int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
917{
918 struct omap_gem_object *omap_obj = to_omap_bo(obj);
919 int ret = -EINVAL;
920 if (omap_obj->flags & OMAP_BO_TILED)
921 ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
922 return ret;
923}
924
/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
935int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
936 bool remap)
Rob Clarkcd5351f2011-11-12 12:09:40 -0600937{
938 int ret;
Rob Clark6ad11bc2012-04-10 13:19:55 -0500939 if (!remap) {
940 struct omap_gem_object *omap_obj = to_omap_bo(obj);
941 if (!omap_obj->pages)
942 return -ENOMEM;
943 *pages = omap_obj->pages;
944 return 0;
945 }
Rob Clarkcd5351f2011-11-12 12:09:40 -0600946 mutex_lock(&obj->dev->struct_mutex);
947 ret = get_pages(obj, pages);
948 mutex_unlock(&obj->dev->struct_mutex);
949 return ret;
950}
951
952/* release pages when DMA no longer being performed */
953int omap_gem_put_pages(struct drm_gem_object *obj)
954{
955 /* do something here if we dynamically attach/detach pages.. at
956 * least they would no longer need to be pinned if everyone has
957 * released the pages..
958 */
959 return 0;
960}
961
Laurent Pincharte1c11742015-12-14 22:39:30 +0200962#ifdef CONFIG_DRM_FBDEV_EMULATION
Rob Clarkf7f9f452011-12-05 19:19:22 -0600963/* Get kernel virtual address for CPU access.. this more or less only
964 * exists for omap_fbdev. This should be called with struct_mutex
965 * held.
Rob Clarkcd5351f2011-11-12 12:09:40 -0600966 */
967void *omap_gem_vaddr(struct drm_gem_object *obj)
968{
969 struct omap_gem_object *omap_obj = to_omap_bo(obj);
YAMANE Toshiaki696e3ca2012-11-14 19:33:43 +0900970 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
Rob Clarkf7f9f452011-12-05 19:19:22 -0600971 if (!omap_obj->vaddr) {
972 struct page **pages;
973 int ret = get_pages(obj, &pages);
974 if (ret)
975 return ERR_PTR(ret);
976 omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
977 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
978 }
Rob Clarkcd5351f2011-11-12 12:09:40 -0600979 return omap_obj->vaddr;
980}
Laurent Pincharte1c11742015-12-14 22:39:30 +0200981#endif
Rob Clarkcd5351f2011-11-12 12:09:40 -0600982
Laurent Pinchart7ef93b02015-12-14 22:39:33 +0200983/* -----------------------------------------------------------------------------
984 * Power Management
985 */
Rob Clarkcd5351f2011-11-12 12:09:40 -0600986
Andy Grosse78edba2012-12-19 14:53:37 -0600987#ifdef CONFIG_PM
988/* re-pin objects in DMM in resume path: */
989int omap_gem_resume(struct device *dev)
990{
991 struct drm_device *drm_dev = dev_get_drvdata(dev);
992 struct omap_drm_private *priv = drm_dev->dev_private;
993 struct omap_gem_object *omap_obj;
994 int ret = 0;
995
996 list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
997 if (omap_obj->block) {
998 struct drm_gem_object *obj = &omap_obj->base;
999 uint32_t npages = obj->size >> PAGE_SHIFT;
1000 WARN_ON(!omap_obj->pages); /* this can't happen */
1001 ret = tiler_pin(omap_obj->block,
1002 omap_obj->pages, npages,
1003 omap_obj->roll, true);
1004 if (ret) {
1005 dev_err(dev, "could not repin: %d\n", ret);
1006 return ret;
1007 }
1008 }
1009 }
1010
1011 return 0;
1012}
1013#endif
1014
Laurent Pinchart7ef93b02015-12-14 22:39:33 +02001015/* -----------------------------------------------------------------------------
1016 * DebugFS
1017 */
1018
Rob Clarkf6b60362012-03-05 10:48:36 -06001019#ifdef CONFIG_DEBUG_FS
1020void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1021{
Rob Clarkf6b60362012-03-05 10:48:36 -06001022 struct omap_gem_object *omap_obj = to_omap_bo(obj);
David Herrmann0de23972013-07-24 21:07:52 +02001023 uint64_t off;
Rob Clarkf6b60362012-03-05 10:48:36 -06001024
David Herrmann0de23972013-07-24 21:07:52 +02001025 off = drm_vma_node_start(&obj->vma_node);
Rob Clarkf6b60362012-03-05 10:48:36 -06001026
Russell King2d31ca32014-07-12 10:53:41 +01001027 seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
Rob Clarkf6b60362012-03-05 10:48:36 -06001028 omap_obj->flags, obj->name, obj->refcount.refcount.counter,
Russell King2d31ca32014-07-12 10:53:41 +01001029 off, &omap_obj->paddr, omap_obj->paddr_cnt,
Rob Clarkf6b60362012-03-05 10:48:36 -06001030 omap_obj->vaddr, omap_obj->roll);
1031
1032 if (omap_obj->flags & OMAP_BO_TILED) {
1033 seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1034 if (omap_obj->block) {
1035 struct tcm_area *area = &omap_obj->block->area;
1036 seq_printf(m, " (%dx%d, %dx%d)",
1037 area->p0.x, area->p0.y,
1038 area->p1.x, area->p1.y);
1039 }
1040 } else {
1041 seq_printf(m, " %d", obj->size);
1042 }
1043
1044 seq_printf(m, "\n");
1045}
1046
1047void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1048{
1049 struct omap_gem_object *omap_obj;
1050 int count = 0;
1051 size_t size = 0;
1052
1053 list_for_each_entry(omap_obj, list, mm_list) {
1054 struct drm_gem_object *obj = &omap_obj->base;
1055 seq_printf(m, " ");
1056 omap_gem_describe(obj, m);
1057 count++;
1058 size += obj->size;
1059 }
1060
1061 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1062}
1063#endif
1064
Laurent Pinchart7ef93b02015-12-14 22:39:33 +02001065/* -----------------------------------------------------------------------------
1066 * Buffer Synchronization
Rob Clarkcd5351f2011-11-12 12:09:40 -06001067 */
1068
Laurent Pinchart7ef93b02015-12-14 22:39:33 +02001069static DEFINE_SPINLOCK(sync_lock);
1070
Rob Clarkcd5351f2011-11-12 12:09:40 -06001071struct omap_gem_sync_waiter {
1072 struct list_head list;
1073 struct omap_gem_object *omap_obj;
1074 enum omap_gem_op op;
1075 uint32_t read_target, write_target;
1076 /* notify called w/ sync_lock held */
1077 void (*notify)(void *arg);
1078 void *arg;
1079};
1080
1081/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
1082 * the read and/or write target count is achieved which can call a user
1083 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
1084 * cpu access), etc.
1085 */
1086static LIST_HEAD(waiters);
1087
1088static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
1089{
1090 struct omap_gem_object *omap_obj = waiter->omap_obj;
1091 if ((waiter->op & OMAP_GEM_READ) &&
Archit Tanejaf2cff0f2014-04-11 12:53:31 +05301092 (omap_obj->sync->write_complete < waiter->write_target))
Rob Clarkcd5351f2011-11-12 12:09:40 -06001093 return true;
1094 if ((waiter->op & OMAP_GEM_WRITE) &&
Archit Tanejaf2cff0f2014-04-11 12:53:31 +05301095 (omap_obj->sync->read_complete < waiter->read_target))
Rob Clarkcd5351f2011-11-12 12:09:40 -06001096 return true;
1097 return false;
1098}
1099
1100/* macro for sync debug.. */
1101#define SYNCDBG 0
1102#define SYNC(fmt, ...) do { if (SYNCDBG) \
1103 printk(KERN_ERR "%s:%d: "fmt"\n", \
1104 __func__, __LINE__, ##__VA_ARGS__); \
1105 } while (0)
1106
1107
1108static void sync_op_update(void)
1109{
1110 struct omap_gem_sync_waiter *waiter, *n;
1111 list_for_each_entry_safe(waiter, n, &waiters, list) {
1112 if (!is_waiting(waiter)) {
1113 list_del(&waiter->list);
1114 SYNC("notify: %p", waiter);
1115 waiter->notify(waiter->arg);
1116 kfree(waiter);
1117 }
1118 }
1119}
1120
1121static inline int sync_op(struct drm_gem_object *obj,
1122 enum omap_gem_op op, bool start)
1123{
1124 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1125 int ret = 0;
1126
1127 spin_lock(&sync_lock);
1128
1129 if (!omap_obj->sync) {
1130 omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
1131 if (!omap_obj->sync) {
1132 ret = -ENOMEM;
1133 goto unlock;
1134 }
1135 }
1136
1137 if (start) {
1138 if (op & OMAP_GEM_READ)
1139 omap_obj->sync->read_pending++;
1140 if (op & OMAP_GEM_WRITE)
1141 omap_obj->sync->write_pending++;
1142 } else {
1143 if (op & OMAP_GEM_READ)
1144 omap_obj->sync->read_complete++;
1145 if (op & OMAP_GEM_WRITE)
1146 omap_obj->sync->write_complete++;
1147 sync_op_update();
1148 }
1149
1150unlock:
1151 spin_unlock(&sync_lock);
1152
1153 return ret;
1154}
1155
1156/* it is a bit lame to handle updates in this sort of polling way, but
1157 * in case of PVR, the GPU can directly update read/write complete
1158 * values, and not really tell us which ones it updated.. this also
1159 * means that sync_lock is not quite sufficient. So we'll need to
1160 * do something a bit better when it comes time to add support for
1161 * separate 2d hw..
1162 */
1163void omap_gem_op_update(void)
1164{
1165 spin_lock(&sync_lock);
1166 sync_op_update();
1167 spin_unlock(&sync_lock);
1168}
1169
1170/* mark the start of read and/or write operation */
1171int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
1172{
1173 return sync_op(obj, op, true);
1174}
1175
1176int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
1177{
1178 return sync_op(obj, op, false);
1179}
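/* Usage sketch (illustration only): how a hypothetical DMA producer and a
 * CPU consumer would bracket their access with the op tracking above.
 *
 *	// DMA engine about to write the buffer:
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);
 *	... kick the hardware; from its completion handler: ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 *	// CPU about to read the buffer, wait for pending writes:
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);
 */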
1180
1181static DECLARE_WAIT_QUEUE_HEAD(sync_event);
1182
1183static void sync_notify(void *arg)
1184{
1185 struct task_struct **waiter_task = arg;
1186 *waiter_task = NULL;
1187 wake_up_all(&sync_event);
1188}
1189
1190int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
1191{
1192 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1193 int ret = 0;
1194 if (omap_obj->sync) {
1195 struct task_struct *waiter_task = current;
1196 struct omap_gem_sync_waiter *waiter =
1197 kzalloc(sizeof(*waiter), GFP_KERNEL);
1198
YAMANE Toshiakiae053032012-11-14 19:33:17 +09001199 if (!waiter)
Rob Clarkcd5351f2011-11-12 12:09:40 -06001200 return -ENOMEM;
Rob Clarkcd5351f2011-11-12 12:09:40 -06001201
1202 waiter->omap_obj = omap_obj;
1203 waiter->op = op;
1204 waiter->read_target = omap_obj->sync->read_pending;
1205 waiter->write_target = omap_obj->sync->write_pending;
1206 waiter->notify = sync_notify;
1207 waiter->arg = &waiter_task;
1208
1209 spin_lock(&sync_lock);
1210 if (is_waiting(waiter)) {
1211 SYNC("waited: %p", waiter);
1212 list_add_tail(&waiter->list, &waiters);
1213 spin_unlock(&sync_lock);
1214 ret = wait_event_interruptible(sync_event,
1215 (waiter_task == NULL));
1216 spin_lock(&sync_lock);
1217 if (waiter_task) {
1218 SYNC("interrupted: %p", waiter);
1219 /* we were interrupted */
1220 list_del(&waiter->list);
1221 waiter_task = NULL;
1222 } else {
1223 /* freed in sync_op_update() */
1224 waiter = NULL;
1225 }
1226 }
1227 spin_unlock(&sync_lock);
Fabian Frederickd2c87e22014-07-04 21:17:15 +02001228 kfree(waiter);
Rob Clarkcd5351f2011-11-12 12:09:40 -06001229 }
1230 return ret;
1231}
1232
1233/* call fxn(arg), either synchronously or asynchronously if the op
1234 * is currently blocked.. fxn() can be called from any context
1235 *
1236 * (TODO for now fxn is called back from whichever context calls
1237 * omap_gem_op_update().. but this could be better defined later
1238 * if needed)
1239 *
1240 * TODO more code in common w/ _sync()..
1241 */
1242int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
1243 void (*fxn)(void *arg), void *arg)
1244{
1245 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1246 if (omap_obj->sync) {
1247 struct omap_gem_sync_waiter *waiter =
1248 kzalloc(sizeof(*waiter), GFP_ATOMIC);
1249
YAMANE Toshiakiae053032012-11-14 19:33:17 +09001250 if (!waiter)
Rob Clarkcd5351f2011-11-12 12:09:40 -06001251 return -ENOMEM;
Rob Clarkcd5351f2011-11-12 12:09:40 -06001252
1253 waiter->omap_obj = omap_obj;
1254 waiter->op = op;
1255 waiter->read_target = omap_obj->sync->read_pending;
1256 waiter->write_target = omap_obj->sync->write_pending;
1257 waiter->notify = fxn;
1258 waiter->arg = arg;
1259
1260 spin_lock(&sync_lock);
1261 if (is_waiting(waiter)) {
1262 SYNC("waited: %p", waiter);
1263 list_add_tail(&waiter->list, &waiters);
1264 spin_unlock(&sync_lock);
1265 return 0;
1266 }
1267
1268 spin_unlock(&sync_lock);
Subhajit Paul15ec2ca2014-04-11 12:53:30 +05301269
1270 kfree(waiter);
Rob Clarkcd5351f2011-11-12 12:09:40 -06001271 }
1272
1273 /* no waiting.. */
1274 fxn(arg);
1275
1276 return 0;
1277}
1278
/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
1284int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
1285{
1286 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1287 int ret = 0;
1288
1289 spin_lock(&sync_lock);
1290
1291 if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
1292 /* clearing a previously set syncobj */
Peter Huewee6200962013-01-26 00:40:13 +01001293 syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
1294 GFP_ATOMIC);
Rob Clarkcd5351f2011-11-12 12:09:40 -06001295 if (!syncobj) {
1296 ret = -ENOMEM;
1297 goto unlock;
1298 }
Rob Clarkcd5351f2011-11-12 12:09:40 -06001299 omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
1300 omap_obj->sync = syncobj;
1301 } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
1302 /* replacing an existing syncobj */
1303 if (omap_obj->sync) {
1304 memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
1305 kfree(omap_obj->sync);
1306 }
1307 omap_obj->flags |= OMAP_BO_EXT_SYNC;
1308 omap_obj->sync = syncobj;
1309 }
1310
1311unlock:
1312 spin_unlock(&sync_lock);
1313 return ret;
1314}
1315
Laurent Pinchart7ef93b02015-12-14 22:39:33 +02001316/* -----------------------------------------------------------------------------
1317 * Constructor & Destructor
1318 */
1319
Rob Clarkcd5351f2011-11-12 12:09:40 -06001320/* don't call directly.. called from GEM core when it is time to actually
1321 * free the object..
1322 */
1323void omap_gem_free_object(struct drm_gem_object *obj)
1324{
1325 struct drm_device *dev = obj->dev;
Tomi Valkeinen76c40552014-12-17 14:34:22 +02001326 struct omap_drm_private *priv = dev->dev_private;
Rob Clarkcd5351f2011-11-12 12:09:40 -06001327 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1328
Rob Clarkf7f9f452011-12-05 19:19:22 -06001329 evict(obj);
1330
Rob Clarkf6b60362012-03-05 10:48:36 -06001331 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1332
Tomi Valkeinen76c40552014-12-17 14:34:22 +02001333 spin_lock(&priv->list_lock);
Rob Clarkf6b60362012-03-05 10:48:36 -06001334 list_del(&omap_obj->mm_list);
Tomi Valkeinen76c40552014-12-17 14:34:22 +02001335 spin_unlock(&priv->list_lock);
Rob Clarkf6b60362012-03-05 10:48:36 -06001336
Rob Clark9a0774e2012-01-16 12:51:17 -06001337 /* this means the object is still pinned.. which really should
1338 * not happen. I think..
1339 */
1340 WARN_ON(omap_obj->paddr_cnt > 0);
1341
Rob Clarkcd5351f2011-11-12 12:09:40 -06001342 /* don't free externally allocated backing memory */
1343 if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
YAMANE Toshiakiae053032012-11-14 19:33:17 +09001344 if (omap_obj->pages)
Rob Clarkcd5351f2011-11-12 12:09:40 -06001345 omap_gem_detach_pages(obj);
YAMANE Toshiakiae053032012-11-14 19:33:17 +09001346
Rob Clarkcd5351f2011-11-12 12:09:40 -06001347 if (!is_shmem(obj)) {
1348 dma_free_writecombine(dev->dev, obj->size,
1349 omap_obj->vaddr, omap_obj->paddr);
Rob Clarkf7f9f452011-12-05 19:19:22 -06001350 } else if (omap_obj->vaddr) {
1351 vunmap(omap_obj->vaddr);
Rob Clarkcd5351f2011-11-12 12:09:40 -06001352 }
1353 }
1354
1355 /* don't free externally allocated syncobj */
YAMANE Toshiakiae053032012-11-14 19:33:17 +09001356 if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
Rob Clarkcd5351f2011-11-12 12:09:40 -06001357 kfree(omap_obj->sync);
Rob Clarkcd5351f2011-11-12 12:09:40 -06001358
1359 drm_gem_object_release(obj);
1360
Laurent Pinchart00e9c7c2015-12-14 22:39:38 +02001361 kfree(omap_obj);
Rob Clarkcd5351f2011-11-12 12:09:40 -06001362}
1363
1364/* GEM buffer object constructor */
1365struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1366 union omap_gem_size gsize, uint32_t flags)
1367{
Rob Clarka6a91822011-12-09 23:26:08 -06001368 struct omap_drm_private *priv = dev->dev_private;
Rob Clarkcd5351f2011-11-12 12:09:40 -06001369 struct omap_gem_object *omap_obj;
Laurent Pinchart92b4b442015-12-14 22:39:41 +02001370 struct drm_gem_object *obj;
David Herrmannab5a60c2014-05-25 12:45:39 +02001371 struct address_space *mapping;
Rob Clarkcd5351f2011-11-12 12:09:40 -06001372 size_t size;
1373 int ret;
1374
1375 if (flags & OMAP_BO_TILED) {
Laurent Pinchartf4302742015-12-14 22:39:34 +02001376 if (!priv->usergart) {
Rob Clarkf7f9f452011-12-05 19:19:22 -06001377 dev_err(dev->dev, "Tiled buffers require DMM\n");
Laurent Pinchart92b4b442015-12-14 22:39:41 +02001378 return NULL;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001379 }
Rob Clarkcd5351f2011-11-12 12:09:40 -06001380
Rob Clarkf7f9f452011-12-05 19:19:22 -06001381 /* tiled buffers are always shmem paged backed.. when they are
1382 * scanned out, they are remapped into DMM/TILER
1383 */
1384 flags &= ~OMAP_BO_SCANOUT;
1385
1386 /* currently don't allow cached buffers.. there is some caching
1387 * stuff that needs to be handled better
1388 */
Tomi Valkeinen7cb0d6c2014-09-25 19:24:29 +00001389 flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1390 flags |= tiler_get_cpu_cache_flags();
Rob Clarkf7f9f452011-12-05 19:19:22 -06001391
1392 /* align dimensions to slot boundaries... */
1393 tiler_align(gem2fmt(flags),
1394 &gsize.tiled.width, &gsize.tiled.height);
1395
1396 /* ...and calculate size based on aligned dimensions */
1397 size = tiler_size(gem2fmt(flags),
1398 gsize.tiled.width, gsize.tiled.height);
1399 } else {
1400 size = PAGE_ALIGN(gsize.bytes);
1401 }
Rob Clarkcd5351f2011-11-12 12:09:40 -06001402
1403 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
Joe Perches78110bb2013-02-11 09:41:29 -08001404 if (!omap_obj)
Tomi Valkeinena903e3b2015-03-17 15:31:11 +02001405 return NULL;
Rob Clarkf6b60362012-03-05 10:48:36 -06001406
Rob Clarkcd5351f2011-11-12 12:09:40 -06001407 obj = &omap_obj->base;
1408
	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
Rob Clarkcd5351f2011-11-12 12:09:40 -06001413 omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
1414 &omap_obj->paddr, GFP_KERNEL);
Tomi Valkeinena903e3b2015-03-17 15:31:11 +02001415 if (!omap_obj->vaddr) {
1416 kfree(omap_obj);
YAMANE Toshiakiae053032012-11-14 19:33:17 +09001417
Tomi Valkeinena903e3b2015-03-17 15:31:11 +02001418 return NULL;
1419 }
1420
1421 flags |= OMAP_BO_DMA;
Rob Clarkcd5351f2011-11-12 12:09:40 -06001422 }
1423
Tomi Valkeinena903e3b2015-03-17 15:31:11 +02001424 spin_lock(&priv->list_lock);
1425 list_add(&omap_obj->mm_list, &priv->obj_list);
1426 spin_unlock(&priv->list_lock);
1427
Rob Clarkcd5351f2011-11-12 12:09:40 -06001428 omap_obj->flags = flags;
1429
Rob Clarkf7f9f452011-12-05 19:19:22 -06001430 if (flags & OMAP_BO_TILED) {
1431 omap_obj->width = gsize.tiled.width;
1432 omap_obj->height = gsize.tiled.height;
1433 }
1434
David Herrmannab5a60c2014-05-25 12:45:39 +02001435 if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
David Herrmann89c82332013-07-11 11:56:32 +02001436 drm_gem_private_object_init(dev, obj, size);
David Herrmannab5a60c2014-05-25 12:45:39 +02001437 } else {
Rob Clarkcd5351f2011-11-12 12:09:40 -06001438 ret = drm_gem_object_init(dev, obj, size);
David Herrmannab5a60c2014-05-25 12:45:39 +02001439 if (ret)
1440 goto fail;
Rob Clarkcd5351f2011-11-12 12:09:40 -06001441
David Herrmannab5a60c2014-05-25 12:45:39 +02001442 mapping = file_inode(obj->filp)->i_mapping;
1443 mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1444 }
Rob Clarkcd5351f2011-11-12 12:09:40 -06001445
1446 return obj;
1447
1448fail:
Laurent Pinchart92b4b442015-12-14 22:39:41 +02001449 omap_gem_free_object(obj);
Rob Clarkcd5351f2011-11-12 12:09:40 -06001450 return NULL;
1451}
Rob Clarkf7f9f452011-12-05 19:19:22 -06001452
Laurent Pinchart7ef93b02015-12-14 22:39:33 +02001453/* convenience method to construct a GEM buffer object, and userspace handle */
1454int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1455 union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
1456{
1457 struct drm_gem_object *obj;
1458 int ret;
1459
1460 obj = omap_gem_new(dev, gsize, flags);
1461 if (!obj)
1462 return -ENOMEM;
1463
1464 ret = drm_gem_handle_create(file, obj, handle);
1465 if (ret) {
Laurent Pinchart74128a22015-12-14 22:39:39 +02001466 omap_gem_free_object(obj);
Laurent Pinchart7ef93b02015-12-14 22:39:33 +02001467 return ret;
1468 }
1469
1470 /* drop reference from allocate - handle holds it now */
1471 drm_gem_object_unreference_unlocked(obj);
1472
1473 return 0;
1474}
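/* Usage sketch (illustration only): a kernel-internal user such as the fbdev
 * emulation allocates a scanout buffer without creating a userspace handle;
 * "pitch" and "height" are assumed caller-provided values.
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(pitch * height) };
 *	struct drm_gem_object *bo;
 *
 *	bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *	if (!bo)
 *		return -ENOMEM;
 */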
1475
1476/* -----------------------------------------------------------------------------
1477 * Init & Cleanup
1478 */
1479
1480/* If DMM is used, we need to set some stuff up.. */
Rob Clarkf7f9f452011-12-05 19:19:22 -06001481void omap_gem_init(struct drm_device *dev)
1482{
Rob Clarka6a91822011-12-09 23:26:08 -06001483 struct omap_drm_private *priv = dev->dev_private;
Laurent Pinchartf4302742015-12-14 22:39:34 +02001484 struct omap_drm_usergart *usergart;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001485 const enum tiler_fmt fmts[] = {
1486 TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1487 };
Andy Gross5c137792012-03-05 10:48:39 -06001488 int i, j;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001489
Andy Grosse5e4e9b2012-10-17 00:30:03 -05001490 if (!dmm_is_available()) {
Rob Clarkf7f9f452011-12-05 19:19:22 -06001491 /* DMM only supported on OMAP4 and later, so this isn't fatal */
Andy Gross5c137792012-03-05 10:48:39 -06001492 dev_warn(dev->dev, "DMM not available, disable DMM support\n");
Rob Clarkf7f9f452011-12-05 19:19:22 -06001493 return;
1494 }
1495
Joe Perches78110bb2013-02-11 09:41:29 -08001496 usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1497 if (!usergart)
Rob Clarkb3698392011-12-09 23:26:06 -06001498 return;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001499
1500 /* reserve 4k aligned/wide regions for userspace mappings: */
1501 for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1502 uint16_t h = 1, w = PAGE_SIZE >> i;
1503 tiler_align(fmts[i], &w, &h);
1504 /* note: since each region is 1 4kb page wide, and minimum
1505 * number of rows, the height ends up being the same as the
1506 * # of pages in the region
1507 */
1508 usergart[i].height = h;
1509 usergart[i].height_shift = ilog2(h);
Rob Clark3c810c62012-08-15 15:18:01 -05001510 usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001511 usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1512 for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
Laurent Pinchartf4302742015-12-14 22:39:34 +02001513 struct omap_drm_usergart_entry *entry;
1514 struct tiler_block *block;
1515
1516 entry = &usergart[i].entry[j];
1517 block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
Rob Clarkf7f9f452011-12-05 19:19:22 -06001518 if (IS_ERR(block)) {
1519 dev_err(dev->dev,
1520 "reserve failed: %d, %d, %ld\n",
1521 i, j, PTR_ERR(block));
1522 return;
1523 }
1524 entry->paddr = tiler_ssptr(block);
1525 entry->block = block;
1526
Russell King2d31ca32014-07-12 10:53:41 +01001527 DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
1528 &entry->paddr,
Rob Clarkf7f9f452011-12-05 19:19:22 -06001529 usergart[i].stride_pfn << PAGE_SHIFT);
1530 }
1531 }
Rob Clarka6a91822011-12-09 23:26:08 -06001532
Laurent Pinchartf4302742015-12-14 22:39:34 +02001533 priv->usergart = usergart;
Rob Clarka6a91822011-12-09 23:26:08 -06001534 priv->has_dmm = true;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001535}
1536
1537void omap_gem_deinit(struct drm_device *dev)
1538{
Laurent Pinchartf4302742015-12-14 22:39:34 +02001539 struct omap_drm_private *priv = dev->dev_private;
1540
Rob Clarkf7f9f452011-12-05 19:19:22 -06001541 /* I believe we can rely on there being no more outstanding GEM
1542 * objects which could depend on usergart/dmm at this point.
1543 */
Laurent Pinchartf4302742015-12-14 22:39:34 +02001544 kfree(priv->usergart);
Rob Clarkf7f9f452011-12-05 19:19:22 -06001545}