/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */


#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/* remove these once drm core helpers are merged */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */


struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid. Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA capable
	 * buffer is requested, but doesn't mean that it is. Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations. The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects. This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space. Because of this we have to have an omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated. A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int n = usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
	 * we actually want CMA memory for it all anyways..
	 */
	pages = drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this needs to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}


/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %08x", omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->block = NULL;
		}
	}
fail:
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
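
/*
 * Typical pairing (illustrative sketch only, not taken from the original
 * sources): a caller pins the buffer for the duration of DMA and releases
 * it afterwards, e.g.:
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (!ret) {
 *		...program the hw with paddr while it stays pinned...
 *		omap_gem_put_paddr(obj);
 *	}
 */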

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* acquire pages when needed (for example, for DMA where physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
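
/*
 * Illustrative pairing (sketch only, not from the original sources): with
 * remap=true a caller brackets its access with get/put, e.g.:
 *
 *	struct page **pages;
 *	if (!omap_gem_get_pages(obj, &pages, true)) {
 *		...use pages[0 .. (obj->size >> PAGE_SHIFT) - 1]...
 *		omap_gem_put_pages(obj);
 *	}
 *
 * omap_gem_put_pages() is currently a no-op, but the symmetric call keeps
 * callers correct if pages ever become detachable (see comments above).
 */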

/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient. So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}
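
/*
 * Illustrative bracketing (sketch only, not from the original sources): a
 * CPU reader would typically wait for pending writes and then mark its own
 * operation around the access, e.g.:
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);
 *	omap_gem_op_start(obj, OMAP_GEM_READ);
 *	...read the buffer contents...
 *	omap_gem_op_finish(obj, OMAP_GEM_READ);
 */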

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);

		if (waiter)
			kfree(waiter);
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
| 1238 | |
| 1239 | /* special API so PVR can update the buffer to use a sync-object allocated |
| 1240 | * from it's sync-obj heap. Only used for a newly allocated (from PVR's |
| 1241 | * perspective) sync-object, so we overwrite the new syncobj w/ values |
| 1242 | * from the already allocated syncobj (if there is one) |
| 1243 | */ |
| 1244 | int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj) |
| 1245 | { |
| 1246 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
| 1247 | int ret = 0; |
| 1248 | |
| 1249 | spin_lock(&sync_lock); |
| 1250 | |
| 1251 | if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) { |
| 1252 | /* clearing a previously set syncobj */ |
Peter Huewe | e620096 | 2013-01-26 00:40:13 +0100 | [diff] [blame] | 1253 | syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync), |
| 1254 | GFP_ATOMIC); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1255 | if (!syncobj) { |
| 1256 | ret = -ENOMEM; |
| 1257 | goto unlock; |
| 1258 | } |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1259 | omap_obj->flags &= ~OMAP_BO_EXT_SYNC; |
| 1260 | omap_obj->sync = syncobj; |
| 1261 | } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) { |
| 1262 | /* replacing an existing syncobj */ |
| 1263 | if (omap_obj->sync) { |
| 1264 | memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync)); |
| 1265 | kfree(omap_obj->sync); |
| 1266 | } |
| 1267 | omap_obj->flags |= OMAP_BO_EXT_SYNC; |
| 1268 | omap_obj->sync = syncobj; |
| 1269 | } |
| 1270 | |
| 1271 | unlock: |
| 1272 | spin_unlock(&sync_lock); |
| 1273 | return ret; |
| 1274 | } |
| 1275 | |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1276 | /* don't call directly.. called from GEM core when it is time to actually |
| 1277 | * free the object.. |
| 1278 | */ |
| 1279 | void omap_gem_free_object(struct drm_gem_object *obj) |
| 1280 | { |
| 1281 | struct drm_device *dev = obj->dev; |
| 1282 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
| 1283 | |
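| | /* tear down any usergart slots (userspace mappings of a tiled buffer)
| |  * that still reference this object
| |  */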
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1284 | evict(obj); |
| 1285 | |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1286 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
| 1287 | |
| 1288 | list_del(&omap_obj->mm_list); |
| 1289 | |
David Herrmann | 0de2397 | 2013-07-24 21:07:52 +0200 | [diff] [blame] | 1290 | drm_gem_free_mmap_offset(obj); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1291 | |
Rob Clark | 9a0774e | 2012-01-16 12:51:17 -0600 | [diff] [blame] | 1292 | /* this means the object is still pinned.. which really should |
| 1293 | * not happen. I think.. |
| 1294 | */ |
| 1295 | WARN_ON(omap_obj->paddr_cnt > 0); |
| 1296 | |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1297 | /* don't free externally allocated backing memory */ |
| 1298 | if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) { |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1299 | if (omap_obj->pages) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1300 | omap_gem_detach_pages(obj); |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1301 | |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1302 | if (!is_shmem(obj)) { |
| 1303 | dma_free_writecombine(dev->dev, obj->size, |
| 1304 | omap_obj->vaddr, omap_obj->paddr); |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1305 | } else if (omap_obj->vaddr) { |
| 1306 | vunmap(omap_obj->vaddr); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1307 | } |
| 1308 | } |
| 1309 | |
| 1310 | /* don't free externally allocated syncobj */ |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1311 | if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1312 | kfree(omap_obj->sync); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1313 | |
| 1314 | drm_gem_object_release(obj); |
| 1315 | |
| 1316 | kfree(obj); |
| 1317 | } |
| 1318 | |
| 1319 | /* convenience method to construct a GEM buffer object and a userspace handle */
| 1320 | int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, |
| 1321 | union omap_gem_size gsize, uint32_t flags, uint32_t *handle) |
| 1322 | { |
| 1323 | struct drm_gem_object *obj; |
| 1324 | int ret; |
| 1325 | |
| 1326 | obj = omap_gem_new(dev, gsize, flags); |
| 1327 | if (!obj) |
| 1328 | return -ENOMEM; |
| 1329 | |
| 1330 | ret = drm_gem_handle_create(file, obj, handle); |
| 1331 | if (ret) { |
| 1332 | drm_gem_object_release(obj); |
| 1333 | kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */ |
| 1334 | return ret; |
| 1335 | } |
| 1336 | |
| 1337 | /* drop reference from allocate - handle holds it now */ |
| 1338 | drm_gem_object_unreference_unlocked(obj); |
| 1339 | |
| 1340 | return 0; |
| 1341 | } |
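| | 
| | /*
| |  * Illustrative sketch, the 'args' struct and 'ret' are hypothetical while
| |  * omap_gem_new_handle() and the flags are real: a dumb-buffer style caller
| |  * could allocate a write-combined scanout buffer and return a handle
| |  * roughly like this:
| |  *
| |  *	union omap_gem_size gsize = {
| |  *		.bytes = PAGE_ALIGN(args->pitch * args->height),
| |  *	};
| |  *	ret = omap_gem_new_handle(dev, file, gsize,
| |  *			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
| |  */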
| 1342 | |
| 1343 | /* GEM buffer object constructor */ |
| 1344 | struct drm_gem_object *omap_gem_new(struct drm_device *dev, |
| 1345 | union omap_gem_size gsize, uint32_t flags) |
| 1346 | { |
Rob Clark | a6a9182 | 2011-12-09 23:26:08 -0600 | [diff] [blame] | 1347 | struct omap_drm_private *priv = dev->dev_private; |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1348 | struct omap_gem_object *omap_obj; |
| 1349 | struct drm_gem_object *obj = NULL; |
| 1350 | size_t size; |
| 1351 | int ret; |
| 1352 | |
| 1353 | if (flags & OMAP_BO_TILED) { |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1354 | if (!usergart) { |
| 1355 | dev_err(dev->dev, "Tiled buffers require DMM\n"); |
| 1356 | goto fail; |
| 1357 | } |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1358 | |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1359 | /* tiled buffers are always backed by shmem pages; when they are
| 1360 | * scanned out, they are remapped into DMM/TILER |
| 1361 | */ |
| 1362 | flags &= ~OMAP_BO_SCANOUT; |
| 1363 | |
| 1364 | /* currently don't allow cached buffers.. the cache maintenance that
| 1365 | * would be needed isn't handled properly yet
| 1366 | */ |
| 1367 | flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED); |
| 1368 | flags |= OMAP_BO_WC; |
| 1369 | |
| 1370 | /* align dimensions to slot boundaries... */ |
| 1371 | tiler_align(gem2fmt(flags), |
| 1372 | &gsize.tiled.width, &gsize.tiled.height); |
| 1373 | |
| 1374 | /* ...and calculate size based on aligned dimensions */ |
| 1375 | size = tiler_size(gem2fmt(flags), |
| 1376 | gsize.tiled.width, gsize.tiled.height); |
| 1377 | } else { |
| 1378 | size = PAGE_ALIGN(gsize.bytes); |
| 1379 | } |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1380 | |
| 1381 | omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL); |
Joe Perches | 78110bb | 2013-02-11 09:41:29 -0800 | [diff] [blame] | 1382 | if (!omap_obj) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1383 | goto fail; |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1384 | |
Rob Clark | f6b6036 | 2012-03-05 10:48:36 -0600 | [diff] [blame] | 1385 | list_add(&omap_obj->mm_list, &priv->obj_list); |
| 1386 | |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1387 | obj = &omap_obj->base; |
| 1388 | |
Rob Clark | a6a9182 | 2011-12-09 23:26:08 -0600 | [diff] [blame] | 1389 | if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) { |
| 1390 | /* attempt to allocate contiguous memory if we don't |
| 1391 | * have DMM for remapping discontiguous buffers
| 1392 | */ |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1393 | omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size, |
| 1394 | &omap_obj->paddr, GFP_KERNEL); |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1395 | if (omap_obj->vaddr) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1396 | flags |= OMAP_BO_DMA; |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1397 | |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1398 | } |
| 1399 | |
| 1400 | omap_obj->flags = flags; |
| 1401 | |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1402 | if (flags & OMAP_BO_TILED) { |
| 1403 | omap_obj->width = gsize.tiled.width; |
| 1404 | omap_obj->height = gsize.tiled.height; |
| 1405 | } |
| 1406 | |
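| | /* externally allocated or physically contiguous (DMA) buffers don't need
| |  * shmem backing pages, so initialize those as private GEM objects
| |  */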
David Herrmann | 89c8233 | 2013-07-11 11:56:32 +0200 | [diff] [blame] | 1407 | ret = 0; |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1408 | if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) |
David Herrmann | 89c8233 | 2013-07-11 11:56:32 +0200 | [diff] [blame] | 1409 | drm_gem_private_object_init(dev, obj, size); |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1410 | else |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1411 | ret = drm_gem_object_init(dev, obj, size); |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1412 | |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1413 | if (ret) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1414 | goto fail; |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1415 | |
| 1416 | return obj; |
| 1417 | |
| 1418 | fail: |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1419 | if (obj) |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1420 | omap_gem_free_object(obj); |
YAMANE Toshiaki | ae05303 | 2012-11-14 19:33:17 +0900 | [diff] [blame] | 1421 | |
Rob Clark | cd5351f | 2011-11-12 12:09:40 -0600 | [diff] [blame] | 1422 | return NULL; |
| 1423 | } |
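| | 
| | /*
| |  * Illustrative sketch, the dimensions are hypothetical; OMAP_BO_TILED_16
| |  * and the tiled union member come from the omapdrm UAPI.  Creating a
| |  * 16bpp TILER-backed buffer could look like:
| |  *
| |  *	union omap_gem_size gsize = {
| |  *		.tiled = { .width = 1920, .height = 1080 },
| |  *	};
| |  *	struct drm_gem_object *obj =
| |  *		omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
| |  */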
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1424 | |
| 1425 | /* init/cleanup.. if DMM is used, we need to set up the usergart mapping regions */
| 1426 | void omap_gem_init(struct drm_device *dev) |
| 1427 | { |
Rob Clark | a6a9182 | 2011-12-09 23:26:08 -0600 | [diff] [blame] | 1428 | struct omap_drm_private *priv = dev->dev_private; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1429 | const enum tiler_fmt fmts[] = { |
| 1430 | TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT |
| 1431 | }; |
Andy Gross | 5c13779 | 2012-03-05 10:48:39 -0600 | [diff] [blame] | 1432 | int i, j; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1433 | |
Andy Gross | e5e4e9b | 2012-10-17 00:30:03 -0500 | [diff] [blame] | 1434 | if (!dmm_is_available()) { |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1435 | /* DMM only supported on OMAP4 and later, so this isn't fatal */ |
Andy Gross | 5c13779 | 2012-03-05 10:48:39 -0600 | [diff] [blame] | 1436 | dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1437 | return; |
| 1438 | } |
| 1439 | |
Joe Perches | 78110bb | 2013-02-11 09:41:29 -0800 | [diff] [blame] | 1440 | usergart = kcalloc(ARRAY_SIZE(fmts), sizeof(*usergart), GFP_KERNEL);
| 1441 | if (!usergart) |
Rob Clark | b369839 | 2011-12-09 23:26:06 -0600 | [diff] [blame] | 1442 | return; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1443 | |
| 1444 | /* reserve 4k aligned/wide regions for userspace mappings: */ |
| 1445 | for (i = 0; i < ARRAY_SIZE(fmts); i++) { |
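| | /* fmts[i] is the (1 << i) bytes-per-pixel format, so PAGE_SIZE >> i is
| |  * the number of pixels that fit in one 4KB-wide region
| |  */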
| 1446 | uint16_t h = 1, w = PAGE_SIZE >> i; |
| 1447 | tiler_align(fmts[i], &w, &h); |
| 1448 | /* note: since each region is one 4KB page wide and has the minimum
| 1449 | * number of rows, the height ends up being the same as the number
| 1450 | * of pages in the region
| 1451 | */ |
| 1452 | usergart[i].height = h; |
| 1453 | usergart[i].height_shift = ilog2(h); |
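| | /* stride of the TILER container for this format, converted to pages */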
Rob Clark | 3c810c6 | 2012-08-15 15:18:01 -0500 | [diff] [blame] | 1454 | usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1455 | usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i); |
| 1456 | for (j = 0; j < NUM_USERGART_ENTRIES; j++) { |
| 1457 | struct usergart_entry *entry = &usergart[i].entry[j]; |
| 1458 | struct tiler_block *block = |
| 1459 | tiler_reserve_2d(fmts[i], w, h, |
| 1460 | PAGE_SIZE); |
| 1461 | if (IS_ERR(block)) { |
| 1462 | dev_err(dev->dev, |
| 1463 | "reserve failed: %d, %d, %ld\n", |
| 1464 | i, j, PTR_ERR(block)); |
| 1465 | return; |
| 1466 | } |
| 1467 | entry->paddr = tiler_ssptr(block); |
| 1468 | entry->block = block; |
| 1469 | |
| 1470 | DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h, |
| 1471 | entry->paddr, |
| 1472 | usergart[i].stride_pfn << PAGE_SHIFT); |
| 1473 | } |
| 1474 | } |
Rob Clark | a6a9182 | 2011-12-09 23:26:08 -0600 | [diff] [blame] | 1475 | |
| 1476 | priv->has_dmm = true; |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1477 | } |
| 1478 | |
| 1479 | void omap_gem_deinit(struct drm_device *dev) |
| 1480 | { |
| 1481 | /* I believe we can rely on there being no more outstanding GEM |
| 1482 | * objects which could depend on usergart/dmm at this point. |
| 1483 | */ |
Rob Clark | f7f9f45 | 2011-12-05 19:19:22 -0600 | [diff] [blame] | 1484 | kfree(usergart); |
| 1485 | } |