/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

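/*
 * Placement tables describing where buffer objects may reside: VRAM,
 * system memory, GMRs (Guest Memory Regions) and MOBs (memory objects).
 * The *_ne variants are pinned (TTM_PL_FLAG_NO_EVICT).
 */
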
static struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}
};

static struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

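/**
 * struct vmw_ttm_tt - vmwgfx-private TTM backend
 *
 * @dma_ttm: Base DMA-aware TTM.
 * @dev_priv: Pointer to the device private structure.
 * @gmr_id: GMR or MOB id the TTM is currently bound to.
 * @mob: Backing MOB; created on first bind to VMW_PL_MOB.
 * @mem_type: Memory type currently bound to (VMW_PL_GMR or VMW_PL_MOB).
 * @sgt: Scatter-gather table backing the DMA mappings.
 * @vsgt: Mode-independent view of the page and DMA address lists.
 * @sg_alloc_size: Size accounted to the TTM memory global for @sgt.
 * @mapped: Whether a DMA mapping is currently set up.
 */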
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	return __sg_page_iter_next(&viter->iter);
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
	return sg_page_iter_page(&viter->iter);
}


/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->addrs = vsgt->addrs;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		viter->page = &__vmw_piter_sg_page;
		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}

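/*
 * Typical iteration over a vmw_sg_table (see vmw_ttm_map_dma() below):
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		...
 *	}
 */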

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free DMA mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		     DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for the current DMA mapping mode and
 * make sure the TTM pages are visible to the device. Allocate storage
 * for the device mappings. If a mapping has already been performed,
 * indicated by the storage pointer being non-NULL, the function
 * returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
					   true);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}


/**
 * vmw_bo_unmap_dma - Tear down any device mappings of buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}


/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}

static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

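/**
 * vmw_ttm_unbind - Unbind a TTM from its GMR or MOB
 *
 * @ttm: Pointer to the struct ttm_tt to unbind.
 *
 * Releases the GMR or MOB binding and, in the vmw_dma_map_bind mode,
 * also tears down the DMA mappings.
 */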
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}

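/**
 * vmw_ttm_destroy - Free a struct vmw_ttm_tt
 *
 * @ttm: Pointer to the struct ttm_tt to destroy.
 *
 * Tears down the DMA mappings, finalizes the underlying TTM and
 * destroys any MOB created for it.
 */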
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}

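/**
 * vmw_ttm_populate - Allocate pages to back a TTM
 *
 * @ttm: Pointer to the struct ttm_tt to populate.
 *
 * In the vmw_dma_alloc_coherent mode, pages come from the coherent DMA
 * pool and the DMA address array is accounted to the TTM memory global;
 * otherwise the normal TTM page pool is used.
 */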
static int vmw_ttm_populate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, false, true);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm);

	return ret;
}

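/**
 * vmw_ttm_unpopulate - Free the pages backing a TTM
 *
 * @ttm: Pointer to the struct ttm_tt to unpopulate.
 *
 * Destroys any MOB, tears down the DMA mappings and returns the pages
 * to the pool they were allocated from.
 */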
static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

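/**
 * vmw_ttm_tt_create - Create a struct ttm_tt for a buffer object
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @size: Size of the backing store in bytes.
 * @page_flags: TTM page flags.
 * @dummy_read_page: See the struct ttm_tt documentation.
 *
 * Allocates a struct vmw_ttm_tt and initializes it as a DMA-aware TTM
 * in the vmw_dma_alloc_coherent mode, or as a plain TTM otherwise.
 */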
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
				      dummy_read_page);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
				  dummy_read_page);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

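/* TTM cache invalidation callback; nothing to do for vmwgfx. */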
static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

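/**
 * vmw_init_mem_type - Initialize a TTM memory type manager
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @type: The memory type to initialize.
 * @man: The manager to initialize.
 *
 * Sets up the manager functions, flags and caching modes for the
 * system, VRAM, GMR and MOB memory types.
 */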
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 * one slot per bo. There is an upper limit on the number of
		 * slots as well as on the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

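/**
 * vmw_evict_flags - Return the placement to evict a buffer object to
 *
 * @bo: The buffer object to evict.
 * @placement: Receives the eviction placement; evicted buffers are
 * always moved to system memory.
 */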
static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

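/**
 * vmw_verify_access - Check that a file may access a buffer object
 *
 * @bo: The buffer object being mapped.
 * @filp: The file requesting access.
 *
 * Delegates the check to vmw_user_dmabuf_verify_access() with the
 * TTM object file of the calling drm client.
 */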
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_dmabuf_verify_access(bo, tfile);
}

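/**
 * vmw_ttm_io_mem_reserve - Set up io memory info for a memory region
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @mem: The memory region.
 *
 * System, GMR and MOB memory is not io-mapped. For VRAM the bus base
 * is the VRAM start address and the offset is the region start,
 * converted from pages to bytes.
 */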
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the move is an eviction.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it
 * (currently resources and queries).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *mem)
{
	vmw_resource_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 *
 * Waits for the buffer object to become idle before it is swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	(void) ttm_bo_wait(bo, false, false);
}

struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};