/**************************************************************************
 *
 * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>

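/*
 * Page numbers (PPNs) are written to the device as unsigned longs, so
 * their width follows the build; see the SVGA_REMAP_GMR2_PPN64 /
 * SVGA_REMAP_GMR2_PPN32 flag selection in vmw_gmr2_bind() below.
 */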
#define VMW_PPN_SIZE (sizeof(unsigned long))
/* A future-safe maximum remap size. */
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
#define DMA_ADDR_INVALID ((dma_addr_t) 0)
#define DMA_PAGE_INVALID 0UL
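/*
 * vmw_gmr2_bind - Bind a GMR using the GMR2 FIFO commands.
 *
 * Emits one SVGA_CMD_DEFINE_GMR2 followed by as many SVGA_CMD_REMAP_GMR2
 * commands as needed, splitting the page list into chunks of at most
 * VMW_PPN_PER_REMAP pages per remap command. As an illustration, on a
 * 64-bit build (VMW_PPN_SIZE == 8, VMW_PPN_PER_REMAP == 3968) binding
 * 10000 pages emits three remap commands. Returns 0 on success or
 * -ENOMEM if the FIFO reservation fails.
 */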
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
			 struct vmw_piter *iter,
			 unsigned long num_pages,
			 int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	SVGAFifoCmdRemapGMR2 remap_cmd;
	uint32_t *cmd;
	uint32_t *cmd_orig;
	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP +
		((num_pages % VMW_PPN_PER_REMAP) > 0);
	uint32_t remap_size = VMW_PPN_SIZE * num_pages +
		(sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
	uint32_t remap_pos = 0;
	uint32_t cmd_size = define_size + remap_size;
	uint32_t i;

	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = num_pages;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));
	cmd += sizeof(define_cmd) / sizeof(*cmd);
	/*
	 * The command needs to be split if there are too many
	 * pages that go into the GMR.
	 */

	remap_cmd.gmrId = gmr_id;
	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;

	while (num_pages > 0) {
		unsigned long nr = min(num_pages,
				       (unsigned long)VMW_PPN_PER_REMAP);

		remap_cmd.offsetPages = remap_pos;
		remap_cmd.numPages = nr;

		*cmd++ = SVGA_CMD_REMAP_GMR2;
		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
		cmd += sizeof(remap_cmd) / sizeof(*cmd);

		for (i = 0; i < nr; ++i) {
			if (VMW_PPN_SIZE <= 4)
				*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
			else
				*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
					PAGE_SHIFT;

			cmd += VMW_PPN_SIZE / sizeof(*cmd);
			vmw_piter_next(iter);
		}

		num_pages -= nr;
		remap_pos += nr;
	}

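	/* The emitted commands must fill the reserved space exactly. */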
	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}

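/*
 * vmw_gmr2_unbind - Unbind a GMR by redefining it with zero pages.
 */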
static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
			    int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	uint32_t define_size = sizeof(define_cmd) + sizeof(uint32_t);
	uint32_t *cmd;

	cmd = vmw_fifo_reserve(dev_priv, define_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("GMR2 unbind failed.\n");
		return;
	}
	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = 0;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));

	vmw_fifo_commit(dev_priv, define_size);
}

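/*
 * vmw_gmr_free_descriptors - Unmap and free a chain of descriptor pages.
 *
 * The last slot of each descriptor page holds the page number of the next
 * page in the chain, so the link is read out before each page is freed.
 */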
static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
				     struct list_head *desc_pages)
{
	struct page *page, *next;
	struct svga_guest_mem_descriptor *page_virtual;
	unsigned int desc_per_page = PAGE_SIZE /
		sizeof(struct svga_guest_mem_descriptor) - 1;

	if (list_empty(desc_pages))
		return;

	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);

		if (likely(desc_dma != DMA_ADDR_INVALID)) {
			dma_unmap_page(dev, desc_dma, PAGE_SIZE,
				       DMA_TO_DEVICE);
		}

		page_virtual = kmap_atomic(page);
		desc_dma = (dma_addr_t) page_virtual[desc_per_page].ppn <<
			PAGE_SHIFT;
		kunmap_atomic(page_virtual);

		__free_page(page);
	}
}

/*
 * FIXME: Adjust to the TTM lowmem / highmem storage to minimize
 * the number of used descriptors.
 */

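/*
 * vmw_gmr_build_descriptors - Build a legacy GMR descriptor chain.
 *
 * Walks the page iterator and run-length encodes physically contiguous
 * pages into svga_guest_mem_descriptor entries, allocating more
 * descriptor pages as needed. On success, the DMA address of the first
 * descriptor page is returned in @first_dma.
 */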
static int vmw_gmr_build_descriptors(struct device *dev,
				     struct list_head *desc_pages,
				     struct vmw_piter *iter,
				     unsigned long num_pages,
				     dma_addr_t *first_dma)
{
	struct page *page;
	struct svga_guest_mem_descriptor *page_virtual = NULL;
	struct svga_guest_mem_descriptor *desc_virtual = NULL;
	unsigned int desc_per_page;
	unsigned long prev_pfn;
	unsigned long pfn;
	int ret;
	dma_addr_t desc_dma;

	desc_per_page = PAGE_SIZE /
		sizeof(struct svga_guest_mem_descriptor) - 1;

	while (likely(num_pages != 0)) {
		page = alloc_page(__GFP_HIGHMEM);
		if (unlikely(page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		list_add_tail(&page->lru, desc_pages);
		page_virtual = kmap_atomic(page);
		desc_virtual = page_virtual - 1;
		prev_pfn = ~(0UL);

		while (likely(num_pages != 0)) {
			pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;

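			/*
			 * Run-length encode: extend the current descriptor
			 * while pfns stay contiguous; start a new one when
			 * the run breaks, or move to a fresh page when this
			 * one is full.
			 */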
			if (pfn != prev_pfn + 1) {
				if (desc_virtual - page_virtual ==
				    desc_per_page - 1)
					break;

				(++desc_virtual)->ppn = cpu_to_le32(pfn);
				desc_virtual->num_pages = cpu_to_le32(1);
			} else {
				uint32_t tmp =
					le32_to_cpu(desc_virtual->num_pages);
				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
			}
			prev_pfn = pfn;
			--num_pages;
			vmw_piter_next(iter);
		}

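		/*
		 * Terminate the descriptors written so far; the reserved
		 * last slot of the page is linked to the next descriptor
		 * page by the reverse walk below.
		 */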
		(++desc_virtual)->ppn = DMA_PAGE_INVALID;
		desc_virtual->num_pages = cpu_to_le32(0);
		kunmap_atomic(page_virtual);
	}

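	/*
	 * Map the descriptor pages in reverse list order, so that each
	 * page's reserved last slot can be filled with the page number of
	 * the descriptor page that follows it. The DMA address of the
	 * first page ends up in desc_dma and is handed back via
	 * @first_dma.
	 */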
	desc_dma = 0;
	list_for_each_entry_reverse(page, desc_pages, lru) {
		page_virtual = kmap_atomic(page);
		page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
		kunmap_atomic(page_virtual);
		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
					DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, desc_dma))) {
			ret = -ENOMEM;
			goto out_err;
		}
	}
	*first_dma = desc_dma;

	return 0;
out_err:
	vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
	return ret;
}

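/*
 * vmw_gmr_fire_descriptors - Point the device at a descriptor chain.
 *
 * Writes the GMR id and the page number of the first descriptor page
 * under the hardware mutex; the barriers keep the two register writes
 * ordered as the device expects.
 */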
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
				     int gmr_id, dma_addr_t desc_dma)
{
	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
	mb();

	mutex_unlock(&dev_priv->hw_mutex);
}

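/*
 * vmw_gmr_bind - Bind a GMR to a set of device pages.
 *
 * Prefers the GMR2 FIFO path when SVGA_CAP_GMR2 is set; otherwise falls
 * back to building, firing and freeing a legacy descriptor chain. An
 * empty page list is treated as a successful no-op.
 */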
int vmw_gmr_bind(struct vmw_private *dev_priv,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_pages,
		 int gmr_id)
{
	struct list_head desc_pages;
	dma_addr_t desc_dma = 0;
	struct device *dev = dev_priv->dev->dev;
	struct vmw_piter data_iter;
	int ret;

	vmw_piter_start(&data_iter, vsgt, 0);

	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
		return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);

	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
		return -EINVAL;

	if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
		return -EINVAL;

	INIT_LIST_HEAD(&desc_pages);

	ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
					num_pages, &desc_dma);
	if (unlikely(ret != 0))
		return ret;

	vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
	vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);

	return 0;
}

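/*
 * vmw_gmr_unbind - Release a GMR binding.
 *
 * Uses the GMR2 path when available; otherwise clears the legacy GMR
 * descriptor register for the given GMR id.
 */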
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
		vmw_gmr2_unbind(dev_priv, gmr_id);
		return;
	}

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
	mb();
	mutex_unlock(&dev_priv->hw_mutex);
}