/**************************************************************************
 *
 * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>

#define VMW_PPN_SIZE (sizeof(unsigned long))
/* A future-safe maximum remap size. */
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
#define DMA_ADDR_INVALID ((dma_addr_t) 0)
#define DMA_PAGE_INVALID 0UL

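/*
 * vmw_gmr2_bind - Bind a GMR (guest memory region) using the GMR2
 * FIFO commands.
 *
 * @dev_priv: Pointer to the device private struct.
 * @iter: Page iterator pointing at the first page of the region.
 * @num_pages: Number of pages to bind.
 * @gmr_id: Id of the GMR to bind.
 *
 * Emits a single SVGA_CMD_DEFINE_GMR2 command followed by as many
 * SVGA_CMD_REMAP_GMR2 commands as needed, each carrying at most
 * VMW_PPN_PER_REMAP page numbers, into the device FIFO.
 */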
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
			 struct vmw_piter *iter,
			 unsigned long num_pages,
			 int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	SVGAFifoCmdRemapGMR2 remap_cmd;
	uint32_t *cmd;
	uint32_t *cmd_orig;
	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
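	/*
	 * One remap command carries at most VMW_PPN_PER_REMAP page
	 * numbers, so remap_num is the number of remap commands needed
	 * (a ceiling division), and remap_size adds one command header
	 * per chunk on top of the PPN payload.
	 */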
	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
	uint32_t remap_pos = 0;
	uint32_t cmd_size = define_size + remap_size;
	uint32_t i;

	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = num_pages;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));
	cmd += sizeof(define_cmd) / sizeof(*cmd);

	/*
	 * Need to split the command if there are too many
	 * pages going into the GMR.
	 */

	remap_cmd.gmrId = gmr_id;
	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;

	while (num_pages > 0) {
		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);

		remap_cmd.offsetPages = remap_pos;
		remap_cmd.numPages = nr;

		*cmd++ = SVGA_CMD_REMAP_GMR2;
		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
		cmd += sizeof(remap_cmd) / sizeof(*cmd);

		for (i = 0; i < nr; ++i) {
			if (VMW_PPN_SIZE <= 4)
				*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
			else
				*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
					PAGE_SHIFT;

			cmd += VMW_PPN_SIZE / sizeof(*cmd);
			vmw_piter_next(iter);
		}

		num_pages -= nr;
		remap_pos += nr;
	}

	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}

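/*
 * vmw_gmr2_unbind - Unbind a GMR using the GMR2 FIFO commands.
 *
 * @dev_priv: Pointer to the device private struct.
 * @gmr_id: Id of the GMR to unbind.
 *
 * The region is released by redefining it with zero pages; only a
 * single SVGA_CMD_DEFINE_GMR2 command is emitted.
 */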
static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
			    int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	uint32_t define_size = sizeof(define_cmd) + 4;
	uint32_t *cmd;

	cmd = vmw_fifo_reserve(dev_priv, define_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("GMR2 unbind failed.\n");
		return;
	}
	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = 0;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));

	vmw_fifo_commit(dev_priv, define_size);
}


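/*
 * vmw_gmr_free_descriptors - Unmap and free a chain of legacy GMR
 * descriptor pages.
 *
 * @dev: Pointer to the struct device used for DMA mapping.
 * @desc_dma: DMA address of the first descriptor page, or
 * DMA_ADDR_INVALID if the pages were never mapped.
 * @desc_pages: List of descriptor pages to free.
 *
 * The DMA address of each page's successor is read back from the
 * page's last descriptor slot before the page itself is unmapped and
 * freed.
 */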
static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
				     struct list_head *desc_pages)
{
	struct page *page, *next;
	struct svga_guest_mem_descriptor *page_virtual;
	unsigned int desc_per_page = PAGE_SIZE /
		sizeof(struct svga_guest_mem_descriptor) - 1;

	if (list_empty(desc_pages))
		return;

	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);

		if (likely(desc_dma != DMA_ADDR_INVALID)) {
			dma_unmap_page(dev, desc_dma, PAGE_SIZE,
				       DMA_TO_DEVICE);
		}

		page_virtual = kmap_atomic(page);
		desc_dma = (dma_addr_t)
			le32_to_cpu(page_virtual[desc_per_page].ppn) <<
			PAGE_SHIFT;
		kunmap_atomic(page_virtual);

		__free_page(page);
	}
}

/**
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 *
 */

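/*
 * vmw_gmr_build_descriptors - Build a chain of legacy GMR descriptor
 * pages for a region.
 *
 * @dev: Pointer to the struct device used for DMA mapping.
 * @desc_pages: List head that receives the allocated descriptor pages.
 * @iter: Page iterator pointing at the first page of the region.
 * @num_pages: Number of pages to describe.
 * @first_dma: Outputs the DMA address of the first descriptor page.
 *
 * Runs of pages with consecutive page frame numbers are merged into a
 * single descriptor by bumping its num_pages count. The last
 * descriptor slot of each page is reserved to point at the next
 * descriptor page, which is why the pages are DMA-mapped in reverse
 * list order below.
 */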
static int vmw_gmr_build_descriptors(struct device *dev,
				     struct list_head *desc_pages,
				     struct vmw_piter *iter,
				     unsigned long num_pages,
				     dma_addr_t *first_dma)
{
	struct page *page;
	struct svga_guest_mem_descriptor *page_virtual = NULL;
	struct svga_guest_mem_descriptor *desc_virtual = NULL;
	unsigned int desc_per_page;
	unsigned long prev_pfn;
	unsigned long pfn;
	int ret;
	dma_addr_t desc_dma;

	desc_per_page = PAGE_SIZE /
		sizeof(struct svga_guest_mem_descriptor) - 1;

	while (likely(num_pages != 0)) {
		page = alloc_page(__GFP_HIGHMEM);
		if (unlikely(page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		list_add_tail(&page->lru, desc_pages);
		page_virtual = kmap_atomic(page);
		desc_virtual = page_virtual - 1;
		prev_pfn = ~(0UL);

		while (likely(num_pages != 0)) {
			pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;

			if (pfn != prev_pfn + 1) {

				if (desc_virtual - page_virtual ==
				    desc_per_page - 1)
					break;

				(++desc_virtual)->ppn = cpu_to_le32(pfn);
				desc_virtual->num_pages = cpu_to_le32(1);
			} else {
				uint32_t tmp =
					le32_to_cpu(desc_virtual->num_pages);
				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
			}
			prev_pfn = pfn;
			--num_pages;
			vmw_piter_next(iter);
		}

		(++desc_virtual)->ppn = DMA_PAGE_INVALID;
		desc_virtual->num_pages = cpu_to_le32(0);
		kunmap_atomic(page_virtual);
	}

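	/*
	 * Map the descriptor pages last-to-first, so that each page can
	 * record the already-known DMA address of its successor in its
	 * final descriptor slot. The chain terminator is a ppn of 0.
	 */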
	desc_dma = 0;
	list_for_each_entry_reverse(page, desc_pages, lru) {
		page_virtual = kmap_atomic(page);
		page_virtual[desc_per_page].ppn = cpu_to_le32
			(desc_dma >> PAGE_SHIFT);
		kunmap_atomic(page_virtual);
		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
					DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, desc_dma)))
			goto out_err;
	}
	*first_dma = desc_dma;

	return 0;
out_err:
	vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
	return ret;
}

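/*
 * vmw_gmr_fire_descriptors - Point the device at a legacy GMR
 * descriptor chain.
 *
 * @dev_priv: Pointer to the device private struct.
 * @gmr_id: Id of the GMR to bind.
 * @desc_dma: DMA address of the first descriptor page.
 *
 * Writes the GMR id and the page frame number of the first descriptor
 * page to the device registers, with barriers ordering the two writes.
 */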
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
				     int gmr_id, dma_addr_t desc_dma)
{
	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
	mb();

	mutex_unlock(&dev_priv->hw_mutex);

}

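/*
 * vmw_gmr_bind - Bind a GMR to the pages described by a vmw_sg_table.
 *
 * @dev_priv: Pointer to the device private struct.
 * @vsgt: Scatter/gather table describing the pages to bind.
 * @num_pages: Number of pages to bind.
 * @gmr_id: Id of the GMR to bind.
 *
 * Uses the GMR2 FIFO path when SVGA_CAP_GMR2 is advertised, and
 * otherwise falls back to building legacy descriptor pages and firing
 * them at the device registers.
 */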
int vmw_gmr_bind(struct vmw_private *dev_priv,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_pages,
		 int gmr_id)
{
	struct list_head desc_pages;
	dma_addr_t desc_dma = 0;
	struct device *dev = dev_priv->dev->dev;
	struct vmw_piter data_iter;
	int ret;

	vmw_piter_start(&data_iter, vsgt, 0);

	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
		return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);

	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
		return -EINVAL;

	if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
		return -EINVAL;

	INIT_LIST_HEAD(&desc_pages);

	ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
					num_pages, &desc_dma);
	if (unlikely(ret != 0))
		return ret;

	vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
	vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);

	return 0;
}


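/*
 * vmw_gmr_unbind - Unbind a GMR, using whichever of the GMR2 and
 * legacy paths the device supports.
 *
 * @dev_priv: Pointer to the device private struct.
 * @gmr_id: Id of the GMR to unbind.
 *
 * On the legacy path, the descriptor register is cleared for the
 * given GMR id.
 */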
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
		vmw_gmr2_unbind(dev_priv, gmr_id);
		return;
	}

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
	mb();
	mutex_unlock(&dev_priv->hw_mutex);
}