/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
        struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
        unsigned i;

        for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
                INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
                                  struct list_head *item, unsigned priority)
{
        /* Since buffers which appear sooner in the relocation list are
         * likely to be used more often than buffers which appear later
         * in the list, the sort mustn't change the ordering of buffers
         * with the same priority, i.e. it must be stable.
         */
        list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
                                       struct list_head *out_list)
{
        unsigned i;

        /* Connect the sorted buckets in the output list. */
        for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
                list_splice(&b->bucket[i], out_list);
        }
}
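
/* Illustrative sketch (not part of the driver): the three helpers above
 * are meant to be used together, mirroring radeon_cs_parser_relocs()
 * below; item_a/item_b and their priorities are made-up values.
 *
 *	struct radeon_cs_buckets buckets;
 *	LIST_HEAD(validated);
 *
 *	radeon_cs_buckets_init(&buckets);
 *	radeon_cs_buckets_add(&buckets, &item_a, 3);
 *	radeon_cs_buckets_add(&buckets, &item_b, 31);
 *	radeon_cs_buckets_get_list(&buckets, &validated);
 *
 * "validated" now holds item_b before item_a (descending priority), and
 * items sharing a priority keep their insertion order, since
 * list_add_tail() plus in-order splicing is stable.
 */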

static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
        struct drm_device *ddev = p->rdev->ddev;
        struct radeon_cs_chunk *chunk;
        struct radeon_cs_buckets buckets;
        unsigned i, j;
        bool duplicate, need_mmap_lock = false;
        int r;

        if (p->chunk_relocs_idx == -1) {
                return 0;
        }
        chunk = &p->chunks[p->chunk_relocs_idx];
        p->dma_reloc_idx = 0;
        /* FIXME: we assume that each reloc uses 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
        p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
        if (p->relocs_ptr == NULL) {
                return -ENOMEM;
        }
        p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
        if (p->relocs == NULL) {
                return -ENOMEM;
        }

        radeon_cs_buckets_init(&buckets);

        for (i = 0; i < p->nrelocs; i++) {
                struct drm_radeon_cs_reloc *r;
                unsigned priority;

                duplicate = false;
                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
                for (j = 0; j < i; j++) {
                        if (r->handle == p->relocs[j].handle) {
                                p->relocs_ptr[i] = &p->relocs[j];
                                duplicate = true;
                                break;
                        }
                }
                if (duplicate) {
                        p->relocs[i].handle = 0;
                        continue;
                }

                p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
                                                          r->handle);
                if (p->relocs[i].gobj == NULL) {
                        DRM_ERROR("gem object lookup failed 0x%x\n",
                                  r->handle);
                        return -ENOENT;
                }
                p->relocs_ptr[i] = &p->relocs[i];
                p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);

                /* The userspace buffer priorities are from 0 to 15. A higher
                 * number means the buffer is more important.
                 * Also, the buffers used for write have a higher priority than
                 * the buffers used for read only, which doubles the range
                 * to 0 to 31. 32 is reserved for the kernel driver.
                 */
                priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
                           + !!r->write_domain;
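
                /* Worked example (illustrative numbers only): a buffer
                 * submitted for write with userspace priority 15 gets
                 * 15 * 2 + 1 = 31, the top of the userspace range, while a
                 * read-only buffer with priority 0 gets 0 * 2 + 0 = 0.
                 * RADEON_CS_MAX_PRIORITY (32) stays reserved for the kernel,
                 * see the UVD special case below.
                 */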

                /* the first reloc of an UVD job is the msg and that must be in
                   VRAM, also put everything into VRAM on AGP cards and older
                   IGP chips to avoid image corruption */
                if (p->ring == R600_RING_TYPE_UVD_INDEX &&
                    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
                     p->rdev->family == CHIP_RS780 ||
                     p->rdev->family == CHIP_RS880)) {

                        /* TODO: is this still needed for NI+ ? */
                        p->relocs[i].prefered_domains =
                                RADEON_GEM_DOMAIN_VRAM;

                        p->relocs[i].allowed_domains =
                                RADEON_GEM_DOMAIN_VRAM;

                        /* prioritize this over any other relocation */
                        priority = RADEON_CS_MAX_PRIORITY;
                } else {
                        uint32_t domain = r->write_domain ?
                                r->write_domain : r->read_domains;

                        if (domain & RADEON_GEM_DOMAIN_CPU) {
                                DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
                                          "for command submission\n");
                                return -EINVAL;
                        }

                        p->relocs[i].prefered_domains = domain;
                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain |= RADEON_GEM_DOMAIN_GTT;
                        p->relocs[i].allowed_domains = domain;
                }

                if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
                        uint32_t domain = p->relocs[i].prefered_domains;
                        if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
                                DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
                                          "allowed for userptr BOs\n");
                                return -EINVAL;
                        }
                        need_mmap_lock = true;
                        domain = RADEON_GEM_DOMAIN_GTT;
                        p->relocs[i].prefered_domains = domain;
                        p->relocs[i].allowed_domains = domain;
                }

                p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
                p->relocs[i].tv.shared = false;
                p->relocs[i].handle = r->handle;

                radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
                                      priority);
        }

        radeon_cs_buckets_get_list(&buckets, &p->validated);

        if (p->cs_flags & RADEON_CS_USE_VM)
                p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
                                              &p->validated);
        if (need_mmap_lock)
                down_read(&current->mm->mmap_sem);

        r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

        if (need_mmap_lock)
                up_read(&current->mm->mmap_sem);

        return r;
}

static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
        p->priority = priority;

        switch (ring) {
        default:
                DRM_ERROR("unknown ring id: %d\n", ring);
                return -EINVAL;
        case RADEON_CS_RING_GFX:
                p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
        case RADEON_CS_RING_COMPUTE:
                if (p->rdev->family >= CHIP_TAHITI) {
                        if (p->priority > 0)
                                p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
                        else
                                p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
                } else
                        p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
        case RADEON_CS_RING_DMA:
                if (p->rdev->family >= CHIP_CAYMAN) {
                        if (p->priority > 0)
                                p->ring = R600_RING_TYPE_DMA_INDEX;
                        else
                                p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
                } else if (p->rdev->family >= CHIP_RV770) {
                        p->ring = R600_RING_TYPE_DMA_INDEX;
                } else {
                        return -EINVAL;
                }
                break;
        case RADEON_CS_RING_UVD:
                p->ring = R600_RING_TYPE_UVD_INDEX;
                break;
        case RADEON_CS_RING_VCE:
                /* TODO: only use the low priority ring for now */
                p->ring = TN_RING_TYPE_VCE1_INDEX;
                break;
        }
        return 0;
}

static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
        int i;

        for (i = 0; i < p->nrelocs; i++) {
                struct reservation_object *resv;

                if (!p->relocs[i].robj)
                        continue;

                resv = p->relocs[i].robj->tbo.resv;
                radeon_semaphore_sync_resv(p->ib.semaphore, resv, false);
        }
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
        struct drm_radeon_cs *cs = data;
        uint64_t *chunk_array_ptr;
        unsigned size, i;
        u32 ring = RADEON_CS_RING_GFX;
        s32 priority = 0;

        if (!cs->num_chunks) {
                return 0;
        }
        /* get chunks */
        INIT_LIST_HEAD(&p->validated);
        p->idx = 0;
        p->ib.sa_bo = NULL;
        p->ib.semaphore = NULL;
        p->const_ib.sa_bo = NULL;
        p->const_ib.semaphore = NULL;
        p->chunk_ib_idx = -1;
        p->chunk_relocs_idx = -1;
        p->chunk_flags_idx = -1;
        p->chunk_const_ib_idx = -1;
        p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (p->chunks_array == NULL) {
                return -ENOMEM;
        }
        chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
        if (copy_from_user(p->chunks_array, chunk_array_ptr,
                           sizeof(uint64_t)*cs->num_chunks)) {
                return -EFAULT;
        }
        p->cs_flags = 0;
        p->nchunks = cs->num_chunks;
        p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
        if (p->chunks == NULL) {
                return -ENOMEM;
        }
        for (i = 0; i < p->nchunks; i++) {
                struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
                struct drm_radeon_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
                if (copy_from_user(&user_chunk, chunk_ptr,
                                   sizeof(struct drm_radeon_cs_chunk))) {
                        return -EFAULT;
                }
                p->chunks[i].length_dw = user_chunk.length_dw;
                p->chunks[i].chunk_id = user_chunk.chunk_id;
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
                        p->chunk_relocs_idx = i;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
                        p->chunk_ib_idx = i;
                        /* zero length IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
                        p->chunk_const_ib_idx = i;
                        /* zero length CONST IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->chunk_flags_idx = i;
                        /* zero length flags aren't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }

                size = p->chunks[i].length_dw;
                cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
                p->chunks[i].user_ptr = cdata;
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
                        continue;

                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
                        if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
                                continue;
                }

                p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
                size *= sizeof(uint32_t);
                if (p->chunks[i].kdata == NULL) {
                        return -ENOMEM;
                }
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        return -EFAULT;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->cs_flags = p->chunks[i].kdata[0];
                        if (p->chunks[i].length_dw > 1)
                                ring = p->chunks[i].kdata[1];
                        if (p->chunks[i].length_dw > 2)
                                priority = (s32)p->chunks[i].kdata[2];
                }
        }

        /* these are KMS only */
        if (p->rdev) {
                if ((p->cs_flags & RADEON_CS_USE_VM) &&
                    !p->rdev->vm_manager.enabled) {
                        DRM_ERROR("VM not active on asic!\n");
                        return -EINVAL;
                }

                if (radeon_cs_get_ring(p, ring, priority))
                        return -EINVAL;

                /* we only support VM on some SI+ rings */
                if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
                        if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
                                DRM_ERROR("Ring %d requires VM!\n", p->ring);
                                return -EINVAL;
                        }
                } else {
                        if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
                                DRM_ERROR("VM not supported on ring %d!\n",
                                          p->ring);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
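
/* Illustrative sketch (not part of the driver): how a userspace FLAGS
 * chunk drives the parsing above. Its three dwords land in kdata[0..2]
 * as cs_flags, ring and priority; the values here are made up.
 *
 *	uint32_t flags_chunk[3] = {
 *		RADEON_CS_USE_VM,	// cs_flags
 *		RADEON_CS_RING_COMPUTE,	// ring
 *		1,			// priority
 *	};
 *
 * With this chunk, radeon_cs_get_ring() would pick
 * CAYMAN_RING_TYPE_CP1_INDEX on a CHIP_TAHITI or newer part
 * (priority > 0), and fall back to the GFX ring on older families.
 */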

static int cmp_size_smaller_first(void *priv, struct list_head *a,
                                  struct list_head *b)
{
        struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
        struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);

        /* Sort A before B if A is smaller. */
        return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
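
/* Note (illustrative): list_sort() treats a negative comparator result
 * as "a sorts before b", so for BOs of e.g. 2 and 8 pages the call
 * returns 2 - 8 = -6 and the 2-page BO ends up first. list_sort() is a
 * stable merge sort, so equally sized buffers keep their relative order
 * from the priority sort above.
 */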

/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
        unsigned i;

        if (!error) {
                /* Sort the buffer list from the smallest to largest buffer,
                 * which affects the order of buffers in the LRU list.
                 * This assures that the smallest buffers are added first
                 * to the LRU list, so they are likely to be later evicted
                 * first, instead of large buffers whose eviction is more
                 * expensive.
                 *
                 * This slightly lowers the number of bytes moved by TTM
                 * per frame under memory pressure.
                 */
                list_sort(NULL, &parser->validated, cmp_size_smaller_first);

                ttm_eu_fence_buffer_objects(&parser->ticket,
                                            &parser->validated,
                                            &parser->ib.fence->base);
        } else if (backoff) {
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);
        }

        if (parser->relocs != NULL) {
                for (i = 0; i < parser->nrelocs; i++) {
                        if (parser->relocs[i].gobj)
                                drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
                }
        }
        kfree(parser->track);
        kfree(parser->relocs);
        kfree(parser->relocs_ptr);
        kfree(parser->vm_bos);
        for (i = 0; i < parser->nchunks; i++)
                drm_free_large(parser->chunks[i].kdata);
        kfree(parser->chunks);
        kfree(parser->chunks_array);
        radeon_ib_free(parser->rdev, &parser->ib);
        radeon_ib_free(parser->rdev, &parser->const_ib);
}

static int radeon_cs_ib_chunk(struct radeon_device *rdev,
                              struct radeon_cs_parser *parser)
{
        int r;

        if (parser->chunk_ib_idx == -1)
                return 0;

        if (parser->cs_flags & RADEON_CS_USE_VM)
                return 0;

        r = radeon_cs_parse(rdev, parser->ring, parser);
        if (r || parser->parser_error) {
                DRM_ERROR("Invalid command stream !\n");
                return r;
        }

        if (parser->ring == R600_RING_TYPE_UVD_INDEX)
                radeon_uvd_note_usage(rdev);
        else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
                 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
                radeon_vce_note_usage(rdev);

        radeon_cs_sync_rings(parser);
        r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        if (r) {
                DRM_ERROR("Failed to schedule IB !\n");
        }
        return r;
}

static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                                   struct radeon_vm *vm)
{
        struct radeon_device *rdev = p->rdev;
        struct radeon_bo_va *bo_va;
        int i, r;

        r = radeon_vm_update_page_directory(rdev, vm);
        if (r)
                return r;

        r = radeon_vm_clear_freed(rdev, vm);
        if (r)
                return r;

        if (vm->ib_bo_va == NULL) {
                DRM_ERROR("Tmp BO not in VM!\n");
                return -EINVAL;
        }

        r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
                                &rdev->ring_tmp_bo.bo->tbo.mem);
        if (r)
                return r;

        for (i = 0; i < p->nrelocs; i++) {
                struct radeon_bo *bo;

                /* ignore duplicates */
                if (p->relocs_ptr[i] != &p->relocs[i])
                        continue;

                bo = p->relocs[i].robj;
                bo_va = radeon_vm_bo_find(vm, bo);
                if (bo_va == NULL) {
                        dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
                        return -EINVAL;
                }

                r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;
        }

        return radeon_vm_clear_invalids(rdev, vm);
}

static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
                                 struct radeon_cs_parser *parser)
{
        struct radeon_fpriv *fpriv = parser->filp->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        int r;

        if (parser->chunk_ib_idx == -1)
                return 0;
        if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
                return 0;

        if (parser->const_ib.length_dw) {
                r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
                if (r) {
                        return r;
                }
        }

        r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
        if (r) {
                return r;
        }

        if (parser->ring == R600_RING_TYPE_UVD_INDEX)
                radeon_uvd_note_usage(rdev);

        mutex_lock(&vm->mutex);
        r = radeon_bo_vm_update_pte(parser, vm);
        if (r) {
                goto out;
        }
        radeon_cs_sync_rings(parser);
        radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);

        if ((rdev->family >= CHIP_TAHITI) &&
            (parser->chunk_const_ib_idx != -1)) {
                r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
        } else {
                r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        }

out:
        mutex_unlock(&vm->mutex);
        return r;
}

static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_vm *vm = NULL;
        int r;

        if (parser->chunk_ib_idx == -1)
                return 0;

        if (parser->cs_flags & RADEON_CS_USE_VM) {
                struct radeon_fpriv *fpriv = parser->filp->driver_priv;
                vm = &fpriv->vm;

                if ((rdev->family >= CHIP_TAHITI) &&
                    (parser->chunk_const_ib_idx != -1)) {
                        ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
                        if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                                DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
                                return -EINVAL;
                        }
                        r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
                                          vm, ib_chunk->length_dw * 4);
                        if (r) {
                                DRM_ERROR("Failed to get const ib !\n");
                                return r;
                        }
                        parser->const_ib.is_const_ib = true;
                        parser->const_ib.length_dw = ib_chunk->length_dw;
                        if (copy_from_user(parser->const_ib.ptr,
                                           ib_chunk->user_ptr,
                                           ib_chunk->length_dw * 4))
                                return -EFAULT;
                }

                ib_chunk = &parser->chunks[parser->chunk_ib_idx];
                if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                        DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
                        return -EINVAL;
                }
        }
        ib_chunk = &parser->chunks[parser->chunk_ib_idx];

        r = radeon_ib_get(rdev, parser->ring, &parser->ib,
                          vm, ib_chunk->length_dw * 4);
        if (r) {
                DRM_ERROR("Failed to get ib !\n");
                return r;
        }
        parser->ib.length_dw = ib_chunk->length_dw;
        if (ib_chunk->kdata)
                memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
        else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
                return -EFAULT;
        return 0;
}

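/* Overview of the CS submission flow below (a summary, not new
 * behaviour): radeon_cs_parser_init() copies in the user chunks,
 * radeon_cs_ib_fill() copies the IB(s), radeon_cs_parser_relocs() looks
 * up and validates every referenced BO, then either radeon_cs_ib_chunk()
 * (non-VM) or radeon_cs_ib_vm_chunk() (VM) checks and schedules the IB,
 * and radeon_cs_parser_fini() fences or backs off the buffers.
 */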
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_cs_parser parser;
        int r;

        down_read(&rdev->exclusive_lock);
        if (!rdev->accel_working) {
                up_read(&rdev->exclusive_lock);
                return -EBUSY;
        }
        if (rdev->in_reset) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
                return r;
        }
        /* initialize parser */
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
        parser.rdev = rdev;
        parser.dev = rdev->dev;
        parser.family = rdev->family;
        r = radeon_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
                radeon_cs_parser_fini(&parser, r, false);
                up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }

        r = radeon_cs_ib_fill(rdev, &parser);
        if (!r) {
                r = radeon_cs_parser_relocs(&parser);
                if (r && r != -ERESTARTSYS)
                        DRM_ERROR("Failed to parse relocation %d!\n", r);
        }

        if (r) {
                radeon_cs_parser_fini(&parser, r, false);
                up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }

        trace_radeon_cs(&parser);

        r = radeon_cs_ib_chunk(rdev, &parser);
        if (r) {
                goto out;
        }
        r = radeon_cs_ib_vm_chunk(rdev, &parser);
        if (r) {
                goto out;
        }
out:
        radeon_cs_parser_fini(&parser, r, true);
        up_read(&rdev->exclusive_lock);
        r = radeon_cs_handle_lockup(rdev, r);
        return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		structure holding the parser context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet's first dword in the ib
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * type is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt,
                           unsigned idx)
{
        struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
        struct radeon_device *rdev = p->rdev;
        uint32_t header;

        if (idx >= ib_chunk->length_dw) {
                DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                          idx, ib_chunk->length_dw);
                return -EINVAL;
        }
        header = radeon_get_ib_value(p, idx);
        pkt->idx = idx;
        pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
        pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
        pkt->one_reg_wr = 0;
        switch (pkt->type) {
        case RADEON_PACKET_TYPE0:
                if (rdev->family < CHIP_R600) {
                        pkt->reg = R100_CP_PACKET0_GET_REG(header);
                        pkt->one_reg_wr =
                                RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
                } else
                        pkt->reg = R600_CP_PACKET0_GET_REG(header);
                break;
        case RADEON_PACKET_TYPE3:
                pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
                break;
        case RADEON_PACKET_TYPE2:
                pkt->count = -1;
                break;
        default:
                DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
                return -EINVAL;
        }
        if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                          pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
                return -EINVAL;
        }
        return 0;
}
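
/* Illustrative decode, assuming the usual PM4 header layout behind the
 * GET_TYPE/GET_COUNT/GET_OPCODE macros above (type in bits [31:30],
 * count in [29:16], opcode in [15:8]): a header of 0xC0001000 parses as
 * type 3, count 0, opcode 0x10 (NOP), i.e. a two dword packet - the
 * header plus count + 1 payload dwords.
 */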

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet p3reloc;
        int r;

        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return false;
        if (p3reloc.type != RADEON_PACKET_TYPE3)
                return false;
        if (p3reloc.opcode != RADEON_PACKET3_NOP)
                return false;
        return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt)
{
        volatile uint32_t *ib;
        unsigned i;
        unsigned idx;

        ib = p->ib.ptr;
        idx = pkt->idx;
        for (i = 0; i <= (pkt->count + 1); i++, idx++)
                DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resulting reloc information
 * @nomm:	no memory management (legacy path)
 *
 * Check if the next packet is a relocation packet3, do bo validation and
 * return the corresponding reloc entry from the relocation chunk.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
                                struct radeon_cs_reloc **cs_reloc,
                                int nomm)
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
        int r;

        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return r;
        p->idx += p3reloc.count + 2;
        if (p3reloc.type != RADEON_PACKET_TYPE3 ||
            p3reloc.opcode != RADEON_PACKET3_NOP) {
                DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                          p3reloc.idx);
                radeon_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                radeon_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        /* FIXME: we assume reloc size is 4 dwords */
        if (nomm) {
                *cs_reloc = p->relocs;
                (*cs_reloc)->gpu_offset =
                        (u64)relocs_chunk->kdata[idx + 3] << 32;
                (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
        } else
                *cs_reloc = p->relocs_ptr[(idx / 4)];
        return 0;
}
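
/* Note on the reloc lookup above (illustrative): the single payload dword
 * of the relocation NOP is a dword index into the relocation chunk, hence
 * the "idx >= relocs_chunk->length_dw" check; since each reloc entry is
 * assumed to take 4 dwords (see the FIXME), idx / 4 selects the matching
 * entry in p->relocs_ptr for the KMS path.
 */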