/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
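
/* A minimal usage sketch of the bucket helpers above (illustrative only;
 * the real caller is radeon_cs_parser_relocs() below, and "item_a"/"item_b"
 * are hypothetical radeon_bo_list entries):
 *
 *	struct radeon_cs_buckets b;
 *	LIST_HEAD(sorted);
 *
 *	radeon_cs_buckets_init(&b);
 *	radeon_cs_buckets_add(&b, &item_a->tv.head, 3);
 *	radeon_cs_buckets_add(&b, &item_b->tv.head, 7);
 *	radeon_cs_buckets_get_list(&b, &sorted);
 *
 * Each bucket is spliced at the head of the output list, so "sorted" ends
 * up holding item_b (priority 7) ahead of item_a (priority 3).
 */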

static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i;
	bool need_mmap_lock = false;
	int r;

	if (p->chunk_relocs == NULL) {
		return 0;
	}
	chunk = p->chunk_relocs;
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		struct drm_gem_object *gobj;
		unsigned priority;

		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
		if (gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs[i].robj = gem_to_radeon_bo(gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;
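
		/* Worked example: a reloc with userspace priority 15 and a
		 * write domain yields 15 * 2 + 1 = 31, the top of the
		 * userspace range; RADEON_CS_MAX_PRIORITY (32) stays
		 * reserved for in-kernel users such as the UVD case below.
		 */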

		/* The first reloc of an UVD job is the msg and that must be
		 * in VRAM. Also put everything into VRAM on AGP cards and
		 * older IGP chips to avoid image corruption.
		 */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].prefered_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.shared = !r->write_domain;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}

static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}

static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	struct radeon_bo_list *reloc;
	int r;

	list_for_each_entry(reloc, &p->validated, tv.head) {
		struct reservation_object *resv;

		resv = reloc->robj->tbo.resv;
		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
				     reloc->tv.shared);
		if (r)
			return r;
	}
	return 0;
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	INIT_LIST_HEAD(&p->validated);

	if (!cs->num_chunks) {
		return 0;
	}

	/* get chunks */
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->const_ib.sa_bo = NULL;
	p->chunk_ib = NULL;
	p->chunk_relocs = NULL;
	p->chunk_flags = NULL;
	p->chunk_const_ib = NULL;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			   sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs = &p->chunks[i];
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib = &p->chunks[i];
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib = &p->chunks[i];
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags = &p->chunks[i];
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}
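
/* Userspace view of the chunk layout parsed above (an illustrative sketch
 * based on the radeon_drm.h UAPI; the ioctl invocation and error handling
 * are omitted, and "ib_words"/"NDW" are hypothetical). cs->chunks points to
 * an array of uint64_t, each entry pointing to one struct
 * drm_radeon_cs_chunk:
 *
 *	uint32_t ib_words[NDW];	// the packets to execute
 *	struct drm_radeon_cs_chunk ib = {
 *		.chunk_id   = RADEON_CHUNK_ID_IB,
 *		.length_dw  = NDW,
 *		.chunk_data = (uint64_t)(uintptr_t)ib_words,
 *	};
 *	uint64_t chunk_array[] = { (uint64_t)(uintptr_t)&ib };
 *	struct drm_radeon_cs cs = {
 *		.num_chunks = 1,
 *		.chunks     = (uint64_t)(uintptr_t)chunk_array,
 *	};
 */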

static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
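
/* list_sort() sorts ascending given a comparator that returns a negative
 * value when @a belongs before @b, so the num_pages difference above yields
 * a smallest-first ordering. list_sort() is also stable, which preserves
 * the bucket priority order among equally sized buffers.
 */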

/**
 * cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator whether to back off the reservation
 *
 * If error is set, unvalidate the buffers; otherwise just free the memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are more likely to be evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			struct radeon_bo *bo = parser->relocs[i].robj;
			if (bo == NULL)
				continue;

			drm_gem_object_unreference_unlocked(&bo->gem_base);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	drm_free_large(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;

		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
	}

	return radeon_vm_clear_invalids(rdev, vm);
}

static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib != NULL)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}

static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib != NULL)) {
			ib_chunk = parser->chunk_const_ib;
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					   ib_chunk->user_ptr,
					   ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = parser->chunk_ib;
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = parser->chunk_ib;

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context.
 * @pkt: where to store packet information
 * @idx: index of the packet header within the ib chunk
 *
 * Assumes that the ib chunk is properly set. Returns -EINVAL if the packet
 * is bigger than the remaining ib size, or if the packet type is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_device *rdev = p->rdev;
	uint32_t header;
	int ret = 0, i;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		ret = -EINVAL;
		goto dump_ib;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		ret = -EINVAL;
		goto dump_ib;
	}
	return 0;

dump_ib:
	for (i = 0; i < ib_chunk->length_dw; i++) {
		if (i == idx)
			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
		else
			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
	}
	return ret;
}
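
/* Worked example of the header decode above, assuming the usual radeon
 * packet encoding (type in bits 31:30, count in bits 29:16, PACKET3 opcode
 * in bits 15:8): header 0xC0001000 decodes to type 3, count 0 and opcode
 * 0x10 (RADEON_PACKET3_NOP); such a packet spans count + 2 = 2 dwords,
 * which matches the "p->idx += p3reloc.count + 2" skip in
 * radeon_cs_packet_next_reloc() below.
 */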

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p: structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p: structure holding the parser context.
 * @pkt: structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the resulting reloc entry
 * @nomm: if set, skip memory management and take the GPU offset straight
 *	from the reloc chunk (legacy UMS path)
 *
 * Check if the next packet is a relocation packet3 and look up the
 * corresponding reloc entry.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_bo_list **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = p->chunk_relocs;
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = &p->relocs[(idx / 4)];
	return 0;
}