/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
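/* For example, relocs with priorities {0, 31, 2, 31} are added to buckets
 * 0, 31, 2 and 31; since bucket 0 is spliced first and bucket 32 last, the
 * two priority-31 buffers (still in submission order) end up at the head of
 * the final list, followed by the priority-2 and the priority-0 buffer.
 */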
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
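	/* list_add_tail() keeps the items of one bucket in submission order,
	 * which is what makes the sort stable.
	 */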
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}

static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i;
	bool need_mmap_lock = false;
	int r;

	if (p->chunk_relocs == NULL) {
		return 0;
	}
	chunk = p->chunk_relocs;
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		struct drm_gem_object *gobj;
		unsigned priority;

		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		gobj = drm_gem_object_lookup(p->filp, r->handle);
		if (gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs[i].robj = gem_to_radeon_bo(gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
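		/* For example, a userspace priority of 15 on a buffer that is
		 * written to maps to 15 * 2 + 1 = 31; only the kernel itself
		 * hands out 32 (RADEON_CS_MAX_PRIORITY), see the UVD case
		 * below.
		 */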
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;

		/* The first reloc of a UVD job is the msg and that must be in
		   VRAM; also put everything into VRAM on AGP cards and older
		   IGP chips to avoid image corruption. */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].prefered_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.shared = !r->write_domain;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}

static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}

static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	struct radeon_bo_list *reloc;
	int r;

	list_for_each_entry(reloc, &p->validated, tv.head) {
		struct reservation_object *resv;

		resv = reloc->robj->tbo.resv;
		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
				     reloc->tv.shared);
		if (r)
			return r;
	}
	return 0;
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	INIT_LIST_HEAD(&p->validated);

	if (!cs->num_chunks) {
		return 0;
	}

	/* get chunks */
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->const_ib.sa_bo = NULL;
	p->chunk_ib = NULL;
	p->chunk_relocs = NULL;
	p->chunk_flags = NULL;
	p->chunk_const_ib = NULL;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			   sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs = &p->chunks[i];
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib = &p->chunks[i];
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib = &p->chunks[i];
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags = &p->chunks[i];
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

	/* Sort A before B if A is smaller. */
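	/* A negative return sorts A earlier, so e.g. a one-page BO ends up
	 * before a 256-page BO in the validated list.
	 */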
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to back off the buffer reservations
 *
 * If error is set, then unvalidate the buffers, otherwise just free memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be evicted first
		 * later on, instead of large buffers whose eviction is
		 * more expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			struct radeon_bo *bo = parser->relocs[i].robj;
			if (bo == NULL)
				continue;

			drm_gem_object_unreference_unlocked(&bo->gem_base);
		}
	}
	kfree(parser->track);
	drm_free_large(parser->relocs);
	drm_free_large(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;

		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
	}

	return radeon_vm_clear_invalids(rdev, vm);
}

static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib != NULL)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}

static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib != NULL)) {
			ib_chunk = parser->chunk_const_ib;
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					   ib_chunk->user_ptr,
					   ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = parser->chunk_ib;
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = parser->chunk_ib;

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header inside the ib chunk
 *
 * Assumes that the ib chunk is properly set up. Returns -EINVAL if the
 * packet is bigger than the remaining ib size or if the packet type is
 * unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_device *rdev = p->rdev;
	uint32_t header;
	int ret = 0, i;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
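	/* Rough sketch of the CP packet header layout as used here: bits
	 * 31:30 hold the packet type and bits 29:16 the dword count; PACKET3
	 * additionally carries its opcode in bits 15:8, so e.g. 0xC0001000
	 * decodes as a PACKET3 NOP with a count of 0.
	 */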
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		ret = -EINVAL;
		goto dump_ib;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		ret = -EINVAL;
		goto dump_ib;
	}
	return 0;

dump_ib:
	for (i = 0; i < ib_chunk->length_dw; i++) {
		if (i == idx)
			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
		else
			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
	}
	return ret;
}

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding the parsing context.
 * @cs_reloc:	where to store the pointer to the resulting reloc entry
 * @nomm:	no memory management, take the GPU offset directly from the
 *		relocation chunk data (legacy path)
 *
 * Check that the next packet is a relocation packet3 and look up the
 * corresponding entry in the relocation chunk.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_bo_list **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = p->chunk_relocs;
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
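	/* The NOP payload is a dword offset into the relocation chunk; with
	 * 4 dwords per reloc entry, idx / 4 selects the matching slot in
	 * p->relocs below.
	 */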
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = &p->relocs[(idx / 4)];
	return 0;
}