/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);

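/* Build the list of buffer objects referenced by the relocation chunk,
 * reusing entries for duplicate handles, and validate them.
 */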
int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);

			if (p->relocs[i].robj->tbo.sync_obj &&
			    !(r->flags & RADEON_RELOC_DONT_SYNC)) {
				struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
				if (!radeon_fence_signaled(fence)) {
					p->sync_to_ring[fence->ring] = true;
				}
			}
		} else
			p->relocs[i].handle = 0;
	}
	return radeon_bo_list_validate(&p->validated);
}

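/* Translate the userspace ring id into a kernel ring index and record the
 * requested submission priority.
 */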
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		/* for now */
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	}
	return 0;
}

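/* Emit semaphore signal/wait pairs so the submission ring waits for every
 * other ready ring that this submission was flagged to synchronize with.
 */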
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		/* no need to sync to our own or unused rings */
		if (i == p->ring || !p->sync_to_ring[i] || !p->rdev->ring[i].ready)
			continue;

		if (!p->ib->fence->semaphore) {
			r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
			if (r)
				return r;
		}

		r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
		if (r)
			return r;
		radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
		radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);

		r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
		if (r)
			return r;
		radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
		radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
	}
	return 0;
}

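/* Copy the chunk descriptors from userspace and set up the parser state:
 * chunk indices, CS flags, ring and priority, and the IB paging buffers.
 */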
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
				p->cs_flags = p->chunks[i].kdata[0];
				if (p->chunks[i].length_dw > 1)
					ring = p->chunks[i].kdata[1];
				if (p->chunks[i].length_dw > 2)
					priority = (s32)p->chunks[i].kdata[2];
			}
		}
	}

	if ((p->cs_flags & RADEON_CS_USE_VM) &&
	    (p->rdev->family < CHIP_CAYMAN)) {
		DRM_ERROR("VM not supported on asic!\n");
		if (p->chunk_relocs_idx != -1)
			kfree(p->chunks[p->chunk_relocs_idx].kdata);
		if (p->chunk_flags_idx != -1)
			kfree(p->chunks[p->chunk_flags_idx].kdata);
		return -EINVAL;
	}

	if (radeon_cs_get_ring(p, ring, priority)) {
		if (p->chunk_relocs_idx != -1)
			kfree(p->chunks[p->chunk_relocs_idx].kdata);
		if (p->chunk_flags_idx != -1)
			kfree(p->chunks[p->chunk_flags_idx].kdata);
		return -EINVAL;
	}

	/* deal with non-vm */
	if ((p->chunk_ib_idx != -1) &&
	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
			DRM_ERROR("cs IB too big: %d\n",
				  p->chunks[p->chunk_ib_idx].length_dw);
			return -EINVAL;
		}
		p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
		p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
		    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
			kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
			kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
			return -ENOMEM;
		}
		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
		p->chunks[p->chunk_ib_idx].last_page_index =
			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
	}

	return 0;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, back off the reservation of the validated buffers,
 * otherwise fence them; in both cases free the memory used by the
 * parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error && parser->ib)
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib->fence);
	else
		ttm_eu_backoff_reservation(&parser->validated);

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
}

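/* Parse, synchronize and schedule the IB chunk for the legacy (non-VM) path. */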
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	/* Copy the packet into the IB; the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib->length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	r = radeon_cs_finish_pages(parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	r = radeon_cs_sync_rings(parser);
	if (r) {
		DRM_ERROR("Failed to synchronize rings !\n");
	}
	parser->ib->vm_id = 0;
	r = radeon_ib_schedule(rdev, parser->ib);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return 0;
}

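/* Update the page table entries of every buffer on the validated list. */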
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
				   struct radeon_vm *vm)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	int r;

	list_for_each_entry(lobj, &parser->validated, tv.head) {
		bo = lobj->bo;
		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
		if (r) {
			return r;
		}
	}
	return 0;
}

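/* Copy, check, bind and schedule the IB chunk for the VM path
 * (Cayman and newer ASICs).
 */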
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
		return -EINVAL;
	}
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib->length_dw = ib_chunk->length_dw;
	/* Copy the packet into the IB */
	if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
			       ib_chunk->length_dw * 4)) {
		return -EFAULT;
	}
	r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
	if (r) {
		return r;
	}

	mutex_lock(&vm->mutex);
	r = radeon_vm_bind(rdev, vm);
	if (r) {
		goto out;
	}
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	r = radeon_cs_sync_rings(parser);
	if (r) {
		DRM_ERROR("Failed to synchronize rings !\n");
	}
	parser->ib->vm_id = vm->id;
	/* The ib pool is bound at 0 in the virtual address space, so
	 * gpu_addr is the offset inside the pool bo.
	 */
	parser->ib->gpu_addr = parser->ib->sa_bo.offset;
	r = radeon_ib_schedule(rdev, parser->ib);
out:
	if (!r) {
		if (vm->fence) {
			radeon_fence_unref(&vm->fence);
		}
		vm->fence = radeon_fence_ref(parser->ib->fence);
	}
	mutex_unlock(&fpriv->vm.mutex);
	return r;
}

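/* Top level entry point for the CS ioctl: initialize the parser, resolve
 * the relocations and hand the IB to the non-VM or VM submission path.
 */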
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	radeon_mutex_lock(&rdev->cs_mutex);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		radeon_mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		radeon_mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r);
	radeon_mutex_unlock(&rdev->cs_mutex);
	return r;
}

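/* Copy any IB pages not yet pulled from userspace directly into the IB. */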
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}

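/* Copy the page containing pg_idx from userspace into the least recently
 * used of the two bounce pages (any skipped pages go directly into the IB)
 * and return the bounce page index that now holds pg_idx.
 */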
int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB here */
	memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}