/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

struct change_domains {
	uint32_t invalidate_domains;
	uint32_t flush_domains;
	uint32_t flush_rings;
};

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped to GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
				  struct intel_ring_buffer *ring,
				  struct change_domains *cd)
{
	uint32_t invalidate_domains = 0, flush_domains = 0;

	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->base.pending_write_domain == 0)
		obj->base.pending_read_domains |= obj->base.read_domains;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->base.write_domain &&
	    (((obj->base.write_domain != obj->base.pending_read_domains ||
	       obj->ring != ring)) ||
	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
		flush_domains |= obj->base.write_domain;
		invalidate_domains |=
			obj->base.pending_read_domains & ~obj->base.write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
		i915_gem_clflush_object(obj);

	/* blow away mappings if mapped through GTT */
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
		i915_gem_release_mmap(obj);

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains). So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
		obj->base.pending_write_domain = obj->base.write_domain;

	cd->invalidate_domains |= invalidate_domains;
	cd->flush_domains |= flush_domains;
	if (flush_domains & I915_GEM_GPU_DOMAINS)
		cd->flush_rings |= obj->ring->id;
	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
		cd->flush_rings |= ring->id;
}
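
/*
 * Note: the domains accumulated in struct change_domains above are consumed
 * once per execbuffer by i915_gem_execbuffer_move_to_gpu(), which emits a
 * single flush/invalidate covering every object on the list rather than
 * flushing object by object.
 */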

struct eb_objects {
	int and;
	struct hlist_head buckets[0];
};

static struct eb_objects *
eb_create(int size)
{
	struct eb_objects *eb;
	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
	while (count > size)
		count >>= 1;
	eb = kzalloc(count*sizeof(struct hlist_head) +
		     sizeof(struct eb_objects),
		     GFP_KERNEL);
	if (eb == NULL)
		return eb;

	eb->and = count - 1;
	return eb;
}
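
/*
 * A rough sketch of how this lookaside table is used during execbuffer
 * (the helper names below are the ones defined in this file):
 *
 *	eb = eb_create(args->buffer_count);
 *	for each exec entry:
 *		look up the handle and eb_add_object(eb, obj);
 *	...relocations resolve targets via eb_get_object(eb, handle)...
 *	eb_destroy(eb);
 *
 * The bucket count chosen by eb_create() is a power of two no larger than
 * half a page's worth of hlist_heads, so "handle & eb->and" indexes a bucket.
 */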

static void
eb_reset(struct eb_objects *eb)
{
	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
	hlist_add_head(&obj->exec_node,
		       &eb->buckets[obj->exec_handle & eb->and]);
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct drm_i915_gem_object *obj;

	head = &eb->buckets[handle & eb->and];
	hlist_for_each(node, head) {
		obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
		if (obj->exec_handle == handle)
			return obj;
	}

	return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
	kfree(eb);
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_offset = to_intel_bo(target_obj)->gtt_offset;

#if WATCH_RELOC
	DRM_INFO("%s: obj %p offset %08x target %d "
		 "read %08x write %08x gtt %08x "
		 "presumed %08x delta %08x\n",
		 __func__,
		 obj,
		 (int) reloc->offset,
		 (int) reloc->target_handle,
		 (int) reloc->read_domains,
		 (int) reloc->write_domain,
		 (int) target_offset,
		 (int) reloc->presumed_offset,
		 reloc->delta);
#endif

	/* The target buffer should have appeared before us in the
	 * exec_object list, so it should have a GTT space bound by now.
	 */
	if (unlikely(target_offset == 0)) {
		DRM_ERROR("No GTT space found for object %d\n",
			  reloc->target_handle);
		return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_ERROR("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
		DRM_ERROR("reloc with read/write CPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
		     reloc->write_domain != target_obj->pending_write_domain)) {
		DRM_ERROR("Write domain conflict: "
			  "obj %p target %d offset %d "
			  "new %08x old %08x\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->write_domain,
			  target_obj->pending_write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_ERROR("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_ERROR("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* and points to somewhere within the target object. */
	if (unlikely(reloc->delta >= target_obj->size)) {
		DRM_ERROR("Relocation beyond target object bounds: "
			  "obj %p target %d delta %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->delta,
			  (int) target_obj->size);
		return ret;
	}

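	/*
	 * Two write paths below: if the object is still in the CPU write
	 * domain we can poke the relocation value through a kmap of the
	 * backing page; otherwise we move it to the GTT domain and write
	 * through the GTT aperture with io_mapping/iowrite32.
	 */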
	reloc->delta += target_offset;
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
		char *vaddr;

		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform. */
		reloc->offset += obj->gtt_offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + (reloc->offset & ~PAGE_MASK));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb)
{
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry reloc;

		if (__copy_from_user_inatomic(&reloc,
					      user_relocs+i,
					      sizeof(reloc)))
			return -EFAULT;

		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
		if (ret)
			return ret;

		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
					    &reloc.presumed_offset,
					    sizeof(reloc.presumed_offset)))
			return -EFAULT;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
			     struct eb_objects *eb,
			     struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct drm_file *file,
			    struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int ret, retry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	struct list_head ordered_objects;

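	/*
	 * Build ordered_objects so that entries which need to be mappable
	 * (or fenced) sit at the head of the list and are bound first;
	 * this should give them first claim on the limited mappable
	 * aperture before the remaining buffers are placed.
	 */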
	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable =
			entry->relocation_count ? true : need_fence;

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;
			if (!obj->gtt_space)
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable =
				entry->relocation_count ? true : need_fence;

			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_gem_object_unbind(obj);
			else
				ret = i915_gem_object_pin(obj,
							  entry->alignment,
							  need_mappable);
			if (ret)
				goto err;

			entry++;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;

			if (!obj->gtt_space) {
				bool need_mappable =
					entry->relocation_count ? true : need_fence;

				ret = i915_gem_object_pin(obj,
							  entry->alignment,
							  need_mappable);
				if (ret)
					break;
			}

			if (has_fenced_gpu_access) {
				if (need_fence) {
					ret = i915_gem_object_get_fence(obj, ring, 1);
					if (ret)
						break;
				} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
					   obj->tiling_mode == I915_TILING_NONE) {
					/* XXX pipelined! */
					ret = i915_gem_object_put_fence(obj);
					if (ret)
						break;
				}
				obj->pending_fenced_gpu_access = need_fence;
			}

			entry->offset = obj->gtt_offset;
		}

		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (obj->gtt_space)
				i915_gem_object_unpin(obj);
		}

		if (ret != -ENOSPC || retry > 1)
			return ret;

		/* First attempt, just clear anything that is purgeable.
		 * Second attempt, clear the entire GTT.
		 */
		ret = i915_gem_evict_everything(ring->dev, retry == 0);
		if (ret)
			return ret;

		retry++;
	} while (1);

err:
	obj = list_entry(obj->exec_list.prev,
			 struct drm_i915_gem_object,
			 exec_list);
	while (objects != &obj->exec_list) {
		if (obj->gtt_space)
			i915_gem_object_unpin(obj);

		obj = list_entry(obj->exec_list.prev,
				 struct drm_i915_gem_object,
				 exec_list);
	}

	return ret;
}

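/*
 * Slow-path relocation: we get here when the fast path faulted while reading
 * the user's relocation entries. Roughly, this drops struct_mutex, copies
 * every relocation array into kernel memory with copy_from_user() (which may
 * sleep and fault pages in), then retakes the lock, re-looks-up the objects
 * and applies the relocations from the kernel copy.
 */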
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct list_head *objects,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  int count)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	int *reloc_offset;
	int i, total, ret;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(objects)) {
		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (obj == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		list_add_tail(&obj->exec_list, objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	ret = i915_gem_execbuffer_reserve(ring, file, objects);
	if (ret)
		goto err;

	list_for_each_entry(obj, objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are, this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_flush(struct drm_device *dev,
			  uint32_t invalidate_domains,
			  uint32_t flush_domains,
			  uint32_t flush_rings)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		intel_gtt_chipset_flush();

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
		for (i = 0; i < I915_NUM_RINGS; i++)
			if (flush_rings & (1 << i)) {
				ret = i915_gem_flush_ring(dev,
							  &dev_priv->ring[i],
							  invalidate_domains,
							  flush_domains);
				if (ret)
					return ret;
			}
	}

	return 0;
}

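/*
 * In rough terms: before 'to' may consume an object last rendered on a
 * different ring, emit (or reuse) a request on the old ring and insert a
 * semaphore wait so that 'to' stalls until that seqno has passed. On gens
 * without semaphores (< 6) we simply wait for rendering on the CPU instead.
 */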
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *to)
{
	struct intel_ring_buffer *from = obj->ring;
	u32 seqno;
	int ret, idx;

	if (from == NULL || to == from)
		return 0;

	if (INTEL_INFO(obj->base.dev)->gen < 6)
		return i915_gem_object_wait_rendering(obj, true);

	idx = intel_ring_sync_index(from, to);

	seqno = obj->last_rendering_seqno;
	if (seqno <= from->sync_seqno[idx])
		return 0;

	if (seqno == from->outstanding_lazy_request) {
		struct drm_i915_gem_request *request;

		request = kzalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ret = i915_add_request(obj->base.dev, NULL, request, from);
		if (ret) {
			kfree(request);
			return ret;
		}

		seqno = request->seqno;
	}

	from->sync_seqno[idx] = seqno;
	return intel_ring_sync(to, from, seqno - 1);
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct change_domains cd;
	int ret;

	cd.invalidate_domains = 0;
	cd.flush_domains = 0;
	cd.flush_rings = 0;
	list_for_each_entry(obj, objects, exec_list)
		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);

	if (cd.invalidate_domains | cd.flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 cd.invalidate_domains,
			 cd.flush_domains);
#endif
		ret = i915_gem_execbuffer_flush(ring->dev,
						cd.invalidate_domains,
						cd.flush_domains,
						cd.flush_rings);
		if (ret)
			return ret;
	}

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_sync_rings(obj, ring);
		if (ret)
			return ret;
	}

	return 0;
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

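/*
 * Sanity-check the user's exec list before we take struct_mutex: guard
 * against relocation counts that would overflow the size calculation, check
 * that each relocation array is readable and writable (we write the presumed
 * offsets back), and pre-fault the pages so the atomic copies in the fast
 * path are less likely to fail.
 */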
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		int length; /* limited by fault_in_pages_readable() */

		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
			return -EINVAL;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		if (!access_ok(VERIFY_READ, ptr, length))
			return -EFAULT;

		/* we may also need to update the presumed offsets */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (fault_in_pages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}

static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
				   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int flips;

	/* Check for any pending flips. As we only maintain a flip queue depth
	 * of 1, we can simply insert a WAIT for the next display flip prior
	 * to executing the batch and avoid stalling the CPU.
	 */
	flips = 0;
	list_for_each_entry(obj, objects, exec_list) {
		if (obj->base.write_domain)
			flips |= atomic_read(&obj->pending_flip);
	}
	if (flips) {
		int plane, flip_mask, ret;

		for (plane = 0; flips >> plane; plane++) {
			if (((flips >> plane) & 1) == 0)
				continue;

			if (plane)
				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
			else
				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

			ret = intel_ring_begin(ring, 2);
			if (ret)
				return ret;

			intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
			intel_ring_emit(ring, MI_NOOP);
			intel_ring_advance(ring);
		}
	}

	return 0;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct intel_ring_buffer *ring,
				   u32 seqno)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->base.write_domain = obj->base.pending_write_domain;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring, seqno);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->pending_gpu_write = true;
			list_move_tail(&obj->gpu_write_list,
				       &ring->gpu_write_list);
			intel_mark_busy(ring->dev, obj);
		}

		trace_i915_gem_object_change_domain(obj,
						    obj->base.read_domains,
						    obj->base.write_domain);
	}
}

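/*
 * After the batch has been dispatched: invalidate the command domain (and,
 * on gen4+, the sampler) and add a request as a breadcrumb so completion of
 * the batch can be tracked. If the flush or the request cannot be queued we
 * fall back to just reserving the next request seqno and return.
 */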
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_request *request;
	u32 invalidate;

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires.
	 *
	 * The sampler always gets flushed on i965 (sigh).
	 */
	invalidate = I915_GEM_DOMAIN_COMMAND;
	if (INTEL_INFO(dev)->gen >= 4)
		invalidate |= I915_GEM_DOMAIN_SAMPLER;
	if (ring->flush(ring, invalidate, 0)) {
		i915_gem_next_request_seqno(dev, ring);
		return;
	}

	/* Add a breadcrumb for the completion of the batch buffer */
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL || i915_add_request(dev, file, request, ring)) {
		i915_gem_next_request_seqno(dev, ring);
		kfree(request);
	}
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head objects;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 exec_start, exec_len;
	u32 seqno;
	int ret, mode, i;

	if (!i915_gem_check_execbuffer(args)) {
		DRM_ERROR("execbuf with invalid offset/length\n");
		return -EINVAL;
	}

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		if (!HAS_BSD(dev)) {
			DRM_ERROR("execbuf with invalid ring (BSD)\n");
			return -EINVAL;
		}
		ring = &dev_priv->ring[VCS];
		break;
	case I915_EXEC_BLT:
		if (!HAS_BLT(dev)) {
			DRM_ERROR("execbuf with invalid ring (BLT)\n");
			return -EINVAL;
		}
		ring = &dev_priv->ring[BCS];
		break;
	default:
		DRM_ERROR("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			ret = intel_ring_begin(ring, 4);
			if (ret)
				return ret;

			intel_ring_emit(ring, MI_NOOP);
			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit(ring, INSTPM);
			intel_ring_emit(ring,
					I915_EXEC_CONSTANTS_MASK << 16 | mode);
			intel_ring_advance(ring);

			dev_priv->relative_constants_mode = mode;
		}
		break;
	default:
		DRM_ERROR("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_ERROR("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   (struct drm_clip_rect __user *)(uintptr_t)
				   args->cliprects_ptr,
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args->buffer_count);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	INIT_LIST_HEAD(&objects);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (obj == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			/* prevent error path from reading uninitialized data */
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->exec_list)) {
			DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		list_add_tail(&obj->exec_list, &objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
								&objects, eb,
								exec,
								args->buffer_count);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
	if (ret)
		goto err;

	ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
	if (ret)
		goto err;

	seqno = i915_gem_next_request_seqno(dev, ring);
	for (i = 0; i < I915_NUM_RINGS-1; i++) {
		if (seqno < ring->sync_seqno[i]) {
			/* The GPU can not handle its semaphore value wrapping,
			 * so every billion or so execbuffers, we need to stall
			 * the GPU in order to reset the counters.
			 */
			ret = i915_gpu_idle(dev);
			if (ret)
				goto err;

			BUG_ON(ring->sync_seqno[i]);
		}
	}

	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
		if (ret)
			goto err;
	}

	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
	i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
	eb_destroy(eb);
	while (!list_empty(&objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}