/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

struct eb_objects {
	int and;
	struct hlist_head buckets[0];
};
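
/*
 * Note: eb_objects is a small chained hash table mapping execbuffer handles
 * to objects. "and" stores the bucket count minus one; since the count is a
 * power of two it doubles as the hash mask, i.e. bucket = handle & eb->and.
 * As an illustration (assuming 4096-byte pages and 8-byte hlist_heads, which
 * is configuration dependent), eb_create() below starts from 256 buckets and
 * halves until the table is no larger than the buffer count requires.
 */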

static struct eb_objects *
eb_create(int size)
{
	struct eb_objects *eb;
	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
	BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
	while (count > size)
		count >>= 1;
	eb = kzalloc(count*sizeof(struct hlist_head) +
		     sizeof(struct eb_objects),
		     GFP_KERNEL);
	if (eb == NULL)
		return eb;

	eb->and = count - 1;
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
	hlist_add_head(&obj->exec_node,
		       &eb->buckets[obj->exec_handle & eb->and]);
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct drm_i915_gem_object *obj;

	head = &eb->buckets[handle & eb->and];
	hlist_for_each(node, head) {
		obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
		if (obj->exec_handle == handle)
			return obj;
	}

	return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
	kfree(eb);
}
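
/*
 * A minimal sketch of the lookup-table lifecycle as used by the execbuffer
 * paths below (assumes struct_mutex is held and the handles are valid):
 *
 *	eb = eb_create(args->buffer_count);
 *	for each exec entry i:
 *		obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
 *		obj->exec_handle = exec[i].handle;
 *		eb_add_object(eb, obj);
 *	...
 *	target = eb_get_object(eb, reloc->target_handle);
 *	...
 *	eb_destroy(eb);
 */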

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}
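
/*
 * Note: a relocation goes through the CPU (kmap) path when the object is
 * already being written through the CPU domain, cannot be placed in the
 * mappable GTT aperture, or is CPU-cacheable; otherwise the GTT (iomap)
 * path is used so the object need not be pulled into the CPU domain.
 */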

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = target_i915_obj->gtt_offset;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* The target buffer should have appeared before us in the
	 * exec_object list, so it should have a GTT space bound by now.
	 */
	if (unlikely(target_offset == 0)) {
		DRM_DEBUG("No GTT space found for object %d\n",
			  reloc->target_handle);
		return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
		     reloc->write_domain != target_obj->pending_write_domain)) {
		DRM_DEBUG("Write domain conflict: "
			  "obj %p target %d offset %d "
			  "new %08x old %08x\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->write_domain,
			  target_obj->pending_write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj)) {
		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
		char *vaddr;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			return ret;

		vaddr = kmap_atomic(i915_gem_object_get_page(obj,
							     reloc->offset >> PAGE_SHIFT));
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform. */
		reloc->offset += obj->gtt_offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + (reloc->offset & ~PAGE_MASK));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}
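
/*
 * Illustrative example (the values are hypothetical): userspace emitted a
 * pointer at byte 0x80 of "obj" assuming its target sat at GTT offset
 * 0x10000, so it passed
 *
 *	reloc.offset          = 0x80;
 *	reloc.delta           = 0;
 *	reloc.presumed_offset = 0x10000;
 *
 * If the target was actually bound at 0x20000, the code above rewrites the
 * dword at offset 0x80 to 0x20000 + delta and records the new offset in
 * presumed_offset, making the relocation free next time round.
 */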

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}
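
/*
 * Note: relocations are staged through a 512-byte on-stack buffer (16
 * entries of 32 bytes each) and copied with the _inatomic user-access
 * helpers because this runs under pagefault_disable(); a fault is reported
 * as -EFAULT so that the caller can fall back to the slow path.
 */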

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
			     struct eb_objects *eb,
			     struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}
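
/*
 * Note: when the fast path above fails with -EFAULT,
 * i915_gem_do_execbuffer() retries via the _slow variant, which copies all
 * relocation entries up front with the mutex dropped and faults permitted.
 */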

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
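
/*
 * Note: these kernel-internal markers occupy the top bits of the
 * userspace-visible exec_object2 flags while a buffer is reserved and are
 * cleared again by i915_gem_execbuffer_unreserve_object().
 */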

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(obj);
}

static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(obj);

	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	entry->offset = obj->gtt_offset;
	return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!obj->gtt_space)
		return;

	entry = obj->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct drm_file *file,
			    struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct list_head ordered_objects;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;

			if (!obj->gtt_space)
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(obj);

			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_gem_object_unbind(obj);
			else
				ret = i915_gem_execbuffer_reserve_object(obj, ring);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (obj->gtt_space)
				continue;

			ret = i915_gem_execbuffer_reserve_object(obj, ring);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list)
			i915_gem_execbuffer_unreserve_object(obj);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_everything(ring->dev);
		if (ret)
			return ret;
	} while (1);
}
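
/*
 * Note: buffers that must sit in the mappable aperture are moved to the
 * front of the list so that they are bound first. On -ENOSPC the whole
 * working set is unpinned, i915_gem_evict_everything() defragments the
 * GTT, and reservation is retried exactly once before giving up.
 */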

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct list_head *objects,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  int count)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	int *reloc_offset;
	int i, total, ret;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(objects)) {
		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	for (i = 0; i < count; i++) {
		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		list_add_tail(&obj->exec_list, objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	ret = i915_gem_execbuffer_reserve(ring, file, objects);
	if (ret)
		goto err;

	list_for_each_entry(obj, objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}
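
/*
 * Note: because the slow path drops struct_mutex, it must release every
 * object reference first and look all handles up again afterwards; another
 * execbuffer may have run in between, so no state cached across the unlock
 * can be trusted.
 */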

static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
	u32 plane, flip_mask;
	int ret;

	/* Check for any pending flips. As we only maintain a flip queue depth
	 * of 1, we can simply insert a WAIT for the next display flip prior
	 * to executing the batch and avoid stalling the CPU.
	 */

	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;

		if (plane)
			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
		else
			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}

	return 0;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	uint32_t flush_domains = 0;
	uint32_t flips = 0;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj);

		if (obj->base.pending_write_domain)
			flips |= atomic_read(&obj->pending_flip);

		flush_domains |= obj->base.write_domain;
	}

	if (flips) {
		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
		if (ret)
			return ret;
	}

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		intel_gtt_chipset_flush();

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
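
/*
 * Note: OR-ing the two fields tests both alignments at once; the result
 * has a low bit set iff either value is not 8-byte aligned. For example,
 * start 0x40 with length 0x84 fails since (0x40 | 0x84) & 0x7 == 0x4.
 */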

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		int length; /* limited by fault_in_pages_readable() */

		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
			return -EINVAL;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		if (!access_ok(VERIFY_READ, ptr, length))
			return -EFAULT;

		/* we may also need to update the presumed offsets */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (fault_in_multipages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}
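
/*
 * Illustrative arithmetic: a relocation entry is 32 bytes, so the overflow
 * guard above rejects relocation_count values beyond INT_MAX/32 (roughly
 * 67 million); without it the multiplication into "length" could wrap to a
 * small or negative int and defeat the access_ok() checks.
 */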

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct intel_ring_buffer *ring,
				   u32 seqno)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.read_domains = obj->base.pending_read_domains;
		obj->base.write_domain = obj->base.pending_write_domain;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring, seqno);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = seqno;
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)i915_add_request(ring, file, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}
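
/*
 * Note: this clears the four gen7 streamout (SOL) write-offset registers
 * with one MI_LOAD_REGISTER_IMM per register; each write takes three
 * dwords, hence the intel_ring_begin(ring, 4 * 3) reservation above.
 */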

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head objects;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 seqno;
	u32 mask;
	u32 flags;
	int ret, mode, i;

	if (!i915_gem_check_execbuffer(args)) {
		DRM_DEBUG("execbuf with invalid offset/length\n");
		return -EINVAL;
	}

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   (struct drm_clip_rect __user *)(uintptr_t)
				   args->cliprects_ptr,
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args->buffer_count);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	INIT_LIST_HEAD(&objects);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			/* prevent error path from reading uninitialized data */
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->exec_list)) {
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		list_add_tail(&obj->exec_list, &objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
								&objects, eb,
								exec,
								args->buffer_count);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but let's be paranoid and do it
	 * unconditionally for now. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
	if (ret)
		goto err;

	seqno = i915_gem_next_request_seqno(ring);
	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
		if (seqno < ring->sync_seqno[i]) {
			/* The GPU can not handle its semaphore value wrapping,
			 * so every billion or so execbuffers, we need to stall
			 * the GPU in order to reset the counters.
			 */
			ret = i915_gpu_idle(dev);
			if (ret)
				goto err;
			i915_gem_retire_requests(dev);

			BUG_ON(ring->sync_seqno[i]);
		}
	}

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, seqno, flags);

	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
	i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
	eb_destroy(eb);
	while (!list_empty(&objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

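/*
 * A minimal userspace sketch (illustrative only; the handles, offsets and
 * lengths are hypothetical) of driving the execbuffer2 ioctl implemented
 * below, using libdrm's drmIoctl() wrapper on an open DRM fd:
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target,
 *		.offset = 0x80,
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *	};
 *	struct drm_i915_gem_exec_object2 objs[2] = {
 *		{ .handle = target },
 *		{ .handle = batch,
 *		  .relocation_count = 1,
 *		  .relocs_ptr = (uintptr_t)&reloc },
 *	};
 *	struct drm_i915_gem_execbuffer2 eb = {
 *		.buffers_ptr = (uintptr_t)objs,
 *		.buffer_count = 2,
 *		.batch_len = batch_len,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
 *
 * Note that the batch buffer is the last entry in the array, matching the
 * list_entry(objects.prev, ...) lookup in i915_gem_do_execbuffer().
 */
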
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (void __user *)(uintptr_t)args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}