/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

struct eb_objects {
	struct list_head objects;
	int and;
	struct hlist_head buckets[0];
};

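/*
 * Per-execbuffer lookup table mapping user handles to GEM objects. The
 * bucket array is sized to a power of two no larger than the buffer count
 * (and at most half a page of hlist heads), so "handle & eb->and" selects
 * a bucket directly.
 */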
static struct eb_objects *
eb_create(int size)
{
	struct eb_objects *eb;
	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
	BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
	while (count > size)
		count >>= 1;
	eb = kzalloc(count*sizeof(struct hlist_head) +
		     sizeof(struct eb_objects),
		     GFP_KERNEL);
	if (eb == NULL)
		return eb;

	eb->and = count - 1;
	INIT_LIST_HEAD(&eb->objects);
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
	hlist_add_head(&obj->exec_node,
		       &eb->buckets[obj->exec_handle & eb->and]);
}

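/*
 * Resolve each user handle in the exec list to its GEM object under the
 * file's table_lock, take a reference, and add the object to both the
 * execbuffer list and the handle hash table. A handle that appears more
 * than once in the list is rejected.
 */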
static int
eb_lookup_objects(struct eb_objects *eb,
		  struct drm_i915_gem_exec_object2 *exec,
		  int count,
		  struct drm_file *file)
{
	int i;

	spin_lock(&file->table_lock);
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			return -ENOENT;
		}

		if (!list_empty(&obj->exec_list)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			return -EINVAL;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->exec_list, &eb->objects);

		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}
	spin_unlock(&file->table_lock);

	return 0;
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct drm_i915_gem_object *obj;

	head = &eb->buckets[handle & eb->and];
	hlist_for_each(node, head) {
		obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
		if (obj->exec_handle == handle)
			return obj;
	}

	return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
	while (!list_empty(&eb->objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}
	kfree(eb);
}

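/*
 * Decide whether a relocation should be written through a CPU mapping
 * rather than through the GTT: the object is already in the CPU write
 * domain, cannot be mapped through the mappable aperture, or is not
 * uncached.
 */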
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

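/*
 * Perform a single relocation: look up the target object, validate the
 * requested domains and offset, and, unless the presumed offset already
 * matches, write target_offset + delta at reloc->offset within the object
 * via either a CPU kmap or an atomic GTT mapping.
 */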
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = target_i915_obj->gtt_offset;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj)) {
		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
		char *vaddr;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			return ret;

		vaddr = kmap_atomic(i915_gem_object_get_page(obj,
							     reloc->offset >> PAGE_SHIFT));
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform. */
		reloc->offset += obj->gtt_offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + (reloc->offset & ~PAGE_MASK));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
			     struct eb_objects *eb)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmapped bo. In such a case, the page fault
	 * handler would call i915_gem_fault() and we would try to acquire
	 * the struct mutex again. Obviously this is bad and so lockdep
	 * complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(obj, &eb->objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

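/*
 * Internal flags kept in the high bits of exec_object2.flags to record what
 * i915_gem_execbuffer_reserve_object() acquired, so that
 * i915_gem_execbuffer_unreserve_object() can release only what was taken.
 */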
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(obj);
}

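/*
 * Pin a single object into the GTT for this execbuffer, acquiring a fence
 * register if the entry demands one, and record what was taken in the entry
 * flags so it can be released later. If the object ended up away from its
 * presumed offset, flag that a relocation pass is required.
 */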
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
				   struct intel_ring_buffer *ring,
				   bool *need_reloc)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(obj);

	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	if (entry->offset != obj->gtt_offset) {
		entry->offset = obj->gtt_offset;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
	    !obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!obj->gtt_space)
		return;

	entry = obj->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

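/*
 * Reserve GTT space for every object in the execbuffer: objects that need a
 * fence or a mappable mapping are ordered first, then each object is pinned
 * in turn; on -ENOSPC everything is evicted once and the loop retried before
 * giving up.
 */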
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct drm_file *file,
			    struct list_head *objects,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct list_head ordered_objects;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;

			if (!obj->gtt_space)
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(obj);

			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_gem_object_unbind(obj);
			else
				ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (obj->gtt_space)
				continue;

			ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list)
			i915_gem_execbuffer_unreserve_object(obj);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_everything(ring->dev);
		if (ret)
			return ret;
	} while (1);
}

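/*
 * Slow-path relocation: the fast path faulted while copying relocations, so
 * drop the objects and struct_mutex, copy all relocation entries with
 * pagefaults enabled, then retake the lock, re-look up the objects and apply
 * the relocations from the kernel copy.
 */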
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	int count = args->buffer_count;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->objects)) {
		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_objects(eb, exec, count, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(obj, &eb->objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are; this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

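/*
 * Flush outstanding CPU writes and synchronise with any other rings still
 * using these objects, then invalidate the GPU caches before the batch runs.
 */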
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	uint32_t flush_domains = 0;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
			return -EINVAL;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/* we may also need to update the presumed offsets */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (fault_in_multipages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}

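/*
 * Commit the pending read/write domains computed during relocation and mark
 * every object as active on this ring, recording the request seqno for
 * written objects so that retirement can track them.
 */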
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)i915_add_request(ring, file, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

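/*
 * Core execbuffer path shared by both ioctls: validate the arguments, look
 * up and reserve the objects, apply relocations (falling back to the slow
 * path on a fault), flush and synchronise, then dispatch the batch on the
 * selected ring and mark the objects active.
 */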
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   (struct drm_clip_rect __user *)(uintptr_t)
				   args->cliprects_ptr,
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args->buffer_count);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_objects(eb, exec, args->buffer_count, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(dev, eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but let's be paranoid and do it
	 * unconditionally for now. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->objects, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (void __user *)(uintptr_t)args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}