/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

struct eb_objects {
	int and;
	struct hlist_head buckets[0];
};

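/*
 * Allocate the per-execbuffer handle->object hash table. The bucket count
 * starts at half a page worth of hlist_heads and is halved until it fits
 * the requested size, so it stays a power of two and (handle & eb->and)
 * can be used as the hash.
 */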
static struct eb_objects *
eb_create(int size)
{
	struct eb_objects *eb;
	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
	BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
	while (count > size)
		count >>= 1;
	eb = kzalloc(count*sizeof(struct hlist_head) +
		     sizeof(struct eb_objects),
		     GFP_KERNEL);
	if (eb == NULL)
		return eb;

	eb->and = count - 1;
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
	hlist_add_head(&obj->exec_node,
		       &eb->buckets[obj->exec_handle & eb->and]);
}

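/*
 * Look up every object referenced by the execbuffer, take a reference on
 * each and add it to both the request's object list and the eb hash table.
 * Unknown or duplicated handles are rejected under the file's table_lock.
 */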
static int
eb_lookup_objects(struct eb_objects *eb,
		  struct drm_i915_gem_exec_object2 *exec,
		  int count,
		  struct drm_file *file,
		  struct list_head *objects)
{
	int i;

	spin_lock(&file->table_lock);
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			return -ENOENT;
		}

		if (!list_empty(&obj->exec_list)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			return -EINVAL;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->exec_list, objects);

		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}
	spin_unlock(&file->table_lock);

	return 0;
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct drm_i915_gem_object *obj;

	head = &eb->buckets[handle & eb->and];
	hlist_for_each(node, head) {
		obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
		if (obj->exec_handle == handle)
			return obj;
	}

	return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
	kfree(eb);
}

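/*
 * Relocations are written through the CPU when the object is already in
 * the CPU write domain, is not mappable and fenceable, or is not uncached;
 * otherwise they are written through the GTT mapping.
 */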
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

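/*
 * Apply a single relocation: validate the target handle, domains and
 * offset, patch the object at reloc->offset with target_offset + delta
 * (through either a CPU kmap or the GTT), and record the target's current
 * offset as the new presumed_offset.
 */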
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = target_i915_obj->gtt_offset;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj)) {
		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
		char *vaddr;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			return ret;

		vaddr = kmap_atomic(i915_gem_object_get_page(obj,
							     reloc->offset >> PAGE_SHIFT));
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform. */
		reloc->offset += obj->gtt_offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + (reloc->offset & ~PAGE_MASK));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

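/*
 * Process all relocations for one object. The entries are copied from
 * userspace in small batches onto the stack so the copies can run with
 * pagefaults disabled, and any changed presumed offsets are written back
 * to the user array as we go.
 */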
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

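/*
 * Fast-path relocation pass over every object in the execbuffer, run with
 * pagefaults disabled; a fault here returns -EFAULT and the caller falls
 * back to the slow path.
 */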
static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
			     struct eb_objects *eb,
			     struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(obj);
}

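/*
 * Pin a single object into the GTT, grabbing a fence register and an
 * aliasing ppgtt mapping if required, and report its final offset back
 * through the exec entry.
 */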
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(obj);

	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	entry->offset = obj->gtt_offset;
	return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!obj->gtt_space)
		return;

	entry = obj->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

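/*
 * Reserve GTT space for every object in the execbuffer. Objects that need
 * to be mappable are moved to the front of the list, ill-fitting bindings
 * are undone, and on -ENOSPC everything is evicted once before retrying.
 */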
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct drm_file *file,
			    struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct list_head ordered_objects;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;

			if (!obj->gtt_space)
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(obj);

			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_gem_object_unbind(obj);
			else
				ret = i915_gem_execbuffer_reserve_object(obj, ring);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (obj->gtt_space)
				continue;

			ret = i915_gem_execbuffer_reserve_object(obj, ring);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list)
			i915_gem_execbuffer_unreserve_object(obj);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_everything(ring->dev);
		if (ret)
			return ret;
	} while (1);
}

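/*
 * Slow-path relocations: drop the objects and struct_mutex, copy all of
 * the relocation entries from userspace with pagefaults enabled, then
 * retake the lock, reacquire the objects and apply the relocations from
 * the kernel copy.
 */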
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct list_head *objects,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  int count)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	int *reloc_offset;
	int i, total, ret;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(objects)) {
		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_objects(eb, exec, count, file, objects);
	if (ret)
		goto err;

	ret = i915_gem_execbuffer_reserve(ring, file, objects);
	if (ret)
		goto err;

	list_for_each_entry(obj, objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are: this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

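/*
 * Flush any outstanding CPU/GTT writes for every object, synchronise with
 * rings still using them, and invalidate the GPU caches before the batch
 * is dispatched.
 */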
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	uint32_t flush_domains = 0;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

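/*
 * Sanity-check the user's exec list: guard against relocation counts that
 * would overflow and make sure each relocation array is readable and
 * writable before committing to the ioctl.
 */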
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		int length; /* limited by fault_in_pages_readable() */

		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
			return -EINVAL;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		if (!access_ok(VERIFY_READ, ptr, length))
			return -EFAULT;

		/* we may also need to update the presumed offsets */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (fault_in_multipages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}

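/*
 * Commit the pending read/write domains on each object and move it to the
 * ring's active list, recording the write seqno for any object the batch
 * will write.
 */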
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.read_domains = obj->base.pending_read_domains;
		obj->base.write_domain = obj->base.pending_write_domain;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)i915_add_request(ring, file, NULL);
}

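/*
 * Zero the four GEN7_SO_WRITE_OFFSET registers via MI_LOAD_REGISTER_IMM.
 * This only takes effect on the gen7 render ring; on other rings or
 * generations it is a no-op.
 */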
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

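/*
 * Core of the execbuffer ioctls: validate the arguments, look up and pin
 * all objects, apply relocations, flush caches, switch context and finally
 * dispatch the batch on the selected ring.
 */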
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head objects;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask;
	u32 flags;
	int ret, mode, i;

	if (!i915_gem_check_execbuffer(args)) {
		DRM_DEBUG("execbuf with invalid offset/length\n");
		return -EINVAL;
	}

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   (struct drm_clip_rect __user *)(uintptr_t)
				   args->cliprects_ptr,
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args->buffer_count);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	INIT_LIST_HEAD(&objects);
	ret = eb_lookup_objects(eb, exec, args->buffer_count, file, &objects);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
								&objects, eb,
								exec,
								args->buffer_count);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but let's be paranoid and do it
	 * unconditionally for now. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&objects, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
	eb_destroy(eb);
	while (!list_empty(&objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}


/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (void __user *)(uintptr_t)args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

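/*
 * execbuffer2 ioctl: copy in the exec list, run the common execbuffer path
 * and copy the updated buffer offsets back out to userspace on success.
 */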
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}