/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

struct eb_objects {
	struct list_head objects;
	int and;
	union {
		struct drm_i915_gem_object *lut[0];
		struct hlist_head buckets[0];
	};
};

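/*
 * eb_create() picks one of two handle->object lookup schemes and records the
 * choice in eb->and: a negative value (-buffer_count) means a flat lut[]
 * indexed directly by execbuffer index (I915_EXEC_HANDLE_LUT), while a
 * non-negative value is the mask for a power-of-two hash table of buckets
 * keyed by handle. eb_get_object() below decodes the same field.
 */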
static struct eb_objects *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_objects *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		int size = args->buffer_count;
		size *= sizeof(struct drm_i915_gem_object *);
		size += sizeof(struct eb_objects);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		int size = args->buffer_count;
		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_objects),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->objects);
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static int
eb_lookup_objects(struct eb_objects *eb,
		  struct drm_i915_gem_exec_object2 *exec,
		  const struct drm_i915_gem_execbuffer2 *args,
		  struct drm_file *file)
{
	int i;

	spin_lock(&file->table_lock);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			return -ENOENT;
		}

		if (!list_empty(&obj->exec_list)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			return -EINVAL;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->exec_list, &eb->objects);

		obj->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = obj;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			obj->exec_handle = handle;
			hlist_add_head(&obj->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
	}
	spin_unlock(&file->table_lock);

	return 0;
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct drm_i915_gem_object *obj;

			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
			if (obj->exec_handle == handle)
				return obj;
		}
		return NULL;
	}
}

static void
eb_destroy(struct eb_objects *eb)
{
	while (!list_empty(&eb->objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

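/*
 * Two paths for poking the relocation value into the target page:
 * relocate_entry_cpu() kmaps the backing page and writes through the CPU
 * (chosen by use_cpu_reloc() when the object is in the CPU write domain,
 * not GTT-mappable, or cacheable), while relocate_entry_gtt() writes
 * through an atomic WC mapping of the object in the GGTT aperture.
 */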
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	uint32_t page_offset = offset_in_page(reloc->offset);
	char *vaddr;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = reloc->delta;
	kunmap_atomic(vaddr);

	return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform. */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      reloc->offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(reloc->delta, reloc_entry);
	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct i915_address_space *vm)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc);
	else
		ret = relocate_entry_gtt(obj, reloc);

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

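/*
 * Fast-path relocation processing: user relocation entries are pulled in
 * batches onto a small stack buffer with __copy_from_user_inatomic() (the
 * caller has pagefaults disabled), applied one by one, and any updated
 * presumed_offset is written straight back to userspace. A fault here
 * surfaces as -EFAULT and sends the caller down the slow path.
 */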
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb,
				    struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
								 vm);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs,
					 struct i915_address_space *vm)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
							 vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_objects *eb,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(obj, &eb->objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(obj);
}

static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
				   struct intel_ring_buffer *ring,
				   struct i915_address_space *vm,
				   bool *need_reloc)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(obj);

	ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
				  false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
		entry->offset = i915_gem_obj_offset(obj, vm);
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
	    !obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!i915_gem_obj_bound_any(obj))
		return;

	entry = obj->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

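/*
 * Reservation first sorts the request so that objects needing a mappable
 * (aperture) placement come before the rest, then pins everything using the
 * three-phase scheme described below. On -ENOSPC the address space is
 * evicted once and the loop retried; a second failure is returned to the
 * caller.
 */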
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *objects,
			    struct i915_address_space *vm,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct list_head ordered_objects;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;
			u32 obj_offset;

			if (!i915_gem_obj_bound(obj, vm))
				continue;

			obj_offset = i915_gem_obj_offset(obj, vm);
			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(obj);

			WARN_ON((need_mappable || need_fence) &&
				!i915_is_ggtt(vm));

			if ((entry->alignment &&
			     obj_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
			else
				ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (i915_gem_obj_bound(obj, vm))
				continue;

			ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list)
			i915_gem_execbuffer_unreserve_object(obj);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_everything(ring->dev);
		if (ret)
			return ret;
	} while (1);
}

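/*
 * Slow path, entered when the atomic fast path faulted on the user
 * relocation lists: drop every object reference and struct_mutex, copy all
 * relocations into a kernel buffer with faults allowed (invalidating the
 * user's presumed offsets as we go), then retake the lock, look the objects
 * up again, re-reserve them and apply the relocations from the copy.
 */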
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_address_space *vm)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	int count = args->buffer_count;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->objects)) {
		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(obj, &eb->objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset],
							       vm);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are, this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

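/*
 * Before dispatch every object is synchronised to the target ring and any
 * CPU dirt is flushed: clflush (plus a chipset flush) for CPU-domain
 * writes, a write memory barrier for GTT writes, and finally the ring's
 * caches are invalidated so the batch sees coherent data.
 */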
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

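/*
 * Sanity checks on the user's exec list done before taking any locks:
 * reject unknown per-object flags, guard against the total relocation
 * count overflowing the single array allocated by the slow path, and make
 * sure each relocation array is writable (prefaulting it in unless that
 * has been disabled via i915_prefault_disable).
 */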
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	int relocs_total = 0;
	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915_prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct i915_address_space *vm,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		/* FIXME: This lookup gets fixed later <-- danvet */
		list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
		i915_gem_object_move_to_active(obj, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

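/*
 * Common back end for both execbuffer ioctls: validate the flags and the
 * exec list, select the ring, look up and reserve all objects, apply
 * relocations (falling back to the slow path on a fault), flush caches,
 * switch context and finally dispatch the batch, tagging every object as
 * active on the chosen ring.
 */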
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct i915_address_space *vm)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_VEBOX:
		ring = &dev_priv->ring[VECS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;

	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb, vm);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec, vm);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but let's be paranoid and do it
	 * unconditionally for now. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = i915_gem_obj_offset(batch_obj, vm) +
		args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}