/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

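/*
 * Per-execbuffer lookup of GEM objects by relocation handle.  A negative
 * "and" means handles are indices into the flat lut[] (its magnitude being
 * the number of entries); a non-negative "and" is the mask for the hash
 * buckets[].
 */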
struct eb_objects {
	struct list_head objects;
	int and;
	union {
		struct drm_i915_gem_object *lut[0];
		struct hlist_head buckets[0];
	};
};

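/*
 * Allocate the lookup structure.  With I915_EXEC_HANDLE_LUT the relocation
 * handles are indices into the exec object array, so a flat table indexed by
 * position can be used; otherwise (or if that allocation fails) fall back to
 * a hash table sized to roughly twice the buffer count.
 */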
static struct eb_objects *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_objects *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		int size = args->buffer_count;
		size *= sizeof(struct drm_i915_gem_object *);
		size += sizeof(struct eb_objects);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		int size = args->buffer_count;
		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_objects),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->objects);
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

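/*
 * Resolve every handle in the exec list to a GEM object, take a reference
 * and register it with the lookup structure; duplicate handles within one
 * execbuffer are rejected.
 */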
static int
eb_lookup_objects(struct eb_objects *eb,
		  struct drm_i915_gem_exec_object2 *exec,
		  const struct drm_i915_gem_execbuffer2 *args,
		  struct drm_file *file)
{
	int i;

	spin_lock(&file->table_lock);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			return -ENOENT;
		}

		if (!list_empty(&obj->exec_list)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			return -EINVAL;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->exec_list, &eb->objects);

		obj->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = obj;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			obj->exec_handle = handle;
			hlist_add_head(&obj->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
	}
	spin_unlock(&file->table_lock);

	return 0;
}

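/* Find a previously registered object by its relocation target handle. */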
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct drm_i915_gem_object *obj;

			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
			if (obj->exec_handle == handle)
				return obj;
		}
		return NULL;
	}
}

static void
eb_destroy(struct eb_objects *eb)
{
	while (!list_empty(&eb->objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}
	kfree(eb);
}

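/*
 * Relocations are written through the CPU when the object is already in the
 * CPU write domain, cannot be mapped through the mappable aperture, or is
 * cached; otherwise they go through an uncached GTT mapping.
 */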
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

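/*
 * Apply a single relocation entry: validate the target and offset, then
 * patch the target's offset into the object via either a CPU kmap or an
 * atomic GTT mapping, and record the new presumed offset.
 */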
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct i915_address_space *vm)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj)) {
		uint32_t page_offset = offset_in_page(reloc->offset);
		char *vaddr;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			return ret;

		vaddr = kmap_atomic(i915_gem_object_get_page(obj,
							     reloc->offset >> PAGE_SHIFT));
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform. */
		reloc->offset += i915_gem_obj_ggtt_offset(obj);
		reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + offset_in_page(reloc->offset));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

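/*
 * Fast relocation path: copy the userspace relocation entries in chunks onto
 * the stack (pagefaults are disabled by the caller) and apply them, writing
 * back any presumed_offset that changed.
 */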
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb,
				    struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
								 vm);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs,
					 struct i915_address_space *vm)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
							 vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_objects *eb,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(obj, &eb->objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(obj);
}

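/*
 * Pin one object into the address space for this execbuffer, acquiring a
 * fence and the aliasing-ppgtt/global-GTT mappings it needs, and flag when
 * its offset differs from the userspace presumed offset so that relocations
 * are forced.
 */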
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
				   struct intel_ring_buffer *ring,
				   struct i915_address_space *vm,
				   bool *need_reloc)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(obj);

	ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
				  false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
		entry->offset = i915_gem_obj_offset(obj, vm);
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
	    !obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!i915_gem_obj_bound_any(obj))
		return;

	entry = obj->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

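/*
 * Reserve address space for every object in the execbuffer.  Objects that
 * must be mappable are placed first, ill-fitting bindings are undone, and on
 * -ENOSPC everything is evicted once before retrying.
 */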
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *objects,
			    struct i915_address_space *vm,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct list_head ordered_objects;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;
			u32 obj_offset;

			if (!i915_gem_obj_bound(obj, vm))
				continue;

			obj_offset = i915_gem_obj_offset(obj, vm);
			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(obj);

			WARN_ON((need_mappable || need_fence) &&
				!i915_is_ggtt(vm));

			if ((entry->alignment &&
			     obj_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
			else
				ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (i915_gem_obj_bound(obj, vm))
				continue;

			ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list)
			i915_gem_execbuffer_unreserve_object(obj);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_everything(ring->dev);
		if (ret)
			return ret;
	} while (1);
}

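/*
 * Slow relocation path: drop the objects and struct_mutex, copy all
 * relocation entries from userspace with pagefaults enabled, then retake the
 * lock, re-reserve the objects and apply the relocations from the kernel
 * copy.
 */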
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_address_space *vm)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	int count = args->buffer_count;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->objects)) {
		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(obj, &eb->objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset],
							       vm);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

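/*
 * Flush CPU/GTT writes for each object and synchronise with other rings,
 * then invalidate the GPU caches before the batch executes.
 */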
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	uint32_t flush_domains = 0;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

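/*
 * Sanity-check the exec object list: reject unknown per-object flags, guard
 * against relocation-count overflow and verify the relocation arrays are
 * writable (prefaulting them unless disabled).
 */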
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	int relocs_total = 0;
	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915_prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct i915_address_space *vm,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		/* FIXME: This lookup gets fixed later <-- danvet */
		list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
		i915_gem_object_move_to_active(obj, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

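/*
 * Main execbuffer worker: validate the request, select the ring, look up and
 * reserve all objects, apply relocations (falling back to the slow path on
 * -EFAULT), flush caches, switch context and dispatch the batch.
 */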
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct i915_address_space *vm)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_VEBOX:
		ring = &dev_priv->ring[VECS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;

	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb, vm);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec, vm);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but let's be paranoid and do it
	 * unconditionally for now. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = i915_gem_obj_offset(batch_obj, vm) +
		args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}