/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
#include <linux/uaccess.h>

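/*
 * Internal per-object flags, stored alongside the userspace EXEC_OBJECT_*
 * flags in the high bits of drm_i915_gem_exec_object2.flags while the
 * execbuffer call is in flight.
 */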
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)

#define BATCH_OFFSET_BIAS (256*1024)

struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

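/*
 * Allocate the lookup structure for this execbuf: a flat array indexed by
 * buffer position when userspace passes I915_EXEC_HANDLE_LUT, otherwise a
 * hash table keyed by GEM handle (eb->and holds the bucket mask, or the
 * negated buffer count in the LUT case).
 */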
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

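/*
 * Resolve every GEM handle in the exec list to a vma in the target address
 * space, taking a reference on each object and building the handle->vma
 * lookup used while processing relocations.
 */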
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

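/* Map an execbuf handle back to its vma, via the LUT or the hash table. */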
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

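/*
 * Relocations can be written with a simple CPU kmap when the object is
 * LLC-coherent or already being written through the CPU domain; otherwise
 * the GTT or clflush paths below are used.
 */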
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}

/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

static inline uint64_t
relocation_target(struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}

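/*
 * Write a relocation through a CPU kmap of the target page. On gen8+ the
 * address is 64 bits wide and the upper half may land on the following page.
 */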
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint64_t delta = relocation_target(reloc, target_offset);
	uint64_t offset;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform. */
	offset = i915_gem_obj_ggtt_offset(obj);
	offset += reloc->offset;
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      offset & PAGE_MASK);
	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

	if (INTEL_INFO(dev)->gen >= 8) {
		offset += sizeof(uint32_t);

		if (offset_in_page(offset) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page =
				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
							 offset);
		}

		iowrite32(upper_32_bits(delta),
			  reloc_page + offset_in_page(offset));
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

static void
clflush_write32(void *addr, uint32_t value)
{
	/* This is not a fast path, so KISS. */
	drm_clflush_virt_range(addr, sizeof(uint32_t));
	*(uint32_t *)addr = value;
	drm_clflush_virt_range(addr, sizeof(uint32_t));
}

static int
relocate_entry_clflush(struct drm_i915_gem_object *obj,
		       struct drm_i915_gem_relocation_entry *reloc,
		       uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
				reloc->offset >> PAGE_SHIFT));
	clflush_write32(vaddr + page_offset, lower_32_bits(delta));

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
	}

	kunmap_atomic(vaddr);

	return 0;
}

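/*
 * Apply a single relocation: look up the target vma, validate the requested
 * read/write domains and the relocation offset, then patch the batch if the
 * presumed offset userspace supplied is stale.
 */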
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && pagefault_disabled())
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else if (obj->map_and_fenceable)
		ret = relocate_entry_gtt(obj, reloc, target_offset);
	else if (cpu_has_clflush)
		ret = relocate_entry_clflush(obj, reloc, target_offset);
	else {
		WARN_ONCE(1, "Impossible case in relocation handling\n");
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

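/*
 * Pin one vma into its address space for this execbuf, honouring the
 * mappable, fixed-offset, alignment and 48-bit placement constraints, and
 * grab a fence register if the entry asks for one.
 */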
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_gem_object_pin(obj, vma->vm,
					  entry->alignment,
					  flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_is_ggtt(vma->vm))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_is_ggtt(vma->vm));

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

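/*
 * Reserve address space for every vma in the exec list, ordering mappable
 * and fence-requiring buffers first, and evicting and retrying the whole set
 * if the address space is too fragmented to fit everything.
 */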
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
			    struct list_head *vmas,
			    struct intel_context *ctx,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	i915_gem_retire_requests_ring(ring);

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_list, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);
	list_splice(&pinned_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct intel_context *ctx)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
				struct list_head *vmas)
{
	const unsigned other_rings = ~intel_ring_flag(req->ring);
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->active & other_rings) {
			ret = i915_gem_object_sync(obj, req->ring, &req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(req->ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(req);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

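/*
 * Sanity-check the user supplied exec list before taking any locks:
 * reject unknown flags, misaligned pinned offsets, relocation counts that
 * would overflow the allocation, and unreadable relocation pointers.
 */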
static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;

			/* From drm_mm perspective address space is continuous,
			 * so from this point we're always using non-canonical
			 * form internally.
			 */
			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
		}

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

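/*
 * Look up the context referenced by this execbuf and reject banned contexts;
 * with execlists enabled, allocate the logical ring context on first use.
 */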
static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *ring, const u32 ctx_id)
{
	struct intel_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
		int ret = intel_lr_context_deferred_alloc(ctx, ring);
		if (ret) {
			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
			return ERR_PTR(ret);
		}
	}

	return ctx;
}

Oscar Mateoba8b7cc2014-07-24 17:04:33 +01001095void
Ben Widawsky27173f12013-08-14 11:38:36 +02001096i915_gem_execbuffer_move_to_active(struct list_head *vmas,
John Harrison8a8edb52015-05-29 17:43:33 +01001097 struct drm_i915_gem_request *req)
Chris Wilson432e58e2010-11-25 19:32:06 +00001098{
John Harrison8a8edb52015-05-29 17:43:33 +01001099 struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
Ben Widawsky27173f12013-08-14 11:38:36 +02001100 struct i915_vma *vma;
Chris Wilson432e58e2010-11-25 19:32:06 +00001101
Ben Widawsky27173f12013-08-14 11:38:36 +02001102 list_for_each_entry(vma, vmas, exec_list) {
Chris Wilson82b6b6d2014-08-09 17:37:24 +01001103 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
Ben Widawsky27173f12013-08-14 11:38:36 +02001104 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilson69c2fc82012-07-20 12:41:03 +01001105 u32 old_read = obj->base.read_domains;
1106 u32 old_write = obj->base.write_domain;
Chris Wilsondb53a302011-02-03 11:57:46 +00001107
Chris Wilson51bc1402015-08-31 15:10:39 +01001108 obj->dirty = 1; /* be paranoid */
Chris Wilson432e58e2010-11-25 19:32:06 +00001109 obj->base.write_domain = obj->base.pending_write_domain;
Daniel Vettered5982e2013-01-17 22:23:36 +01001110 if (obj->base.write_domain == 0)
1111 obj->base.pending_read_domains |= obj->base.read_domains;
1112 obj->base.read_domains = obj->base.pending_read_domains;
Chris Wilson432e58e2010-11-25 19:32:06 +00001113
John Harrisonb2af0372015-05-29 17:43:50 +01001114 i915_vma_move_to_active(vma, req);
Chris Wilson432e58e2010-11-25 19:32:06 +00001115 if (obj->base.write_domain) {
John Harrison97b2a6a2014-11-24 18:49:26 +00001116 i915_gem_request_assign(&obj->last_write_req, req);
Daniel Vetterf99d7062014-06-19 16:01:59 +02001117
Rodrigo Vivi77a0d1c2015-06-18 11:43:24 -07001118 intel_fb_obj_invalidate(obj, ORIGIN_CS);
Chris Wilsonc8725f32014-03-17 12:21:55 +00001119
1120 /* update for the implicit flush after a batch */
1121 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
Chris Wilson432e58e2010-11-25 19:32:06 +00001122 }
Chris Wilson82b6b6d2014-08-09 17:37:24 +01001123 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
John Harrison97b2a6a2014-11-24 18:49:26 +00001124 i915_gem_request_assign(&obj->last_fenced_req, req);
Chris Wilson82b6b6d2014-08-09 17:37:24 +01001125 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1126 struct drm_i915_private *dev_priv = to_i915(ring->dev);
1127 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1128 &dev_priv->mm.fence_list);
1129 }
1130 }
Chris Wilson432e58e2010-11-25 19:32:06 +00001131
Chris Wilsondb53a302011-02-03 11:57:46 +00001132 trace_i915_gem_object_change_domain(obj, old_read, old_write);
Chris Wilson432e58e2010-11-25 19:32:06 +00001133 }
1134}
1135
void
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
	/* Unconditionally force add_request to emit a full flush. */
	params->ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	__i915_add_request(params->request, params->batch_obj, true);
}

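/*
 * i915_reset_gen7_sol_offsets - zero the streamout write offsets
 *
 * Emits MI_LOAD_REGISTER_IMM writes that reset GEN7_SO_WRITE_OFFSET(0..3)
 * to 0 for I915_EXEC_GEN7_SOL_RESET; only valid on the gen7 render ring.
 */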
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(req, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

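/*
 * i915_gem_execbuffer_parse - run the command parser over the batch
 *
 * Copies the userspace batch into a shadow buffer taken from the ring's
 * batch pool, scans it with i915_parse_cmds(), pins the shadow into the
 * GGTT and links it into the eb list so it is tracked like any other
 * execbuffer object. Returns the shadow object, the original batch when
 * the parser punts on a chained batch (-EACCES), or an ERR_PTR on error.
 */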
static struct drm_i915_gem_object*
i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
			  struct eb_vmas *eb,
			  struct drm_i915_gem_object *batch_obj,
			  u32 batch_start_offset,
			  u32 batch_len,
			  bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
						   PAGE_ALIGN(batch_len));
	if (IS_ERR(shadow_batch_obj))
		return shadow_batch_obj;

	ret = i915_parse_cmds(ring,
			      batch_obj,
			      shadow_batch_obj,
			      batch_start_offset,
			      batch_len,
			      is_master);
	if (ret)
		goto err;

	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
	if (ret)
		goto err;

	i915_gem_object_unpin_pages(shadow_batch_obj);

	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));

	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
	vma->exec_entry = shadow_exec_entry;
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	drm_gem_object_reference(&shadow_batch_obj->base);
	list_add_tail(&vma->exec_list, &eb->vmas);

	shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;

	return shadow_batch_obj;

err:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	if (ret == -EACCES) /* unhandled chained batch */
		return batch_obj;
	else
		return ERR_PTR(ret);
}

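/*
 * i915_gem_ringbuffer_submission - legacy (non-execlist) submission backend
 *
 * Flushes the objects to the GPU, switches to the request's context,
 * reprograms INSTPM if the relative-constants mode changed, optionally
 * resets the gen7 SOL offsets, dispatches the batch, and finally marks
 * all the vmas active and emits the closing request.
 */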
int
i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas)
{
	struct drm_device *dev = params->dev;
	struct intel_engine_cs *ring = params->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 exec_start, exec_len;
	int instp_mode;
	u32 instp_mask;
	int ret;

	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	ret = i915_switch_context(params->request);
	if (ret)
		return ret;

	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
	     "%s didn't clear reload\n", ring->name);

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				return -EINVAL;
			}

			if (INTEL_INFO(dev)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning of this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (ring == &dev_priv->ring[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, params->request);
		if (ret)
			return ret;
	}

	exec_len = args->batch_len;
	exec_start = params->batch_obj_vm_offset +
		     params->args_batch_start_offset;

	if (exec_len == 0)
		exec_len = params->batch_obj->base.size;

	ret = ring->dispatch_execbuffer(params->request,
					exec_start, exec_len,
					params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);
	i915_gem_execbuffer_retire_commands(params);

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The ring index is returned.
 */
static unsigned int
gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_ring < 0) {
		/* If not, use the ping-pong mechanism to select one. */
		mutex_lock(&dev_priv->dev->struct_mutex);
		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
		mutex_unlock(&dev_priv->dev->struct_mutex);
	}

	return file_priv->bsd_ring;
}

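/* The batch buffer is, by ABI convention, the last object in the exec list. */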
static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma->obj;
}

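/*
 * Translation of the user-visible ring selector (the low bits of
 * args->flags) onto hardware engines.
 */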
#define I915_USER_RINGS (4)

static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

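/*
 * eb_select_ring - pick the engine for this execbuf
 *
 * Validates the ring selector and the BSD dispatch flags, and on parts
 * with a second BSD ring either honours an explicit VCS1/VCS2 request or
 * falls back to the per-file ping-pong choice from gen8_dispatch_bsd_ring().
 */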
static int
eb_select_ring(struct drm_i915_private *dev_priv,
	       struct drm_file *file,
	       struct drm_i915_gem_execbuffer2 *args,
	       struct intel_engine_cs **ring)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return -EINVAL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return -EINVAL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return -EINVAL;
		}

		*ring = &dev_priv->ring[_VCS(bsd_idx)];
	} else {
		*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
	}

	if (!intel_ring_initialized(*ring)) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return -EINVAL;
	}

	return 0;
}

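/*
 * i915_gem_do_execbuffer - common guts of the execbuffer ioctls
 *
 * Validates the exec list and flags, selects the engine and context, looks
 * up and reserves every object (relocating where required), optionally runs
 * the command parser on the batch, allocates the request and finally hands
 * the whole thing to the submission backend (legacy ring or execlists).
 */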
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *req = NULL;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	struct i915_address_space *vm;
	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
	struct i915_execbuffer_params *params = &params_master;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 dispatch_flags;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	dispatch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		dispatch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		dispatch_flags |= I915_DISPATCH_PINNED;

	ret = eb_select_ring(dev_priv, file, args, &ring);
	if (ret)
		return ret;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(dev)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (ring->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				  ring->name);
			return -EINVAL;
		}

		dispatch_flags |= I915_DISPATCH_RS;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &dev_priv->gtt.base;

	memset(&params_master, 0x00, sizeof(params_master));

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_unreference(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec, ctx);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}

	params->args_batch_start_offset = args->batch_start_offset;
	if (i915_needs_cmd_parser(ring) && args->batch_len) {
		struct drm_i915_gem_object *parsed_batch_obj;

		parsed_batch_obj = i915_gem_execbuffer_parse(ring,
						      &shadow_exec_entry,
						      eb,
						      batch_obj,
						      args->batch_start_offset,
						      args->batch_len,
						      file->is_master);
		if (IS_ERR(parsed_batch_obj)) {
			ret = PTR_ERR(parsed_batch_obj);
			goto err;
		}

		/*
		 * parsed_batch_obj == batch_obj means batch not fully parsed:
		 * Accept, but don't promote to secure.
		 */

		if (parsed_batch_obj != batch_obj) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			dispatch_flags |= I915_DISPATCH_SECURE;
			params->args_batch_start_offset = 0;
			batch_obj = parsed_batch_obj;
		}
	}

	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (dispatch_flags & I915_DISPATCH_SECURE) {
		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
		if (ret)
			goto err;

		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
	} else
		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);

	/* Allocate a request for this batch buffer nice and early. */
	req = i915_gem_request_alloc(ring, ctx);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto err_batch_unpin;
	}

	ret = i915_gem_request_add_to_client(req, file);
	if (ret)
		goto err_batch_unpin;

	/*
	 * Save assorted stuff away to pass through to *_submission().
	 * NB: This data should be 'persistent' and not local as it will be
	 * kept around beyond the duration of the IOCTL once the GPU
	 * scheduler arrives.
	 */
	params->dev = dev;
	params->file = file;
	params->ring = ring;
	params->dispatch_flags = dispatch_flags;
	params->batch_obj = batch_obj;
	params->ctx = ctx;
	params->request = req;

	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. For less ugly and less fragility this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (dispatch_flags & I915_DISPATCH_SECURE)
		i915_gem_object_ggtt_unpin(batch_obj);

err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	/*
	 * If the request was created but not successfully submitted then it
	 * must be freed again. If it was submitted then it is being tracked
	 * on the active request list and no clean up is required here.
	 */
	if (ret && req)
		i915_gem_request_cancel(req);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

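/*
 * Rough sketch (not part of this file) of how userspace typically drives
 * the execbuffer2 ioctl via libdrm. The batch object goes last in the
 * exec list and the context id lives in rsvd1; the handles, sizes and
 * ctx_id below are placeholders:
 *
 *	struct drm_i915_gem_exec_object2 objs[2] = {};
 *	struct drm_i915_gem_execbuffer2 eb = {};
 *
 *	objs[0].handle = target_handle;
 *	objs[1].handle = batch_handle;
 *	eb.buffers_ptr = (uintptr_t)objs;
 *	eb.buffer_count = 2;
 *	eb.batch_len = batch_bytes;
 *	eb.flags = I915_EXEC_RENDER;
 *	eb.rsvd1 = ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
 */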
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}