/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */

#define  __EXEC_OBJECT_HAS_PIN		(1<<31)
#define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
#define  __EXEC_OBJECT_INTERNAL_FLAGS	(0xf<<28) /* all of the above */

#define BATCH_OFFSET_BIAS (256*1024)

struct i915_execbuffer_params {
	struct drm_device		*dev;
	struct drm_file			*file;
	struct i915_vma			*batch;
	u32				dispatch_flags;
	u32				args_batch_start_offset;
	struct intel_engine_cs		*engine;
	struct i915_gem_context		*ctx;
	struct drm_i915_gem_request	*request;
};

struct eb_vmas {
	struct drm_i915_private *i915;
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

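/*
 * In short, two lookup strategies coexist here: with I915_EXEC_HANDLE_LUT
 * userspace promises that relocation target handles are indices into the
 * exec array, so a flat lut[] indexed by position suffices and eb->and
 * stores the negated buffer count as a marker; otherwise handles are
 * hashed into buckets[], with eb->and holding the bucket mask.
 * eb_get_vma() below dispatches on the sign of eb->and.
 */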
static struct eb_vmas *
eb_create(struct drm_i915_private *i915,
	  struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	eb->i915 = i915;
	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static struct i915_vma *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma;
}

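/*
 * Resolve the user's handles into vmas in two passes: first take object
 * references under file->table_lock so the idr cannot change beneath us,
 * then drop the lock before looking up or creating each (obj, vm) vma,
 * since that may allocate and so must not run under the spinlock.
 */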
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_vma_instance(obj, vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;

err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		i915_gem_object_put(obj);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_vma_unpin_fence(vma);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		__i915_vma_unpin(vma);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		vma->exec_entry = NULL;
		i915_vma_put(vma);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_USE_CPU_RELOC)
		return DBG_USE_CPU_RELOC > 0;

	return (HAS_LLC(to_i915(obj->base.dev)) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}

/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

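/*
 * Worked example (values chosen purely for illustration): with a 48-bit
 * GPU address space, an address with bit 47 set, e.g. 0x0000800000000000,
 * must be presented to the hardware sign-extended as 0xffff800000000000,
 * while gen8_noncanonical_addr() strips that sign-extension again so that
 * drm_mm only ever operates on the low 48 bits.
 */
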
static inline uint64_t
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}

struct reloc_cache {
	struct drm_i915_private *i915;
	struct drm_mm_node node;
	unsigned long vaddr;
	unsigned int page;
	bool use_64bit_reloc;
};

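/*
 * The reloc_cache keeps the most recently mapped page of the object under
 * relocation (either a kmap of a shmem page or an atomic iomap through the
 * GTT aperture) so that a run of relocations landing in the same page can
 * reuse a single mapping; reloc_cache_fini() tears down whichever mapping
 * is live and performs any deferred flushing.
 */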
static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	cache->i915 = i915;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->node.allocated = false;
}

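/*
 * cache->vaddr packs two things into one word: the mapping is always page
 * aligned, so the low bits (p & ~PAGE_MASK) are free to carry flags (the
 * CLFLUSH_BEFORE/CLFLUSH_AFTER hints, plus KMAP to distinguish a kmapping
 * from an iomapping), while unmask_page() recovers the pointer itself.
 */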
static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static void reloc_cache_fini(struct reloc_cache *cache)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = &cache->i915->ggtt;

			ggtt->base.clear_range(&ggtt->base,
					       cache->node.start,
					       cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			int page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int ret;

		ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (ret)
			return ERR_PTR(ret);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	struct i915_ggtt *ggtt = &cache->i915->ggtt;
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int ret;

		if (use_cpu_reloc(obj))
			return NULL;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ERR_PTR(ret);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE | PIN_NONBLOCK);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			ret = drm_mm_insert_node_in_range
				(&ggtt->base.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (ret) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			ret = i915_vma_put_fence(vma);
			if (ret) {
				i915_vma_unpin(vma);
				return ERR_PTR(ret);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

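/*
 * reloc_vaddr() returns a kernel address for the given page of the target
 * object, reusing the cached mapping while successive relocations stay
 * within one page. The iomap path is tried first (unless use_cpu_reloc()
 * prefers the CPU, or the aperture is exhausted, in which case it returns
 * NULL), falling back to kmap; once a kmapping is in use we stay on that
 * path for the rest of the object.
 */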
static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/* Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int
relocate_entry(struct drm_i915_gem_object *obj,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct reloc_cache *cache,
	       u64 target_offset)
{
	u64 offset = reloc->offset;
	bool wide = cache->use_64bit_reloc;
	void *vaddr;

	target_offset = relocation_target(reloc, target_offset);
repeat:
	vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			cache->vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

	return 0;
}

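/*
 * Example of the loop above (offsets chosen purely for illustration): a
 * 64-bit relocation at reloc->offset == 4092 writes the low 32 bits at the
 * tail of one page, then takes the 'repeat' path with offset advanced to
 * 4096 and writes the high 32 bits at the start of the next page, i.e. a
 * single wide relocation may straddle two pages and hence two mappings.
 */
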
static int
i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct reloc_cache *cache)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev_priv) &&
		     reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt-out
	 * of our synchronisation.
	 */
	vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;

	ret = relocate_entry(obj, reloc, cache, target_offset);
	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;
	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct reloc_cache cache;
	int remain, ret = 0;

	user_relocs = u64_to_user_ptr(entry->relocs_ptr);
	reloc_cache_init(&cache, eb->i915);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		unsigned long unwritten;
		unsigned int count;

		count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
		remain -= count;

		/* This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. For in such a
		 * case, the page fault handler would call i915_gem_fault()
		 * and we would try to acquire the struct mutex again.
		 * Obviously this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
		pagefault_enable();
		if (unlikely(unwritten)) {
			ret = -EFAULT;
			goto out;
		}

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);
			if (ret)
				goto out;

			if (r->presumed_offset != offset) {
				pagefault_disable();
				unwritten = __put_user(r->presumed_offset,
						       &user_relocs->presumed_offset);
				pagefault_enable();
				if (unlikely(unwritten)) {
					/* Note that reporting an error now
					 * leaves everything in an inconsistent
					 * state as we have *already* changed
					 * the relocation value inside the
					 * object. As we have not changed the
					 * reloc.presumed_offset and will not
					 * change the execobject.offset, on the
					 * next call we may not rewrite the
					 * value inside the object, leaving it
					 * dangling and causing a GPU hang.
					 */
					ret = -EFAULT;
					goto out;
				}
			}

			user_relocs++;
			r++;
		} while (--count);
	}

out:
	reloc_cache_fini(&cache);
	return ret;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct reloc_cache cache;
	int i, ret = 0;

	reloc_cache_init(&cache, eb->i915);
	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);
		if (ret)
			break;
	}
	reloc_cache_fini(&cache);

	return ret;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

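/*
 * Pin one vma for execbuf, translating the exec-object flags into pin
 * flags: an unbound object is constrained to the low 4GiB unless it
 * advertises EXEC_OBJECT_SUPPORTS_48B_ADDRESS, biased above
 * BATCH_OFFSET_BIAS when __EXEC_OBJECT_NEEDS_BIAS is set (e.g. for batch
 * buffers whose relocation deltas may go negative), and placed exactly at
 * entry->offset for EXEC_OBJECT_PINNED. If a mappable pin fails for lack
 * of aperture space and only the relocation code wanted the mapping, the
 * pin is retried without PIN_MAPPABLE.
 */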
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *engine,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_vma_pin(vma,
			   entry->pad_to_size,
			   entry->alignment,
			   flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_vma_pin(vma,
				   entry->pad_to_size,
				   entry->alignment,
				   flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_vma_get_fence(vma);
		if (ret)
			return ret;

		if (i915_vma_pin_fence(vma))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_vma_is_ggtt(vma))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(to_i915(vma->obj->base.dev)))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_vma_is_ggtt(vma));

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

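/*
 * Reserve GTT space for the whole execbuf. The list is first sorted so
 * that objects needing a mappable offset (for fencing or GTT relocations)
 * come first and user-pinned objects last, then the 3-phase loop below
 * (documented inline) repeatedly unbinds misplaced vmas, pins the rest,
 * and evicts the address space entirely when we run out of room, giving
 * up after the second -ENOSPC.
 */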
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
			    struct list_head *vmas,
			    struct i915_gem_context *ctx,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
	bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
	int retry;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			(entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
			 needs_unfenced_map) &&
			i915_gem_object_is_tiled(obj);
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_list, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);
	list_splice(&pinned_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma,
								      engine,
								      need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
							      need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *engine,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_gem_context *ctx)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		i915_vma_put(vma);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are, this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
			continue;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj, 0);
			obj->base.write_domain = 0;
		}

		ret = i915_gem_request_await_object
			(req, obj, obj->base.pending_write_domain);
		if (ret)
			return ret;
	}

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(req->engine->i915);

	/* Unconditionally invalidate GPU caches and TLBs. */
	return req->engine->emit_flush(req, EMIT_INVALIDATE);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

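/*
 * Validate the userspace-supplied array of exec objects: reject invalid
 * flags, misaligned or non-canonical pinned offsets, and relocation
 * lists that would overflow or fault on access.
 */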
static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	/* INTERNAL flags must not overlap with external ones */
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;
		}

		/* From the drm_mm perspective the address space is
		 * contiguous, so from this point on we always use the
		 * non-canonical form internally.
		 */
		exec[i].offset = gen8_noncanonical_addr(exec[i].offset);

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* pad_to_size was once a reserved field, so sanitize it */
		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
			if (offset_in_page(exec[i].pad_to_size))
				return -EINVAL;
		} else {
			exec[i].pad_to_size = 0;
		}

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_pages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

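/*
 * Look up the context for this submission, rejecting contexts that have
 * been banned for triggering too many GPU hangs.
 */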
static struct i915_gem_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *engine, const u32 ctx_id)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	if (i915_gem_context_is_banned(ctx)) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

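/*
 * After a GPU write, cacheable (neither uncached nor write-through)
 * objects may hold stale CPU cachelines and so need a clflush before
 * the CPU reads them again.
 */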
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

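/*
 * Mark the vma (and its object) as active on the request's engine,
 * recording the last read, write and fence activity so that retirement
 * releases the active reference in the right order.
 */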
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/* Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	if (flags & EXEC_OBJECT_WRITE) {
		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);

		/* update for the implicit flush after a batch */
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);
}

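/*
 * Publish the request's fence in the object's reservation object so
 * that third parties (e.g. dma-buf importers) can wait on it: as the
 * exclusive fence for a write, or a shared fence for reads.
 */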
static void eb_export_fence(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv = obj->resv;

	/* Ignore errors from failing to allocate the new fence; we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	reservation_object_unlock(resv);
}

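/*
 * Commit the pending read/write domains on every object and mark each
 * vma on the execbuf list as active on this request, exporting the
 * request's fence for external synchronisation.
 */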
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct drm_i915_gem_request *req)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain)
			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
		else
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
		eb_export_fence(obj, req, vma->exec_entry->flags);
	}
}

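/*
 * Implement the I915_EXEC_GEN7_SOL_RESET flag: emit MI_LOAD_REGISTER_IMM
 * commands that zero the four gen7 streamout (SOL) write-offset
 * registers before the batch runs; only valid on the gen7 render ring.
 */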
static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	u32 *cs;
	int i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(req, 4 * 3);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < 4; i++) {
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}

	intel_ring_advance(req, cs);

	return 0;
}

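/*
 * Run the command parser over the batch: copy it into a shadow buffer
 * taken from the engine's batch pool, scan it, and on success return
 * the shadow vma pinned into the GGTT and added to the eb list. A NULL
 * return denotes an unhandled chained batch that must be executed
 * unparsed (and hence without I915_DISPATCH_SECURE).
 */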
static struct i915_vma *
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
			  struct drm_i915_gem_object *batch_obj,
			  struct eb_vmas *eb,
			  u32 batch_start_offset,
			  u32 batch_len,
			  bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
						   PAGE_ALIGN(batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	ret = intel_engine_cmd_parser(engine,
				      batch_obj,
				      shadow_batch_obj,
				      batch_start_offset,
				      batch_len,
				      is_master);
	if (ret) {
		if (ret == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(ret);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));

	vma->exec_entry = shadow_exec_entry;
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	i915_gem_object_get(shadow_batch_obj);
	list_add_tail(&vma->exec_list, &eb->vmas);

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

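/*
 * Associate the request with the submitting client so that outstanding
 * work can be tracked (and throttled) per file descriptor.
 */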
static void
add_to_client(struct drm_i915_gem_request *req,
	      struct drm_file *file)
{
	req->file_priv = file->driver_priv;
	list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
}

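/*
 * Final submission step: flush the objects for GPU access, switch to
 * the request's context, apply the one-off SOL reset workaround if
 * requested, emit the batchbuffer start into the ring and move the
 * objects onto the active lists.
 */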
static int
execbuf_submit(struct i915_execbuffer_params *params,
	       struct drm_i915_gem_execbuffer2 *args,
	       struct list_head *vmas)
{
	u64 exec_start, exec_len;
	int ret;

	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	ret = i915_switch_context(params->request);
	if (ret)
		return ret;

	if (args->flags & I915_EXEC_CONSTANTS_MASK) {
		DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(params->request);
		if (ret)
			return ret;
	}

	exec_len = args->batch_len;
	exec_start = params->batch->node.start +
		     params->args_batch_start_offset;

	if (exec_len == 0)
		exec_len = params->batch->size - params->args_batch_start_offset;

	ret = params->engine->emit_bb_start(params->request,
					    exec_start, exec_len,
					    params->dispatch_flags);
	if (ret)
		return ret;

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT] = RCS,
	[I915_EXEC_RENDER] = RCS,
	[I915_EXEC_BLT] = BCS,
	[I915_EXEC_BSD] = VCS,
	[I915_EXEC_VEBOX] = VECS
};

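/*
 * Map the ring selector in the execbuffer flags onto an engine,
 * including the explicit or round-robin choice between the two video
 * decode engines on hardware with a second BSD ring.
 */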
static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

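/*
 * The core of both execbuffer ioctls: validate the arguments, look up
 * and reserve every object, apply relocations (falling back to the slow
 * path on a page fault), optionally run the command parser over the
 * batch, then allocate the request with its in/out fences and hand the
 * batch to the engine.
 */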
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct eb_vmas *eb;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
	struct i915_execbuffer_params *params = &params_master;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 dispatch_flags;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	dispatch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		dispatch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		dispatch_flags |= I915_DISPATCH_PINNED;

	engine = eb_select_engine(dev_priv, file, args);
	if (!engine)
		return -EINVAL;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(dev_priv)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				  engine->name);
			return -EINVAL;
		}

		dispatch_flags |= I915_DISPATCH_RS;
	}

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_in_fence;
		}
	}

	/* Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_get(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &ggtt->base;

	memset(&params_master, 0x00, sizeof(params_master));

	eb = eb_create(dev_priv, args);
	if (eb == NULL) {
		i915_gem_context_put(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	params->batch = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
								engine,
								eb, exec, ctx);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (params->batch->obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	if (args->batch_start_offset > params->batch->size ||
	    args->batch_len > params->batch->size - args->batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		ret = -EINVAL;
		goto err;
	}

	params->args_batch_start_offset = args->batch_start_offset;
	if (engine->needs_cmd_parser && args->batch_len) {
		struct i915_vma *vma;

		vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
						params->batch->obj,
						eb,
						args->batch_start_offset,
						args->batch_len,
						drm_is_current_master(file));
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			dispatch_flags |= I915_DISPATCH_SECURE;
			params->args_batch_start_offset = 0;
			params->batch = vma;
		}
	}

	params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (dispatch_flags & I915_DISPATCH_SECURE) {
		struct drm_i915_gem_object *obj = params->batch->obj;
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		params->batch = vma;
	}

	/* Allocate a request for this batch buffer nice and early. */
	params->request = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(params->request)) {
		ret = PTR_ERR(params->request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		ret = i915_gem_request_await_dma_fence(params->request,
						       in_fence);
		if (ret < 0)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&params->request->fence);
		if (!out_fence) {
			ret = -ENOMEM;
			goto err_request;
		}
	}

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	params->request->batch = params->batch;

	/*
	 * Save assorted stuff away to pass through to *_submission().
	 * NB: This data should be 'persistent' and not local as it will
	 * be kept around beyond the duration of the IOCTL once the GPU
	 * scheduler arrives.
	 */
	params->dev = dev;
	params->file = file;
	params->engine = engine;
	params->dispatch_flags = dispatch_flags;
	params->ctx = ctx;

	trace_i915_gem_request_queue(params->request, dispatch_flags);

	ret = execbuf_submit(params, args, &eb->vmas);
err_request:
	__i915_add_request(params->request, ret == 0);
	add_to_client(params->request, file);

	if (out_fence) {
		if (ret == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. To make this less ugly and less fragile,
	 * it needs to be adjusted to also track the ggtt batch vma properly
	 * as active.
	 */
	if (dispatch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(params->batch);
err:
	/* the request owns the ref now */
	i915_gem_context_put(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will be freed when the
	 * device is really idle. */
	intel_runtime_pm_put(dev_priv);
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return ret;
}

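/*
 * For reference, a minimal userspace submission through the path above
 * looks roughly like the sketch below. This is illustrative only and not
 * part of the driver: it assumes the uapi definitions from i915_drm.h and
 * libdrm's drmIoctl(), and it omits buffer setup, relocations and error
 * handling. By convention the batch buffer is the last entry in the
 * buffers array, and batch_start_offset/batch_len must be 8-byte aligned
 * (see i915_gem_check_execbuffer() above).
 *
 *	struct drm_i915_gem_exec_object2 obj = { .handle = bo_handle };
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_size,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */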
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

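/*
 * The modern execbuffer entry point: copy in the exec object array,
 * dispatch it, and on success write the final offsets back (in
 * canonical form) so that userspace can seed future I915_EXEC_NO_RELOC
 * submissions.
 */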
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = drm_malloc_gfp(args->buffer_count,
				    sizeof(*exec2_list),
				    GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}