/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);

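/*
 * The CPU cache is coherent with GPU access either when the platform shares
 * its last-level cache with the GPU (HAS_LLC) or when the object is mapped
 * with a snooped (non-NONE) cache level.
 */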
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

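/*
 * insert_mappable_node() reserves a small scratch node in the mappable
 * portion of the global GTT; the pread/pwrite fallback paths below use it to
 * map the object through the aperture one page at a time when the whole
 * object cannot be pinned there.
 */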
static int
insert_mappable_node(struct drm_i915_private *i915,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

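/*
 * For "phys" objects the backing storage is a single contiguous DMA
 * allocation (obj->phys_handle): get_pages copies the shmem pages into that
 * buffer and publishes a one-entry sg_table pointing at its bus address,
 * while put_pages writes any dirty contents back out to the shmem pages.
 */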
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static int
drop_pages(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma, *next;
	int ret;

	drm_gem_object_reference(&obj->base);
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
		if (i915_vma_unbind(vma))
			break;

	ret = i915_gem_object_put_pages(obj);
	drm_gem_object_unreference(&obj->base);

	return ret;
}

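/*
 * Convert the object to use a physically contiguous DMA buffer as backing
 * storage, dropping any existing GTT bindings and shmem pages first. Callers
 * that require physically contiguous memory (e.g. legacy cursor buffers) use
 * this before writing through i915_gem_phys_pwrite().
 */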
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = drop_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? - EFAULT : 0;
}

static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
{
	void __iomem *ioaddr;
	void *vaddr;
	uint64_t unwritten;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
	if (pwrite)
		unwritten = __copy_from_user(vaddr, user_data, length);
	else
		unwritten = __copy_to_user(user_data, vaddr, length);

	io_mapping_unmap(ioaddr);
	return unwritten;
}

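/*
 * Fallback pread path that reads through the GGTT aperture instead of the
 * shmem pages, used when the object has no struct pages or when the shmem
 * path has already failed. The object is pinned mappable if possible;
 * otherwise a temporary scratch page is inserted into the aperture for each
 * page copied.
 */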
634static int
635i915_gem_gtt_pread(struct drm_device *dev,
636 struct drm_i915_gem_object *obj, uint64_t size,
637 uint64_t data_offset, uint64_t data_ptr)
638{
Chris Wilsonfac5e232016-07-04 11:34:36 +0100639 struct drm_i915_private *dev_priv = to_i915(dev);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +0530640 struct i915_ggtt *ggtt = &dev_priv->ggtt;
641 struct drm_mm_node node;
642 char __user *user_data;
643 uint64_t remain;
644 uint64_t offset;
645 int ret;
646
647 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
648 if (ret) {
649 ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
650 if (ret)
651 goto out;
652
653 ret = i915_gem_object_get_pages(obj);
654 if (ret) {
655 remove_mappable_node(&node);
656 goto out;
657 }
658
659 i915_gem_object_pin_pages(obj);
660 } else {
661 node.start = i915_gem_obj_ggtt_offset(obj);
662 node.allocated = false;
663 ret = i915_gem_object_put_fence(obj);
664 if (ret)
665 goto out_unpin;
666 }
667
668 ret = i915_gem_object_set_to_gtt_domain(obj, false);
669 if (ret)
670 goto out_unpin;
671
672 user_data = u64_to_user_ptr(data_ptr);
673 remain = size;
674 offset = data_offset;
675
676 mutex_unlock(&dev->struct_mutex);
677 if (likely(!i915.prefault_disable)) {
678 ret = fault_in_multipages_writeable(user_data, remain);
679 if (ret) {
680 mutex_lock(&dev->struct_mutex);
681 goto out_unpin;
682 }
683 }
684
685 while (remain > 0) {
686 /* Operation in this page
687 *
688 * page_base = page offset within aperture
689 * page_offset = offset within page
690 * page_length = bytes to copy for this page
691 */
692 u32 page_base = node.start;
693 unsigned page_offset = offset_in_page(offset);
694 unsigned page_length = PAGE_SIZE - page_offset;
695 page_length = remain < page_length ? remain : page_length;
696 if (node.allocated) {
697 wmb();
698 ggtt->base.insert_page(&ggtt->base,
699 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
700 node.start,
701 I915_CACHE_NONE, 0);
702 wmb();
703 } else {
704 page_base += offset & PAGE_MASK;
705 }
706 /* This is a slow read/write as it tries to read from
707 * and write to user memory which may result into page
708 * faults, and so we cannot perform this under struct_mutex.
709 */
710 if (slow_user_access(ggtt->mappable, page_base,
711 page_offset, user_data,
712 page_length, false)) {
713 ret = -EFAULT;
714 break;
715 }
716
717 remain -= page_length;
718 user_data += page_length;
719 offset += page_length;
720 }
721
722 mutex_lock(&dev->struct_mutex);
723 if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
724 /* The user has modified the object whilst we tried
725 * reading from it, and we now have no idea what domain
726 * the pages should be in. As we have just been touching
727 * them directly, flush everything back to the GTT
728 * domain.
729 */
730 ret = i915_gem_object_set_to_gtt_domain(obj, false);
731 }
732
733out_unpin:
734 if (node.allocated) {
735 wmb();
736 ggtt->base.clear_range(&ggtt->base,
737 node.start, node.size,
738 true);
739 i915_gem_object_unpin_pages(obj);
740 remove_mappable_node(&node);
741 } else {
742 i915_gem_object_ggtt_unpin(obj);
743 }
744out:
745 return ret;
746}
747
Eric Anholteb014592009-03-10 11:44:52 -0700748static int
Daniel Vetterdbf7bff2012-03-25 19:47:29 +0200749i915_gem_shmem_pread(struct drm_device *dev,
750 struct drm_i915_gem_object *obj,
751 struct drm_i915_gem_pread *args,
752 struct drm_file *file)
Eric Anholteb014592009-03-10 11:44:52 -0700753{
Daniel Vetter8461d222011-12-14 13:57:32 +0100754 char __user *user_data;
Eric Anholteb014592009-03-10 11:44:52 -0700755 ssize_t remain;
Daniel Vetter8461d222011-12-14 13:57:32 +0100756 loff_t offset;
Ben Widawskyeb2c0c82012-02-15 14:42:43 +0100757 int shmem_page_offset, page_length, ret = 0;
Daniel Vetter8461d222011-12-14 13:57:32 +0100758 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
Daniel Vetter96d79b52012-03-25 19:47:36 +0200759 int prefaulted = 0;
Daniel Vetter84897312012-03-25 19:47:31 +0200760 int needs_clflush = 0;
Imre Deak67d5a502013-02-18 19:28:02 +0200761 struct sg_page_iter sg_iter;
Eric Anholteb014592009-03-10 11:44:52 -0700762
Chris Wilson6eae0052016-06-20 15:05:52 +0100763 if (!i915_gem_object_has_struct_page(obj))
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +0530764 return -ENODEV;
765
Gustavo Padovan3ed605b2016-04-26 12:32:27 -0300766 user_data = u64_to_user_ptr(args->data_ptr);
Eric Anholteb014592009-03-10 11:44:52 -0700767 remain = args->size;
768
Daniel Vetter8461d222011-12-14 13:57:32 +0100769 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700770
Brad Volkin4c914c02014-02-18 10:15:45 -0800771 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
Chris Wilsonf60d7f02012-09-04 21:02:56 +0100772 if (ret)
773 return ret;
774
Eric Anholteb014592009-03-10 11:44:52 -0700775 offset = args->offset;
Daniel Vetter8461d222011-12-14 13:57:32 +0100776
Imre Deak67d5a502013-02-18 19:28:02 +0200777 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
778 offset >> PAGE_SHIFT) {
Imre Deak2db76d72013-03-26 15:14:18 +0200779 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +0100780
781 if (remain <= 0)
782 break;
783
Eric Anholteb014592009-03-10 11:44:52 -0700784 /* Operation in this page
785 *
Eric Anholteb014592009-03-10 11:44:52 -0700786 * shmem_page_offset = offset within page in shmem file
Eric Anholteb014592009-03-10 11:44:52 -0700787 * page_length = bytes to copy for this page
788 */
Chris Wilsonc8cbbb82011-05-12 22:17:11 +0100789 shmem_page_offset = offset_in_page(offset);
Eric Anholteb014592009-03-10 11:44:52 -0700790 page_length = remain;
791 if ((shmem_page_offset + page_length) > PAGE_SIZE)
792 page_length = PAGE_SIZE - shmem_page_offset;
Eric Anholteb014592009-03-10 11:44:52 -0700793
Daniel Vetter8461d222011-12-14 13:57:32 +0100794 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
795 (page_to_phys(page) & (1 << 17)) != 0;
796
Daniel Vetterd174bd62012-03-25 19:47:40 +0200797 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
798 user_data, page_do_bit17_swizzling,
799 needs_clflush);
800 if (ret == 0)
801 goto next_page;
Eric Anholteb014592009-03-10 11:44:52 -0700802
Daniel Vetterdbf7bff2012-03-25 19:47:29 +0200803 mutex_unlock(&dev->struct_mutex);
804
Jani Nikulad330a952014-01-21 11:24:25 +0200805 if (likely(!i915.prefault_disable) && !prefaulted) {
Daniel Vetterf56f8212012-03-25 19:47:41 +0200806 ret = fault_in_multipages_writeable(user_data, remain);
Daniel Vetter96d79b52012-03-25 19:47:36 +0200807 /* Userspace is tricking us, but we've already clobbered
808 * its pages with the prefault and promised to write the
809 * data up to the first fault. Hence ignore any errors
810 * and just continue. */
811 (void)ret;
812 prefaulted = 1;
813 }
814
Daniel Vetterd174bd62012-03-25 19:47:40 +0200815 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
816 user_data, page_do_bit17_swizzling,
817 needs_clflush);
Eric Anholteb014592009-03-10 11:44:52 -0700818
Daniel Vetterdbf7bff2012-03-25 19:47:29 +0200819 mutex_lock(&dev->struct_mutex);
Chris Wilsonf60d7f02012-09-04 21:02:56 +0100820
Chris Wilsonf60d7f02012-09-04 21:02:56 +0100821 if (ret)
Daniel Vetter8461d222011-12-14 13:57:32 +0100822 goto out;
Daniel Vetter8461d222011-12-14 13:57:32 +0100823
Chris Wilson17793c92014-03-07 08:30:36 +0000824next_page:
Eric Anholteb014592009-03-10 11:44:52 -0700825 remain -= page_length;
Daniel Vetter8461d222011-12-14 13:57:32 +0100826 user_data += page_length;
Eric Anholteb014592009-03-10 11:44:52 -0700827 offset += page_length;
828 }
829
Chris Wilson4f27b752010-10-14 15:26:45 +0100830out:
Chris Wilsonf60d7f02012-09-04 21:02:56 +0100831 i915_gem_object_unpin_pages(obj);
832
Eric Anholteb014592009-03-10 11:44:52 -0700833 return ret;
834}
835
Eric Anholt673a3942008-07-30 12:06:12 -0700836/**
837 * Reads data from the object referenced by handle.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +0100838 * @dev: drm device pointer
839 * @data: ioctl data blob
840 * @file: drm file pointer
Eric Anholt673a3942008-07-30 12:06:12 -0700841 *
842 * On error, the contents of *data are undefined.
843 */
844int
845i915_gem_pread_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +0000846 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700847{
848 struct drm_i915_gem_pread *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +0000849 struct drm_i915_gem_object *obj;
Chris Wilson35b62a82010-09-26 20:23:38 +0100850 int ret = 0;
Eric Anholt673a3942008-07-30 12:06:12 -0700851
Chris Wilson51311d02010-11-17 09:10:42 +0000852 if (args->size == 0)
853 return 0;
854
855 if (!access_ok(VERIFY_WRITE,
Gustavo Padovan3ed605b2016-04-26 12:32:27 -0300856 u64_to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +0000857 args->size))
858 return -EFAULT;
859
Chris Wilson4f27b752010-10-14 15:26:45 +0100860 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100861 if (ret)
Chris Wilson4f27b752010-10-14 15:26:45 +0100862 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700863
Chris Wilsona8ad0bd2016-05-09 11:04:54 +0100864 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +0000865 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100866 ret = -ENOENT;
867 goto unlock;
Chris Wilson4f27b752010-10-14 15:26:45 +0100868 }
Eric Anholt673a3942008-07-30 12:06:12 -0700869
Chris Wilson7dcd2492010-09-26 20:21:44 +0100870 /* Bounds check source. */
Chris Wilson05394f32010-11-08 19:18:58 +0000871 if (args->offset > obj->base.size ||
872 args->size > obj->base.size - args->offset) {
Chris Wilsonce9d4192010-09-26 20:50:05 +0100873 ret = -EINVAL;
Chris Wilson35b62a82010-09-26 20:23:38 +0100874 goto out;
Chris Wilsonce9d4192010-09-26 20:50:05 +0100875 }
876
Chris Wilsondb53a302011-02-03 11:57:46 +0000877 trace_i915_gem_object_pread(obj, args->offset, args->size);
878
Daniel Vetterdbf7bff2012-03-25 19:47:29 +0200879 ret = i915_gem_shmem_pread(dev, obj, args, file);
Eric Anholt673a3942008-07-30 12:06:12 -0700880
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +0530881 /* pread for non shmem backed objects */
Chris Wilson2ca17b82016-08-04 09:09:53 +0100882 if (ret == -EFAULT || ret == -ENODEV) {
883 intel_runtime_pm_get(to_i915(dev));
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +0530884 ret = i915_gem_gtt_pread(dev, obj, args->size,
885 args->offset, args->data_ptr);
Chris Wilson2ca17b82016-08-04 09:09:53 +0100886 intel_runtime_pm_put(to_i915(dev));
887 }
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +0530888
Chris Wilson35b62a82010-09-26 20:23:38 +0100889out:
Chris Wilson05394f32010-11-08 19:18:58 +0000890 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100891unlock:
Chris Wilson4f27b752010-10-14 15:26:45 +0100892 mutex_unlock(&dev->struct_mutex);
Eric Anholteb014592009-03-10 11:44:52 -0700893 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700894}
895
Keith Packard0839ccb2008-10-30 19:38:48 -0700896/* This is the fast write path which cannot handle
897 * page faults in the source data
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700898 */
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700899
Keith Packard0839ccb2008-10-30 19:38:48 -0700900static inline int
901fast_user_write(struct io_mapping *mapping,
902 loff_t page_base, int page_offset,
903 char __user *user_data,
904 int length)
905{
Ben Widawsky4f0c7cf2012-04-16 14:07:47 -0700906 void __iomem *vaddr_atomic;
907 void *vaddr;
Keith Packard0839ccb2008-10-30 19:38:48 -0700908 unsigned long unwritten;
909
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700910 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
Ben Widawsky4f0c7cf2012-04-16 14:07:47 -0700911 /* We can use the cpu mem copy function because this is X86. */
912 vaddr = (void __force*)vaddr_atomic + page_offset;
913 unwritten = __copy_from_user_inatomic_nocache(vaddr,
Keith Packard0839ccb2008-10-30 19:38:48 -0700914 user_data, length);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700915 io_mapping_unmap_atomic(vaddr_atomic);
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100916 return unwritten;
Keith Packard0839ccb2008-10-30 19:38:48 -0700917}
918
Eric Anholt3de09aa2009-03-09 09:42:23 -0700919/**
920 * This is the fast pwrite path, where we copy the data directly from the
921 * user into the GTT, uncached.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +0100922 * @dev: drm device pointer
923 * @obj: i915 gem object
924 * @args: pwrite arguments structure
925 * @file: drm file pointer
Eric Anholt3de09aa2009-03-09 09:42:23 -0700926 */
Eric Anholt673a3942008-07-30 12:06:12 -0700927static int
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +0530928i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
Chris Wilson05394f32010-11-08 19:18:58 +0000929 struct drm_i915_gem_object *obj,
Eric Anholt3de09aa2009-03-09 09:42:23 -0700930 struct drm_i915_gem_pwrite *args,
Chris Wilson05394f32010-11-08 19:18:58 +0000931 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700932{
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +0530933 struct i915_ggtt *ggtt = &i915->ggtt;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +0530934 struct drm_device *dev = obj->base.dev;
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +0530935 struct drm_mm_node node;
936 uint64_t remain, offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700937 char __user *user_data;
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +0530938 int ret;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +0530939 bool hit_slow_path = false;
940
941 if (obj->tiling_mode != I915_TILING_NONE)
942 return -EFAULT;
Daniel Vetter935aaa62012-03-25 19:47:35 +0200943
Daniel Vetter1ec9e262014-02-14 14:01:11 +0100944 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +0530945 if (ret) {
946 ret = insert_mappable_node(i915, &node, PAGE_SIZE);
947 if (ret)
948 goto out;
949
950 ret = i915_gem_object_get_pages(obj);
951 if (ret) {
952 remove_mappable_node(&node);
953 goto out;
954 }
955
956 i915_gem_object_pin_pages(obj);
957 } else {
958 node.start = i915_gem_obj_ggtt_offset(obj);
959 node.allocated = false;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +0530960 ret = i915_gem_object_put_fence(obj);
961 if (ret)
962 goto out_unpin;
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +0530963 }
Daniel Vetter935aaa62012-03-25 19:47:35 +0200964
965 ret = i915_gem_object_set_to_gtt_domain(obj, true);
966 if (ret)
967 goto out_unpin;
968
Rodrigo Vivi77a0d1c2015-06-18 11:43:24 -0700969 intel_fb_obj_invalidate(obj, ORIGIN_GTT);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +0530970 obj->dirty = true;
Eric Anholt673a3942008-07-30 12:06:12 -0700971
Gustavo Padovan3ed605b2016-04-26 12:32:27 -0300972 user_data = u64_to_user_ptr(args->data_ptr);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +0530973 offset = args->offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700974 remain = args->size;
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +0530975 while (remain) {
Eric Anholt673a3942008-07-30 12:06:12 -0700976 /* Operation in this page
977 *
Keith Packard0839ccb2008-10-30 19:38:48 -0700978 * page_base = page offset within aperture
979 * page_offset = offset within page
980 * page_length = bytes to copy for this page
Eric Anholt673a3942008-07-30 12:06:12 -0700981 */
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +0530982 u32 page_base = node.start;
983 unsigned page_offset = offset_in_page(offset);
984 unsigned page_length = PAGE_SIZE - page_offset;
985 page_length = remain < page_length ? remain : page_length;
986 if (node.allocated) {
987 wmb(); /* flush the write before we modify the GGTT */
988 ggtt->base.insert_page(&ggtt->base,
989 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
990 node.start, I915_CACHE_NONE, 0);
991 wmb(); /* flush modifications to the GGTT (insert_page) */
992 } else {
993 page_base += offset & PAGE_MASK;
994 }
Keith Packard0839ccb2008-10-30 19:38:48 -0700995 /* If we get a fault while copying data, then (presumably) our
Eric Anholt3de09aa2009-03-09 09:42:23 -0700996 * source page isn't available. Return the error and we'll
997 * retry in the slow path.
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +0530998 * If the object is non-shmem backed, we retry again with the
999 * path that handles page fault.
Keith Packard0839ccb2008-10-30 19:38:48 -07001000 */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001001 if (fast_user_write(ggtt->mappable, page_base,
Daniel Vetter935aaa62012-03-25 19:47:35 +02001002 page_offset, user_data, page_length)) {
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301003 hit_slow_path = true;
1004 mutex_unlock(&dev->struct_mutex);
1005 if (slow_user_access(ggtt->mappable,
1006 page_base,
1007 page_offset, user_data,
1008 page_length, true)) {
1009 ret = -EFAULT;
1010 mutex_lock(&dev->struct_mutex);
1011 goto out_flush;
1012 }
1013
1014 mutex_lock(&dev->struct_mutex);
Daniel Vetter935aaa62012-03-25 19:47:35 +02001015 }
Eric Anholt673a3942008-07-30 12:06:12 -07001016
Keith Packard0839ccb2008-10-30 19:38:48 -07001017 remain -= page_length;
1018 user_data += page_length;
1019 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -07001020 }
Eric Anholt673a3942008-07-30 12:06:12 -07001021
Paulo Zanoni063e4e62015-02-13 17:23:45 -02001022out_flush:
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301023 if (hit_slow_path) {
1024 if (ret == 0 &&
1025 (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
1026 /* The user has modified the object whilst we tried
1027 * reading from it, and we now have no idea what domain
1028 * the pages should be in. As we have just been touching
1029 * them directly, flush everything back to the GTT
1030 * domain.
1031 */
1032 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1033 }
1034 }
1035
Rodrigo Vivide152b62015-07-07 16:28:51 -07001036 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
Daniel Vetter935aaa62012-03-25 19:47:35 +02001037out_unpin:
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301038 if (node.allocated) {
1039 wmb();
1040 ggtt->base.clear_range(&ggtt->base,
1041 node.start, node.size,
1042 true);
1043 i915_gem_object_unpin_pages(obj);
1044 remove_mappable_node(&node);
1045 } else {
1046 i915_gem_object_ggtt_unpin(obj);
1047 }
Daniel Vetter935aaa62012-03-25 19:47:35 +02001048out:
Eric Anholt3de09aa2009-03-09 09:42:23 -07001049 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001050}
1051
Daniel Vetterd174bd62012-03-25 19:47:40 +02001052/* Per-page copy function for the shmem pwrite fastpath.
1053 * Flushes invalid cachelines before writing to the target if
1054 * needs_clflush_before is set and flushes out any written cachelines after
1055 * writing if needs_clflush is set. */
Eric Anholt673a3942008-07-30 12:06:12 -07001056static int
Daniel Vetterd174bd62012-03-25 19:47:40 +02001057shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
1058 char __user *user_data,
1059 bool page_do_bit17_swizzling,
1060 bool needs_clflush_before,
1061 bool needs_clflush_after)
Eric Anholt673a3942008-07-30 12:06:12 -07001062{
Daniel Vetterd174bd62012-03-25 19:47:40 +02001063 char *vaddr;
Eric Anholt3de09aa2009-03-09 09:42:23 -07001064 int ret;
Eric Anholt3de09aa2009-03-09 09:42:23 -07001065
Daniel Vettere7e58eb2012-03-25 19:47:43 +02001066 if (unlikely(page_do_bit17_swizzling))
Daniel Vetterd174bd62012-03-25 19:47:40 +02001067 return -EINVAL;
Eric Anholt3de09aa2009-03-09 09:42:23 -07001068
Daniel Vetterd174bd62012-03-25 19:47:40 +02001069 vaddr = kmap_atomic(page);
1070 if (needs_clflush_before)
1071 drm_clflush_virt_range(vaddr + shmem_page_offset,
1072 page_length);
Chris Wilsonc2831a92014-03-07 08:30:37 +00001073 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
1074 user_data, page_length);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001075 if (needs_clflush_after)
1076 drm_clflush_virt_range(vaddr + shmem_page_offset,
1077 page_length);
1078 kunmap_atomic(vaddr);
Eric Anholt3de09aa2009-03-09 09:42:23 -07001079
Chris Wilson755d2212012-09-04 21:02:55 +01001080 return ret ? -EFAULT : 0;
Eric Anholt3de09aa2009-03-09 09:42:23 -07001081}
1082
Daniel Vetterd174bd62012-03-25 19:47:40 +02001083/* Only difference to the fast-path function is that this can handle bit17
1084 * and uses non-atomic copy and kmap functions. */
Eric Anholt3043c602008-10-02 12:24:47 -07001085static int
Daniel Vetterd174bd62012-03-25 19:47:40 +02001086shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
1087 char __user *user_data,
1088 bool page_do_bit17_swizzling,
1089 bool needs_clflush_before,
1090 bool needs_clflush_after)
Eric Anholt673a3942008-07-30 12:06:12 -07001091{
Daniel Vetterd174bd62012-03-25 19:47:40 +02001092 char *vaddr;
1093 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001094
Daniel Vetterd174bd62012-03-25 19:47:40 +02001095 vaddr = kmap(page);
Daniel Vettere7e58eb2012-03-25 19:47:43 +02001096 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
Daniel Vetter23c18c72012-03-25 19:47:42 +02001097 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1098 page_length,
1099 page_do_bit17_swizzling);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001100 if (page_do_bit17_swizzling)
1101 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
Chris Wilsone5281cc2010-10-28 13:45:36 +01001102 user_data,
1103 page_length);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001104 else
1105 ret = __copy_from_user(vaddr + shmem_page_offset,
1106 user_data,
1107 page_length);
1108 if (needs_clflush_after)
Daniel Vetter23c18c72012-03-25 19:47:42 +02001109 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1110 page_length,
1111 page_do_bit17_swizzling);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001112 kunmap(page);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001113
Chris Wilson755d2212012-09-04 21:02:55 +01001114 return ret ? -EFAULT : 0;
Eric Anholt40123c12009-03-09 13:42:30 -07001115}
1116
Eric Anholt40123c12009-03-09 13:42:30 -07001117static int
Daniel Vettere244a442012-03-25 19:47:28 +02001118i915_gem_shmem_pwrite(struct drm_device *dev,
1119 struct drm_i915_gem_object *obj,
1120 struct drm_i915_gem_pwrite *args,
1121 struct drm_file *file)
Eric Anholt40123c12009-03-09 13:42:30 -07001122{
Eric Anholt40123c12009-03-09 13:42:30 -07001123 ssize_t remain;
Daniel Vetter8c599672011-12-14 13:57:31 +01001124 loff_t offset;
1125 char __user *user_data;
Ben Widawskyeb2c0c82012-02-15 14:42:43 +01001126 int shmem_page_offset, page_length, ret = 0;
Daniel Vetter8c599672011-12-14 13:57:31 +01001127 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
Daniel Vettere244a442012-03-25 19:47:28 +02001128 int hit_slowpath = 0;
Daniel Vetter58642882012-03-25 19:47:37 +02001129 int needs_clflush_after = 0;
1130 int needs_clflush_before = 0;
Imre Deak67d5a502013-02-18 19:28:02 +02001131 struct sg_page_iter sg_iter;
Eric Anholt40123c12009-03-09 13:42:30 -07001132
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001133 user_data = u64_to_user_ptr(args->data_ptr);
Eric Anholt40123c12009-03-09 13:42:30 -07001134 remain = args->size;
1135
Daniel Vetter8c599672011-12-14 13:57:31 +01001136 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
Eric Anholt40123c12009-03-09 13:42:30 -07001137
Daniel Vetter58642882012-03-25 19:47:37 +02001138 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1139 /* If we're not in the cpu write domain, set ourself into the gtt
1140 * write domain and manually flush cachelines (if required). This
1141 * optimizes for the case when the gpu will use the data
1142 * right away and we therefore have to clflush anyway. */
Chris Wilson2c225692013-08-09 12:26:45 +01001143 needs_clflush_after = cpu_write_needs_clflush(obj);
Ben Widawsky23f54482013-09-11 14:57:48 -07001144 ret = i915_gem_object_wait_rendering(obj, false);
1145 if (ret)
1146 return ret;
Daniel Vetter58642882012-03-25 19:47:37 +02001147 }
Chris Wilsonc76ce032013-08-08 14:41:03 +01001148 /* Same trick applies to invalidate partially written cachelines read
1149 * before writing. */
1150 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
1151 needs_clflush_before =
1152 !cpu_cache_is_coherent(dev, obj->cache_level);
Daniel Vetter58642882012-03-25 19:47:37 +02001153
Chris Wilson755d2212012-09-04 21:02:55 +01001154 ret = i915_gem_object_get_pages(obj);
1155 if (ret)
1156 return ret;
1157
Rodrigo Vivi77a0d1c2015-06-18 11:43:24 -07001158 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
Paulo Zanoni063e4e62015-02-13 17:23:45 -02001159
Chris Wilson755d2212012-09-04 21:02:55 +01001160 i915_gem_object_pin_pages(obj);
1161
Eric Anholt40123c12009-03-09 13:42:30 -07001162 offset = args->offset;
Chris Wilson05394f32010-11-08 19:18:58 +00001163 obj->dirty = 1;
Eric Anholt40123c12009-03-09 13:42:30 -07001164
Imre Deak67d5a502013-02-18 19:28:02 +02001165 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
1166 offset >> PAGE_SHIFT) {
Imre Deak2db76d72013-03-26 15:14:18 +02001167 struct page *page = sg_page_iter_page(&sg_iter);
Daniel Vetter58642882012-03-25 19:47:37 +02001168 int partial_cacheline_write;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001169
Chris Wilson9da3da62012-06-01 15:20:22 +01001170 if (remain <= 0)
1171 break;
1172
Eric Anholt40123c12009-03-09 13:42:30 -07001173 /* Operation in this page
1174 *
Eric Anholt40123c12009-03-09 13:42:30 -07001175 * shmem_page_offset = offset within page in shmem file
Eric Anholt40123c12009-03-09 13:42:30 -07001176 * page_length = bytes to copy for this page
1177 */
Chris Wilsonc8cbbb82011-05-12 22:17:11 +01001178 shmem_page_offset = offset_in_page(offset);
Eric Anholt40123c12009-03-09 13:42:30 -07001179
1180 page_length = remain;
1181 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1182 page_length = PAGE_SIZE - shmem_page_offset;
Eric Anholt40123c12009-03-09 13:42:30 -07001183
Daniel Vetter58642882012-03-25 19:47:37 +02001184 /* If we don't overwrite a cacheline completely we need to be
1185 * careful to have up-to-date data by first clflushing. Don't
1186 * overcomplicate things and flush the entire patch. */
1187 partial_cacheline_write = needs_clflush_before &&
1188 ((shmem_page_offset | page_length)
1189 & (boot_cpu_data.x86_clflush_size - 1));
1190
Daniel Vetter8c599672011-12-14 13:57:31 +01001191 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1192 (page_to_phys(page) & (1 << 17)) != 0;
1193
Daniel Vetterd174bd62012-03-25 19:47:40 +02001194 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1195 user_data, page_do_bit17_swizzling,
1196 partial_cacheline_write,
1197 needs_clflush_after);
1198 if (ret == 0)
1199 goto next_page;
Eric Anholt40123c12009-03-09 13:42:30 -07001200
Daniel Vettere244a442012-03-25 19:47:28 +02001201 hit_slowpath = 1;
Daniel Vettere244a442012-03-25 19:47:28 +02001202 mutex_unlock(&dev->struct_mutex);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001203 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1204 user_data, page_do_bit17_swizzling,
1205 partial_cacheline_write,
1206 needs_clflush_after);
Eric Anholt40123c12009-03-09 13:42:30 -07001207
Daniel Vettere244a442012-03-25 19:47:28 +02001208 mutex_lock(&dev->struct_mutex);
Chris Wilson755d2212012-09-04 21:02:55 +01001209
Chris Wilson755d2212012-09-04 21:02:55 +01001210 if (ret)
Daniel Vetter8c599672011-12-14 13:57:31 +01001211 goto out;
Daniel Vetter8c599672011-12-14 13:57:31 +01001212
Chris Wilson17793c92014-03-07 08:30:36 +00001213next_page:
Eric Anholt40123c12009-03-09 13:42:30 -07001214 remain -= page_length;
Daniel Vetter8c599672011-12-14 13:57:31 +01001215 user_data += page_length;
Eric Anholt40123c12009-03-09 13:42:30 -07001216 offset += page_length;
1217 }
1218
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001219out:
Chris Wilson755d2212012-09-04 21:02:55 +01001220 i915_gem_object_unpin_pages(obj);
1221
Daniel Vettere244a442012-03-25 19:47:28 +02001222 if (hit_slowpath) {
Daniel Vetter8dcf0152012-11-15 16:53:58 +01001223 /*
1224 * Fixup: Flush cpu caches in case we didn't flush the dirty
1225 * cachelines in-line while writing and the object moved
1226 * out of the cpu write domain while we've dropped the lock.
1227 */
1228 if (!needs_clflush_after &&
1229 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
Chris Wilson000433b2013-08-08 14:41:09 +01001230 if (i915_gem_clflush_object(obj, obj->pin_display))
Ville Syrjäläed75a552015-08-11 19:47:10 +03001231 needs_clflush_after = true;
Daniel Vettere244a442012-03-25 19:47:28 +02001232 }
Daniel Vetter8c599672011-12-14 13:57:31 +01001233 }
Eric Anholt40123c12009-03-09 13:42:30 -07001234
Daniel Vetter58642882012-03-25 19:47:37 +02001235 if (needs_clflush_after)
Chris Wilsonc0336662016-05-06 15:40:21 +01001236 i915_gem_chipset_flush(to_i915(dev));
Ville Syrjäläed75a552015-08-11 19:47:10 +03001237 else
1238 obj->cache_dirty = true;
Daniel Vetter58642882012-03-25 19:47:37 +02001239
Rodrigo Vivide152b62015-07-07 16:28:51 -07001240 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
Eric Anholt40123c12009-03-09 13:42:30 -07001241 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001242}
1243
1244/**
1245 * Writes data to the object referenced by handle.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001246 * @dev: drm device
1247 * @data: ioctl data blob
1248 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001249 *
1250 * On error, the contents of the buffer that were to be modified are undefined.
1251 */
1252int
1253i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001254 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001255{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001256 struct drm_i915_private *dev_priv = to_i915(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001257 struct drm_i915_gem_pwrite *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001258 struct drm_i915_gem_object *obj;
Chris Wilson51311d02010-11-17 09:10:42 +00001259 int ret;
1260
1261 if (args->size == 0)
1262 return 0;
1263
1264 if (!access_ok(VERIFY_READ,
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001265 u64_to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +00001266 args->size))
1267 return -EFAULT;
1268
Jani Nikulad330a952014-01-21 11:24:25 +02001269 if (likely(!i915.prefault_disable)) {
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001270 ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
Xiong Zhang0b74b502013-07-19 13:51:24 +08001271 args->size);
1272 if (ret)
1273 return -EFAULT;
1274 }
Eric Anholt673a3942008-07-30 12:06:12 -07001275
Imre Deak5d77d9c2014-11-12 16:40:35 +02001276 intel_runtime_pm_get(dev_priv);
1277
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001278 ret = i915_mutex_lock_interruptible(dev);
1279 if (ret)
Imre Deak5d77d9c2014-11-12 16:40:35 +02001280 goto put_rpm;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001281
Chris Wilsona8ad0bd2016-05-09 11:04:54 +01001282 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001283 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001284 ret = -ENOENT;
1285 goto unlock;
1286 }
Eric Anholt673a3942008-07-30 12:06:12 -07001287
Chris Wilson7dcd2492010-09-26 20:21:44 +01001288 /* Bounds check destination. */
Chris Wilson05394f32010-11-08 19:18:58 +00001289 if (args->offset > obj->base.size ||
1290 args->size > obj->base.size - args->offset) {
Chris Wilsonce9d4192010-09-26 20:50:05 +01001291 ret = -EINVAL;
Chris Wilson35b62a82010-09-26 20:23:38 +01001292 goto out;
Chris Wilsonce9d4192010-09-26 20:50:05 +01001293 }
1294
Chris Wilsondb53a302011-02-03 11:57:46 +00001295 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1296
Daniel Vetter935aaa62012-03-25 19:47:35 +02001297 ret = -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -07001298 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1299 * it would end up going through the fenced access, and we'll get
1300 * different detiling behavior between reading and writing.
1301 * pread/pwrite currently are reading and writing from the CPU
1302 * perspective, requiring manual detiling by the client.
1303 */
Chris Wilson6eae0052016-06-20 15:05:52 +01001304 if (!i915_gem_object_has_struct_page(obj) ||
Chris Wilson2c225692013-08-09 12:26:45 +01001305 cpu_write_needs_clflush(obj)) {
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301306 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
Daniel Vetter935aaa62012-03-25 19:47:35 +02001307 /* Note that the gtt paths might fail with non-page-backed user
1308 * pointers (e.g. gtt mappings when moving data between
1309 * textures). Fallback to the shmem path in that case. */
Eric Anholt40123c12009-03-09 13:42:30 -07001310 }
Eric Anholt673a3942008-07-30 12:06:12 -07001311
Chris Wilsonfae82e52016-07-16 18:42:36 +01001312 if (ret == -EFAULT || ret == -ENOSPC) {
Chris Wilson6a2c4232014-11-04 04:51:40 -08001313 if (obj->phys_handle)
1314 ret = i915_gem_phys_pwrite(obj, args, file);
Chris Wilson6eae0052016-06-20 15:05:52 +01001315 else if (i915_gem_object_has_struct_page(obj))
Chris Wilson6a2c4232014-11-04 04:51:40 -08001316 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301317 else
1318 ret = -ENODEV;
Chris Wilson6a2c4232014-11-04 04:51:40 -08001319 }
Daniel Vetter5c0480f2011-12-14 13:57:30 +01001320
Chris Wilson35b62a82010-09-26 20:23:38 +01001321out:
Chris Wilson05394f32010-11-08 19:18:58 +00001322 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001323unlock:
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001324 mutex_unlock(&dev->struct_mutex);
Imre Deak5d77d9c2014-11-12 16:40:35 +02001325put_rpm:
1326 intel_runtime_pm_put(dev_priv);
1327
Eric Anholt673a3942008-07-30 12:06:12 -07001328 return ret;
1329}
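/*
 * For reference, a minimal userspace sketch of how the pwrite path above is
 * usually reached. Assumed names (not part of this file): `fd` is an open DRM
 * fd, `handle` a GEM handle from DRM_IOCTL_I915_GEM_CREATE, `gem_pwrite` a
 * hypothetical helper; error handling and include paths may differ per setup.
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int gem_pwrite(int fd, uint32_t handle,
 *			      uint64_t offset, const void *src, uint64_t len)
 *	{
 *		struct drm_i915_gem_pwrite pw = {
 *			.handle = handle,
 *			.offset = offset,		// byte offset into the object
 *			.size = len,			// number of bytes to copy
 *			.data_ptr = (uintptr_t)src,	// user pointer to read from
 *		};
 *
 *		// drmIoctl() restarts the ioctl on EINTR.
 *		return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pw);
 *	}
 */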
1330
Chris Wilsonf4457ae2016-04-13 17:35:08 +01001331static int
1332i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
Chris Wilsonb3612372012-08-24 09:35:08 +01001333{
Chris Wilsonf4457ae2016-04-13 17:35:08 +01001334 if (__i915_terminally_wedged(reset_counter))
1335 return -EIO;
Chris Wilsond98c52c2016-04-13 17:35:05 +01001336
Chris Wilsonf4457ae2016-04-13 17:35:08 +01001337 if (__i915_reset_in_progress(reset_counter)) {
Chris Wilsonb3612372012-08-24 09:35:08 +01001338 /* Non-interruptible callers can't handle -EAGAIN, hence return
1339 * -EIO unconditionally for these. */
1340 if (!interruptible)
1341 return -EIO;
1342
Chris Wilsond98c52c2016-04-13 17:35:05 +01001343 return -EAGAIN;
Chris Wilsonb3612372012-08-24 09:35:08 +01001344 }
1345
1346 return 0;
1347}
1348
Chris Wilsonca5b7212015-12-11 11:32:58 +00001349static unsigned long local_clock_us(unsigned *cpu)
1350{
1351 unsigned long t;
1352
1353 /* Cheaply and approximately convert from nanoseconds to microseconds.
1354 * The result and subsequent calculations are also defined in the same
1355 * approximate microseconds units. The principal source of timing
1356 * error here is from the simple truncation.
1357 *
1358 * Note that local_clock() is only defined wrt to the current CPU;
1359 * the comparisons are no longer valid if we switch CPUs. Instead of
1360 * blocking preemption for the entire busywait, we can detect the CPU
1361 * switch and use that as indicator of system load and a reason to
1362 * stop busywaiting, see busywait_stop().
1363 */
1364 *cpu = get_cpu();
1365 t = local_clock() >> 10;
1366 put_cpu();
1367
1368 return t;
1369}
1370
1371static bool busywait_stop(unsigned long timeout, unsigned cpu)
1372{
1373 unsigned this_cpu;
1374
1375 if (time_after(local_clock_us(&this_cpu), timeout))
1376 return true;
1377
1378 return this_cpu != cpu;
1379}
1380
Chris Wilsonf69a02c2016-07-01 17:23:16 +01001381bool __i915_spin_request(const struct drm_i915_gem_request *req,
1382 int state, unsigned long timeout_us)
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001383{
Chris Wilsonca5b7212015-12-11 11:32:58 +00001384 unsigned cpu;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001385
Chris Wilsonca5b7212015-12-11 11:32:58 +00001386 /* When waiting for high frequency requests, e.g. during synchronous
1387 * rendering split between the CPU and GPU, the finite amount of time
1388 * required to set up the irq and wait upon it limits the response
1389 * rate. By busywaiting on the request completion for a short while we
1390 * can service the high frequency waits as quickly as possible. However,
1391 * if it is a slow request, we want to sleep as quickly as possible.
1392 * The tradeoff between waiting and sleeping is roughly the time it
1393 * takes to sleep on a request, on the order of a microsecond.
1394 */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001395
Chris Wilsonf69a02c2016-07-01 17:23:16 +01001396 timeout_us += local_clock_us(&cpu);
Chris Wilson688e6c72016-07-01 17:23:15 +01001397 do {
Chris Wilsonf69a02c2016-07-01 17:23:16 +01001398 if (i915_gem_request_completed(req))
Chris Wilson688e6c72016-07-01 17:23:15 +01001399 return true;
Chris Wilson2def4ad92015-04-07 16:20:41 +01001400
Chris Wilson91b0c352015-12-11 11:32:57 +00001401 if (signal_pending_state(state, current))
1402 break;
1403
Chris Wilsonf69a02c2016-07-01 17:23:16 +01001404 if (busywait_stop(timeout_us, cpu))
Chris Wilson2def4ad92015-04-07 16:20:41 +01001405 break;
1406
1407 cpu_relax_lowlatency();
Chris Wilson688e6c72016-07-01 17:23:15 +01001408 } while (!need_resched());
Chris Wilson821485d2015-12-11 11:32:59 +00001409
Chris Wilson688e6c72016-07-01 17:23:15 +01001410 return false;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001411}
1412
Chris Wilsonb3612372012-08-24 09:35:08 +01001413/**
John Harrison9c654812014-11-24 18:49:35 +00001414 * __i915_wait_request - wait until execution of request has finished
1415 * @req: the request to wait upon
Chris Wilsonb3612372012-08-24 09:35:08 +01001416 * @interruptible: do an interruptible wait (normally yes)
1417 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001418 * @rps: RPS client
Chris Wilsonb3612372012-08-24 09:35:08 +01001419 *
Daniel Vetterf69061b2012-12-06 09:01:42 +01001420 * Note: It is of utmost importance that the passed in seqno and reset_counter
1421 * values have been read by the caller in an smp safe manner. Where read-side
1422 * locks are involved, it is sufficient to read the reset_counter before
1423 * unlocking the lock that protects the seqno. For lockless tricks, the
1424 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1425 * inserted.
1426 *
John Harrison9c654812014-11-24 18:49:35 +00001427 * Returns 0 if the request was found within the allotted time. Else returns the
Chris Wilsonb3612372012-08-24 09:35:08 +01001428 * errno with remaining time filled in timeout argument.
1429 */
John Harrison9c654812014-11-24 18:49:35 +00001430int __i915_wait_request(struct drm_i915_gem_request *req,
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001431 bool interruptible,
Thomas Gleixner5ed0bdf2014-07-16 21:05:06 +00001432 s64 *timeout,
Chris Wilson2e1b8732015-04-27 13:41:22 +01001433 struct intel_rps_client *rps)
Chris Wilsonb3612372012-08-24 09:35:08 +01001434{
Chris Wilson91b0c352015-12-11 11:32:57 +00001435 int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
Chris Wilson1f15b762016-07-01 17:23:14 +01001436 DEFINE_WAIT(reset);
Chris Wilson688e6c72016-07-01 17:23:15 +01001437 struct intel_wait wait;
1438 unsigned long timeout_remain;
Tvrtko Ursuline0313db2016-01-15 15:11:12 +00001439 s64 before = 0; /* Only to silence a compiler warning. */
Chris Wilson688e6c72016-07-01 17:23:15 +01001440 int ret = 0;
Chris Wilsonb3612372012-08-24 09:35:08 +01001441
Chris Wilson688e6c72016-07-01 17:23:15 +01001442 might_sleep();
Paulo Zanonic67a4702013-08-19 13:18:09 -03001443
Chris Wilsonb4716182015-04-27 13:41:17 +01001444 if (list_empty(&req->list))
1445 return 0;
1446
Chris Wilsonf69a02c2016-07-01 17:23:16 +01001447 if (i915_gem_request_completed(req))
Chris Wilsonb3612372012-08-24 09:35:08 +01001448 return 0;
1449
Chris Wilson688e6c72016-07-01 17:23:15 +01001450 timeout_remain = MAX_SCHEDULE_TIMEOUT;
Chris Wilsonbb6d1982015-11-26 13:31:42 +00001451 if (timeout) {
1452 if (WARN_ON(*timeout < 0))
1453 return -EINVAL;
1454
1455 if (*timeout == 0)
1456 return -ETIME;
1457
Chris Wilson688e6c72016-07-01 17:23:15 +01001458 timeout_remain = nsecs_to_jiffies_timeout(*timeout);
Tvrtko Ursuline0313db2016-01-15 15:11:12 +00001459
1460 /*
1461 * Record current time in case interrupted by signal, or wedged.
1462 */
1463 before = ktime_get_raw_ns();
Chris Wilsonbb6d1982015-11-26 13:31:42 +00001464 }
Chris Wilsonb3612372012-08-24 09:35:08 +01001465
John Harrison74328ee2014-11-24 18:49:38 +00001466 trace_i915_gem_request_wait_begin(req);
Chris Wilson2def4ad92015-04-07 16:20:41 +01001467
Chris Wilsondf4ba502016-07-04 08:08:35 +01001468 /* This client is about to stall waiting for the GPU. In many cases
1469 * this is undesirable and limits the throughput of the system, as
1470 * many clients cannot continue processing user input/output whilst
1471 * blocked. RPS autotuning may take tens of milliseconds to respond
1472 * to the GPU load and thus incurs additional latency for the client.
1473 * We can circumvent that by promoting the GPU frequency to maximum
1474 * before we wait. This makes the GPU throttle up much more quickly
1475 * (good for benchmarks and user experience, e.g. window animations),
1476 * but at a cost of spending more power processing the workload
1477 * (bad for battery). Not all clients even want their results
1478 * immediately and for them we should just let the GPU select its own
1479 * frequency to maximise efficiency. To prevent a single client from
1480 * forcing the clocks too high for the whole system, we only allow
1481 * each client to waitboost once in a busy period.
1482 */
Chris Wilson688e6c72016-07-01 17:23:15 +01001483 if (INTEL_INFO(req->i915)->gen >= 6)
1484 gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
Chris Wilson2def4ad92015-04-07 16:20:41 +01001485
Chris Wilson688e6c72016-07-01 17:23:15 +01001486 /* Optimistic spin for the next ~jiffie before touching IRQs */
Chris Wilsonf69a02c2016-07-01 17:23:16 +01001487 if (i915_spin_request(req, state, 5))
Chris Wilson688e6c72016-07-01 17:23:15 +01001488 goto complete;
Chris Wilson2def4ad92015-04-07 16:20:41 +01001489
Chris Wilson688e6c72016-07-01 17:23:15 +01001490 set_current_state(state);
1491 add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
Chris Wilsonb3612372012-08-24 09:35:08 +01001492
Chris Wilson688e6c72016-07-01 17:23:15 +01001493 intel_wait_init(&wait, req->seqno);
1494 if (intel_engine_add_wait(req->engine, &wait))
1495 /* In order to check that we haven't missed the interrupt
1496 * as we enabled it, we need to kick ourselves to do a
1497 * coherent check on the seqno before we sleep.
Chris Wilsonf4457ae2016-04-13 17:35:08 +01001498 */
Chris Wilson688e6c72016-07-01 17:23:15 +01001499 goto wakeup;
Chris Wilsonb3612372012-08-24 09:35:08 +01001500
1501 for (;;) {
Chris Wilson91b0c352015-12-11 11:32:57 +00001502 if (signal_pending_state(state, current)) {
Chris Wilson094f9a52013-09-25 17:34:55 +01001503 ret = -ERESTARTSYS;
1504 break;
1505 }
1506
Chris Wilson688e6c72016-07-01 17:23:15 +01001507 timeout_remain = io_schedule_timeout(timeout_remain);
1508 if (timeout_remain == 0) {
Chris Wilson094f9a52013-09-25 17:34:55 +01001509 ret = -ETIME;
1510 break;
1511 }
1512
Chris Wilson688e6c72016-07-01 17:23:15 +01001513 if (intel_wait_complete(&wait))
1514 break;
Mika Kuoppala47e9766d2013-12-10 17:02:43 +02001515
Chris Wilson688e6c72016-07-01 17:23:15 +01001516 set_current_state(state);
Chris Wilson094f9a52013-09-25 17:34:55 +01001517
Chris Wilson688e6c72016-07-01 17:23:15 +01001518wakeup:
1519 /* Carefully check if the request is complete, giving time
1520 * for the seqno to be visible following the interrupt.
1521 * We also have to check in case we are kicked by the GPU
1522 * reset in order to drop the struct_mutex.
1523 */
1524 if (__i915_request_irq_complete(req))
1525 break;
Chris Wilson094f9a52013-09-25 17:34:55 +01001526
Chris Wilsonf69a02c2016-07-01 17:23:16 +01001527 /* Only spin if we know the GPU is processing this request */
1528 if (i915_spin_request(req, state, 2))
1529 break;
Chris Wilson094f9a52013-09-25 17:34:55 +01001530 }
Chris Wilson688e6c72016-07-01 17:23:15 +01001531 remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
Chris Wilson094f9a52013-09-25 17:34:55 +01001532
Chris Wilson688e6c72016-07-01 17:23:15 +01001533 intel_engine_remove_wait(req->engine, &wait);
1534 __set_current_state(TASK_RUNNING);
1535complete:
Chris Wilson2def4ad92015-04-07 16:20:41 +01001536 trace_i915_gem_request_wait_end(req);
1537
Chris Wilsonb3612372012-08-24 09:35:08 +01001538 if (timeout) {
Tvrtko Ursuline0313db2016-01-15 15:11:12 +00001539 s64 tres = *timeout - (ktime_get_raw_ns() - before);
Thomas Gleixner5ed0bdf2014-07-16 21:05:06 +00001540
1541 *timeout = tres < 0 ? 0 : tres;
Daniel Vetter9cca3062014-11-28 10:29:55 +01001542
1543 /*
1544 * Apparently ktime isn't accurate enough and occasionally has a
1545 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
1546 * things up to make the test happy. We allow up to 1 jiffy.
1547 *
1548 * This is a regression from the timespec->ktime conversion.
1549 */
1550 if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
1551 *timeout = 0;
Chris Wilsonb3612372012-08-24 09:35:08 +01001552 }
1553
Chris Wilson0e6883b2016-07-04 08:08:34 +01001554 if (rps && req->seqno == req->engine->last_submitted_seqno) {
1555 /* The GPU is now idle and this client has stalled.
1556 * Since no other client has submitted a request in the
1557 * meantime, assume that this client is the only one
1558 * supplying work to the GPU but is unable to keep that
1559 * work supplied because it is waiting. Since the GPU is
1560 * then never kept fully busy, RPS autoclocking will
1561 * keep the clocks relatively low, causing further delays.
1562 * Compensate by giving the synchronous client credit for
1563 * a waitboost next time.
1564 */
1565 spin_lock(&req->i915->rps.client_lock);
1566 list_del_init(&rps->link);
1567 spin_unlock(&req->i915->rps.client_lock);
1568 }
1569
Chris Wilson094f9a52013-09-25 17:34:55 +01001570 return ret;
Chris Wilsonb3612372012-08-24 09:35:08 +01001571}
1572
John Harrisonfcfa423c2015-05-29 17:44:12 +01001573int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
1574 struct drm_file *file)
1575{
John Harrisonfcfa423c2015-05-29 17:44:12 +01001576 struct drm_i915_file_private *file_priv;
1577
1578 WARN_ON(!req || !file || req->file_priv);
1579
1580 if (!req || !file)
1581 return -EINVAL;
1582
1583 if (req->file_priv)
1584 return -EINVAL;
1585
John Harrisonfcfa423c2015-05-29 17:44:12 +01001586 file_priv = file->driver_priv;
1587
1588 spin_lock(&file_priv->mm.lock);
1589 req->file_priv = file_priv;
1590 list_add_tail(&req->client_list, &file_priv->mm.request_list);
1591 spin_unlock(&file_priv->mm.lock);
1592
1593 req->pid = get_pid(task_pid(current));
1594
1595 return 0;
1596}
1597
Chris Wilsonb4716182015-04-27 13:41:17 +01001598static inline void
1599i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1600{
1601 struct drm_i915_file_private *file_priv = request->file_priv;
1602
1603 if (!file_priv)
1604 return;
1605
1606 spin_lock(&file_priv->mm.lock);
1607 list_del(&request->client_list);
1608 request->file_priv = NULL;
1609 spin_unlock(&file_priv->mm.lock);
John Harrisonfcfa423c2015-05-29 17:44:12 +01001610
1611 put_pid(request->pid);
1612 request->pid = NULL;
Chris Wilsonb4716182015-04-27 13:41:17 +01001613}
1614
1615static void i915_gem_request_retire(struct drm_i915_gem_request *request)
1616{
1617 trace_i915_gem_request_retire(request);
1618
1619 /* We know the GPU must have read the request to have
1620 * sent us the seqno + interrupt, so use the position
1621 * of tail of the request to update the last known position
1622 * of the GPU head.
1623 *
1624 * Note this requires that we are always called in request
1625 * completion order.
1626 */
1627 request->ringbuf->last_retired_head = request->postfix;
1628
1629 list_del_init(&request->list);
1630 i915_gem_request_remove_from_client(request);
1631
Chris Wilsona16a4052016-04-28 09:56:56 +01001632 if (request->previous_context) {
Chris Wilson73db04c2016-04-28 09:56:55 +01001633 if (i915.enable_execlists)
Chris Wilsona16a4052016-04-28 09:56:56 +01001634 intel_lr_context_unpin(request->previous_context,
1635 request->engine);
Chris Wilson73db04c2016-04-28 09:56:55 +01001636 }
1637
Chris Wilsona16a4052016-04-28 09:56:56 +01001638 i915_gem_context_unreference(request->ctx);
Chris Wilsonb4716182015-04-27 13:41:17 +01001639 i915_gem_request_unreference(request);
1640}
1641
1642static void
1643__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
1644{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001645 struct intel_engine_cs *engine = req->engine;
Chris Wilsonb4716182015-04-27 13:41:17 +01001646 struct drm_i915_gem_request *tmp;
1647
Chris Wilson91c8a322016-07-05 10:40:23 +01001648 lockdep_assert_held(&engine->i915->drm.struct_mutex);
Chris Wilsonb4716182015-04-27 13:41:17 +01001649
1650 if (list_empty(&req->list))
1651 return;
1652
1653 do {
1654 tmp = list_first_entry(&engine->request_list,
1655 typeof(*tmp), list);
1656
1657 i915_gem_request_retire(tmp);
1658 } while (tmp != req);
1659
1660 WARN_ON(i915_verify_lists(engine->dev));
1661}
1662
Chris Wilsonb3612372012-08-24 09:35:08 +01001663/**
Daniel Vettera4b3a572014-11-26 14:17:05 +01001664 * Waits for a request to be signaled, and cleans up the
Chris Wilsonb3612372012-08-24 09:35:08 +01001665 * request and object lists appropriately for that event.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001666 * @req: request to wait on
Chris Wilsonb3612372012-08-24 09:35:08 +01001667 */
1668int
Daniel Vettera4b3a572014-11-26 14:17:05 +01001669i915_wait_request(struct drm_i915_gem_request *req)
Chris Wilsonb3612372012-08-24 09:35:08 +01001670{
Tvrtko Ursulin791bee12016-04-19 16:46:09 +01001671 struct drm_i915_private *dev_priv = req->i915;
Daniel Vettera4b3a572014-11-26 14:17:05 +01001672 bool interruptible;
Chris Wilsonb3612372012-08-24 09:35:08 +01001673 int ret;
1674
Daniel Vettera4b3a572014-11-26 14:17:05 +01001675 interruptible = dev_priv->mm.interruptible;
1676
Chris Wilson91c8a322016-07-05 10:40:23 +01001677 BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
Chris Wilsonb3612372012-08-24 09:35:08 +01001678
Chris Wilson299259a2016-04-13 17:35:06 +01001679 ret = __i915_wait_request(req, interruptible, NULL, NULL);
Chris Wilsonb4716182015-04-27 13:41:17 +01001680 if (ret)
1681 return ret;
Chris Wilsonb3612372012-08-24 09:35:08 +01001682
Chris Wilson157d2c72016-05-13 11:57:22 +01001683 /* If the GPU hung, we want to keep the requests to find the guilty. */
Chris Wilson0c5eed62016-06-29 15:51:14 +01001684 if (!i915_reset_in_progress(&dev_priv->gpu_error))
Chris Wilson157d2c72016-05-13 11:57:22 +01001685 __i915_gem_request_retire__upto(req);
1686
Chris Wilsond26e3af2013-06-29 22:05:26 +01001687 return 0;
1688}
1689
Chris Wilsonb3612372012-08-24 09:35:08 +01001690/**
1691 * Ensures that all rendering to the object has completed and the object is
1692 * safe to unbind from the GTT or access from the CPU.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001693 * @obj: i915 gem object
1694 * @readonly: waiting for read access or write
Chris Wilsonb3612372012-08-24 09:35:08 +01001695 */
Chris Wilson2e2f3512015-04-27 13:41:14 +01001696int
Chris Wilsonb3612372012-08-24 09:35:08 +01001697i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1698 bool readonly)
1699{
Chris Wilsonb4716182015-04-27 13:41:17 +01001700 int ret, i;
Chris Wilsonb3612372012-08-24 09:35:08 +01001701
Chris Wilsonb4716182015-04-27 13:41:17 +01001702 if (!obj->active)
Chris Wilsonb3612372012-08-24 09:35:08 +01001703 return 0;
1704
Chris Wilsonb4716182015-04-27 13:41:17 +01001705 if (readonly) {
1706 if (obj->last_write_req != NULL) {
1707 ret = i915_wait_request(obj->last_write_req);
1708 if (ret)
1709 return ret;
Chris Wilsonb3612372012-08-24 09:35:08 +01001710
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001711 i = obj->last_write_req->engine->id;
Chris Wilsonb4716182015-04-27 13:41:17 +01001712 if (obj->last_read_req[i] == obj->last_write_req)
1713 i915_gem_object_retire__read(obj, i);
1714 else
1715 i915_gem_object_retire__write(obj);
1716 }
1717 } else {
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001718 for (i = 0; i < I915_NUM_ENGINES; i++) {
Chris Wilsonb4716182015-04-27 13:41:17 +01001719 if (obj->last_read_req[i] == NULL)
1720 continue;
1721
1722 ret = i915_wait_request(obj->last_read_req[i]);
1723 if (ret)
1724 return ret;
1725
1726 i915_gem_object_retire__read(obj, i);
1727 }
Chris Wilsond501b1d2016-04-13 17:35:02 +01001728 GEM_BUG_ON(obj->active);
Chris Wilsonb4716182015-04-27 13:41:17 +01001729 }
1730
1731 return 0;
1732}
1733
1734static void
1735i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
1736 struct drm_i915_gem_request *req)
1737{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001738 int ring = req->engine->id;
Chris Wilsonb4716182015-04-27 13:41:17 +01001739
1740 if (obj->last_read_req[ring] == req)
1741 i915_gem_object_retire__read(obj, ring);
1742 else if (obj->last_write_req == req)
1743 i915_gem_object_retire__write(obj);
1744
Chris Wilson0c5eed62016-06-29 15:51:14 +01001745 if (!i915_reset_in_progress(&req->i915->gpu_error))
Chris Wilson157d2c72016-05-13 11:57:22 +01001746 __i915_gem_request_retire__upto(req);
Chris Wilsonb3612372012-08-24 09:35:08 +01001747}
1748
Chris Wilson3236f572012-08-24 09:35:09 +01001749/* A nonblocking variant of the above wait. This is a highly dangerous routine
1750 * as the object state may change during this call.
1751 */
1752static __must_check int
1753i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
Chris Wilson2e1b8732015-04-27 13:41:22 +01001754 struct intel_rps_client *rps,
Chris Wilson3236f572012-08-24 09:35:09 +01001755 bool readonly)
1756{
1757 struct drm_device *dev = obj->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01001758 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001759 struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
Chris Wilsonb4716182015-04-27 13:41:17 +01001760 int ret, i, n = 0;
Chris Wilson3236f572012-08-24 09:35:09 +01001761
1762 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1763 BUG_ON(!dev_priv->mm.interruptible);
1764
Chris Wilsonb4716182015-04-27 13:41:17 +01001765 if (!obj->active)
Chris Wilson3236f572012-08-24 09:35:09 +01001766 return 0;
1767
Chris Wilsonb4716182015-04-27 13:41:17 +01001768 if (readonly) {
1769 struct drm_i915_gem_request *req;
1770
1771 req = obj->last_write_req;
1772 if (req == NULL)
1773 return 0;
1774
Chris Wilsonb4716182015-04-27 13:41:17 +01001775 requests[n++] = i915_gem_request_reference(req);
1776 } else {
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001777 for (i = 0; i < I915_NUM_ENGINES; i++) {
Chris Wilsonb4716182015-04-27 13:41:17 +01001778 struct drm_i915_gem_request *req;
1779
1780 req = obj->last_read_req[i];
1781 if (req == NULL)
1782 continue;
1783
Chris Wilsonb4716182015-04-27 13:41:17 +01001784 requests[n++] = i915_gem_request_reference(req);
1785 }
1786 }
1787
1788 mutex_unlock(&dev->struct_mutex);
Chris Wilson299259a2016-04-13 17:35:06 +01001789 ret = 0;
Chris Wilsonb4716182015-04-27 13:41:17 +01001790 for (i = 0; ret == 0 && i < n; i++)
Chris Wilson299259a2016-04-13 17:35:06 +01001791 ret = __i915_wait_request(requests[i], true, NULL, rps);
Chris Wilsonb4716182015-04-27 13:41:17 +01001792 mutex_lock(&dev->struct_mutex);
1793
Chris Wilsonb4716182015-04-27 13:41:17 +01001794 for (i = 0; i < n; i++) {
1795 if (ret == 0)
1796 i915_gem_object_retire_request(obj, requests[i]);
1797 i915_gem_request_unreference(requests[i]);
1798 }
1799
1800 return ret;
Chris Wilson3236f572012-08-24 09:35:09 +01001801}
1802
Chris Wilson2e1b8732015-04-27 13:41:22 +01001803static struct intel_rps_client *to_rps_client(struct drm_file *file)
1804{
1805 struct drm_i915_file_private *fpriv = file->driver_priv;
1806 return &fpriv->rps;
1807}
1808
Chris Wilsonaeecc962016-06-17 14:46:39 -03001809static enum fb_op_origin
1810write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1811{
1812 return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
1813 ORIGIN_GTT : ORIGIN_CPU;
1814}
1815
Eric Anholt673a3942008-07-30 12:06:12 -07001816/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001817 * Called when user space prepares to use an object with the CPU, either
1818 * through the mmap ioctl's mapping or a GTT mapping.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001819 * @dev: drm device
1820 * @data: ioctl data blob
1821 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001822 */
1823int
1824i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001825 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001826{
1827 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001828 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001829 uint32_t read_domains = args->read_domains;
1830 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001831 int ret;
1832
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001833 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +01001834 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001835 return -EINVAL;
1836
Chris Wilson21d509e2009-06-06 09:46:02 +01001837 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001838 return -EINVAL;
1839
1840 /* Having something in the write domain implies it's in the read
1841 * domain, and only that read domain. Enforce that in the request.
1842 */
1843 if (write_domain != 0 && read_domains != write_domain)
1844 return -EINVAL;
1845
Chris Wilson76c1dec2010-09-25 11:22:51 +01001846 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001847 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001848 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001849
Chris Wilsona8ad0bd2016-05-09 11:04:54 +01001850 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001851 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001852 ret = -ENOENT;
1853 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +01001854 }
Jesse Barnes652c3932009-08-17 13:31:43 -07001855
Chris Wilson3236f572012-08-24 09:35:09 +01001856 /* Try to flush the object off the GPU without holding the lock.
1857 * We will repeat the flush holding the lock in the normal manner
1858 * to catch cases where we are gazumped.
1859 */
Chris Wilson6e4930f2014-02-07 18:37:06 -02001860 ret = i915_gem_object_wait_rendering__nonblocking(obj,
Chris Wilson2e1b8732015-04-27 13:41:22 +01001861 to_rps_client(file),
Chris Wilson6e4930f2014-02-07 18:37:06 -02001862 !write_domain);
Chris Wilson3236f572012-08-24 09:35:09 +01001863 if (ret)
1864 goto unref;
1865
Chris Wilson43566de2015-01-02 16:29:29 +05301866 if (read_domains & I915_GEM_DOMAIN_GTT)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001867 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Chris Wilson43566de2015-01-02 16:29:29 +05301868 else
Eric Anholte47c68e2008-11-14 13:35:19 -08001869 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001870
Daniel Vetter031b6982015-06-26 19:35:16 +02001871 if (write_domain != 0)
Chris Wilsonaeecc962016-06-17 14:46:39 -03001872 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
Daniel Vetter031b6982015-06-26 19:35:16 +02001873
Chris Wilson3236f572012-08-24 09:35:09 +01001874unref:
Chris Wilson05394f32010-11-08 19:18:58 +00001875 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001876unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001877 mutex_unlock(&dev->struct_mutex);
1878 return ret;
1879}
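/*
 * Sketch of a typical userspace call into the set-domain ioctl above, e.g.
 * before reading an object back with the CPU. Assumed names (illustrative
 * only): `fd` is a DRM fd, `handle` a GEM handle, `gem_set_domain_cpu` a
 * hypothetical helper; error handling elided.
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int gem_set_domain_cpu(int fd, uint32_t handle, int for_write)
 *	{
 *		struct drm_i915_gem_set_domain sd = {
 *			.handle = handle,
 *			.read_domains = I915_GEM_DOMAIN_CPU,
 *			// write_domain of 0 requests read-only access
 *			.write_domain = for_write ? I915_GEM_DOMAIN_CPU : 0,
 *		};
 *
 *		return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *	}
 */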
1880
1881/**
1882 * Called when user space has done writes to this buffer
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001883 * @dev: drm device
1884 * @data: ioctl data blob
1885 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001886 */
1887int
1888i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001889 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001890{
1891 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001892 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001893 int ret = 0;
1894
Chris Wilson76c1dec2010-09-25 11:22:51 +01001895 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001896 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001897 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001898
Chris Wilsona8ad0bd2016-05-09 11:04:54 +01001899 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001900 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001901 ret = -ENOENT;
1902 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001903 }
1904
Eric Anholt673a3942008-07-30 12:06:12 -07001905 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson2c225692013-08-09 12:26:45 +01001906 if (obj->pin_display)
Daniel Vettere62b59e2015-01-21 14:53:48 +01001907 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08001908
Chris Wilson05394f32010-11-08 19:18:58 +00001909 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001910unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001911 mutex_unlock(&dev->struct_mutex);
1912 return ret;
1913}
1914
1915/**
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001916 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1917 * it is mapped to.
1918 * @dev: drm device
1919 * @data: ioctl data blob
1920 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001921 *
1922 * While the mapping holds a reference on the contents of the object, it doesn't
1923 * imply a ref on the object itself.
Daniel Vetter34367382014-10-16 12:28:18 +02001924 *
1925 * IMPORTANT:
1926 *
1927 * DRM driver writers who look at this function as an example for how to do GEM
1928 * mmap support, please don't implement mmap support like here. The modern way
1929 * to implement DRM mmap support is with an mmap offset ioctl (like
1930 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1931 * That way debug tooling like valgrind will understand what's going on, hiding
1932 * the mmap call in a driver private ioctl will break that. The i915 driver only
1933 * does cpu mmaps this way because we didn't know better.
Eric Anholt673a3942008-07-30 12:06:12 -07001934 */
1935int
1936i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001937 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001938{
1939 struct drm_i915_gem_mmap *args = data;
1940 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001941 unsigned long addr;
1942
Akash Goel1816f922015-01-02 16:29:30 +05301943 if (args->flags & ~(I915_MMAP_WC))
1944 return -EINVAL;
1945
Borislav Petkov568a58e2016-03-29 17:42:01 +02001946 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
Akash Goel1816f922015-01-02 16:29:30 +05301947 return -ENODEV;
1948
Chris Wilsona8ad0bd2016-05-09 11:04:54 +01001949 obj = drm_gem_object_lookup(file, args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07001950 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001951 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001952
Daniel Vetter1286ff72012-05-10 15:25:09 +02001953 /* prime objects have no backing filp to GEM mmap
1954 * pages from.
1955 */
1956 if (!obj->filp) {
1957 drm_gem_object_unreference_unlocked(obj);
1958 return -EINVAL;
1959 }
1960
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001961 addr = vm_mmap(obj->filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001962 PROT_READ | PROT_WRITE, MAP_SHARED,
1963 args->offset);
Akash Goel1816f922015-01-02 16:29:30 +05301964 if (args->flags & I915_MMAP_WC) {
1965 struct mm_struct *mm = current->mm;
1966 struct vm_area_struct *vma;
1967
Michal Hocko80a89a52016-05-23 16:26:11 -07001968 if (down_write_killable(&mm->mmap_sem)) {
1969 drm_gem_object_unreference_unlocked(obj);
1970 return -EINTR;
1971 }
Akash Goel1816f922015-01-02 16:29:30 +05301972 vma = find_vma(mm, addr);
1973 if (vma)
1974 vma->vm_page_prot =
1975 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1976 else
1977 addr = -ENOMEM;
1978 up_write(&mm->mmap_sem);
Chris Wilsonaeecc962016-06-17 14:46:39 -03001979
1980 /* This may race, but that's ok, it only gets set */
1981 WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
Akash Goel1816f922015-01-02 16:29:30 +05301982 }
Luca Barbieribc9025b2010-02-09 05:49:12 +00001983 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001984 if (IS_ERR((void *)addr))
1985 return addr;
1986
1987 args->addr_ptr = (uint64_t) addr;
1988
1989 return 0;
1990}
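/*
 * Sketch of the CPU mmap described above, including the write-combining flag
 * handled by this ioctl. Assumed names (illustrative only): `fd`, `handle`,
 * `size`, and the helper `gem_mmap_wc`; error handling elided.
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static void *gem_mmap_wc(int fd, uint32_t handle, uint64_t size)
 *	{
 *		struct drm_i915_gem_mmap arg = {
 *			.handle = handle,
 *			.offset = 0,
 *			.size = size,
 *			.flags = I915_MMAP_WC,	// requires PAT, see the check above
 *		};
 *
 *		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
 *			return NULL;
 *
 *		return (void *)(uintptr_t)arg.addr_ptr;
 *	}
 */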
1991
Jesse Barnesde151cf2008-11-12 10:03:55 -08001992/**
1993 * i915_gem_fault - fault a page into the GTT
Geliang Tangd9072a32015-09-15 05:58:44 -07001994 * @vma: VMA in question
1995 * @vmf: fault info
Jesse Barnesde151cf2008-11-12 10:03:55 -08001996 *
1997 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1998 * from userspace. The fault handler takes care of binding the object to
1999 * the GTT (if needed), allocating and programming a fence register (again,
2000 * only if needed based on whether the old reg is still valid or the object
2001 * is tiled) and inserting a new PTE into the faulting process.
2002 *
2003 * Note that the faulting process may involve evicting existing objects
2004 * from the GTT and/or fence registers to make room. So performance may
2005 * suffer if the GTT working set is large or there are few fence registers
2006 * left.
2007 */
2008int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2009{
Chris Wilson05394f32010-11-08 19:18:58 +00002010 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
2011 struct drm_device *dev = obj->base.dev;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002012 struct drm_i915_private *dev_priv = to_i915(dev);
2013 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03002014 struct i915_ggtt_view view = i915_ggtt_view_normal;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002015 pgoff_t page_offset;
2016 unsigned long pfn;
2017 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002018 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002019
Paulo Zanonif65c9162013-11-27 18:20:34 -02002020 intel_runtime_pm_get(dev_priv);
2021
Jesse Barnesde151cf2008-11-12 10:03:55 -08002022 /* We don't use vmf->pgoff since that has the fake offset */
2023 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
2024 PAGE_SHIFT;
2025
Chris Wilsond9bc7e92011-02-07 13:09:31 +00002026 ret = i915_mutex_lock_interruptible(dev);
2027 if (ret)
2028 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002029
Chris Wilsondb53a302011-02-03 11:57:46 +00002030 trace_i915_gem_object_fault(obj, page_offset, true, write);
2031
Chris Wilson6e4930f2014-02-07 18:37:06 -02002032 /* Try to flush the object off the GPU first without holding the lock.
2033 * Upon reacquiring the lock, we will perform our sanity checks and then
2034 * repeat the flush holding the lock in the normal manner to catch cases
2035 * where we are gazumped.
2036 */
2037 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
2038 if (ret)
2039 goto unlock;
2040
Chris Wilsoneb119bd2012-12-16 12:43:36 +00002041 /* Access to snoopable pages through the GTT is incoherent. */
2042 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
Chris Wilsonddeff6e2014-05-28 16:16:41 +01002043 ret = -EFAULT;
Chris Wilsoneb119bd2012-12-16 12:43:36 +00002044 goto unlock;
2045 }
2046
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03002047 /* Use a partial view if the object is bigger than the aperture. */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002048 if (obj->base.size >= ggtt->mappable_end &&
Joonas Lahtinene7ded2d2015-05-08 14:37:39 +03002049 obj->tiling_mode == I915_TILING_NONE) {
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03002050 static const unsigned int chunk_size = 256; // 1 MiB
Joonas Lahtinene7ded2d2015-05-08 14:37:39 +03002051
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03002052 memset(&view, 0, sizeof(view));
2053 view.type = I915_GGTT_VIEW_PARTIAL;
2054 view.params.partial.offset = rounddown(page_offset, chunk_size);
2055 view.params.partial.size =
2056 min_t(unsigned int,
2057 chunk_size,
2058 (vma->vm_end - vma->vm_start)/PAGE_SIZE -
2059 view.params.partial.offset);
2060 }
2061
2062 /* Now pin it into the GTT if needed */
2063 ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002064 if (ret)
2065 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002066
Chris Wilsonc9839302012-11-20 10:45:17 +00002067 ret = i915_gem_object_set_to_gtt_domain(obj, write);
2068 if (ret)
2069 goto unpin;
2070
2071 ret = i915_gem_object_get_fence(obj);
2072 if (ret)
2073 goto unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01002074
Chris Wilsonb90b91d2014-06-10 12:14:40 +01002075 /* Finally, remap it using the new GTT offset */
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03002076 pfn = ggtt->mappable_base +
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03002077 i915_gem_obj_ggtt_offset_view(obj, &view);
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002078 pfn >>= PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002079
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03002080 if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
2081 /* Overriding existing pages in partial view does not cause
2082 * us any trouble as TLBs are still valid because the fault
2083 * is due to userspace losing part of the mapping or never
2084 * having accessed it before (at this partial view's range).
2085 */
2086 unsigned long base = vma->vm_start +
2087 (view.params.partial.offset << PAGE_SHIFT);
2088 unsigned int i;
Chris Wilsonb90b91d2014-06-10 12:14:40 +01002089
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03002090 for (i = 0; i < view.params.partial.size; i++) {
2091 ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
Chris Wilsonb90b91d2014-06-10 12:14:40 +01002092 if (ret)
2093 break;
2094 }
2095
2096 obj->fault_mappable = true;
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03002097 } else {
2098 if (!obj->fault_mappable) {
2099 unsigned long size = min_t(unsigned long,
2100 vma->vm_end - vma->vm_start,
2101 obj->base.size);
2102 int i;
2103
2104 for (i = 0; i < size >> PAGE_SHIFT; i++) {
2105 ret = vm_insert_pfn(vma,
2106 (unsigned long)vma->vm_start + i * PAGE_SIZE,
2107 pfn + i);
2108 if (ret)
2109 break;
2110 }
2111
2112 obj->fault_mappable = true;
2113 } else
2114 ret = vm_insert_pfn(vma,
2115 (unsigned long)vmf->virtual_address,
2116 pfn + page_offset);
2117 }
Chris Wilsonc9839302012-11-20 10:45:17 +00002118unpin:
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03002119 i915_gem_object_ggtt_unpin_view(obj, &view);
Chris Wilsonc7150892009-09-23 00:43:56 +01002120unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002121 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00002122out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002123 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00002124 case -EIO:
Daniel Vetter2232f032014-09-04 09:36:18 +02002125 /*
2126 * We eat errors when the gpu is terminally wedged to avoid
2127 * userspace unduly crashing (gl has no provisions for mmaps to
2128 * fail). But any other -EIO isn't ours (e.g. swap in failure)
2129 * and so needs to be reported.
2130 */
2131 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
Paulo Zanonif65c9162013-11-27 18:20:34 -02002132 ret = VM_FAULT_SIGBUS;
2133 break;
2134 }
Chris Wilson045e7692010-11-07 09:18:22 +00002135 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02002136 /*
2137 * EAGAIN means the gpu is hung and we'll wait for the error
2138 * handler to reset everything when re-faulting in
2139 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00002140 */
Chris Wilsonc7150892009-09-23 00:43:56 +01002141 case 0:
2142 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00002143 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03002144 case -EBUSY:
2145 /*
2146 * EBUSY is ok: this just means that another thread
2147 * already did the job.
2148 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02002149 ret = VM_FAULT_NOPAGE;
2150 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002151 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02002152 ret = VM_FAULT_OOM;
2153 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02002154 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00002155 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02002156 ret = VM_FAULT_SIGBUS;
2157 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002158 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02002159 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02002160 ret = VM_FAULT_SIGBUS;
2161 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002162 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02002163
2164 intel_runtime_pm_put(dev_priv);
2165 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002166}
2167
2168/**
Chris Wilson901782b2009-07-10 08:18:50 +01002169 * i915_gem_release_mmap - remove physical page mappings
2170 * @obj: obj in question
2171 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002172 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01002173 * relinquish ownership of the pages back to the system.
2174 *
2175 * It is vital that we remove the page mapping if we have mapped a tiled
2176 * object through the GTT and then lose the fence register due to
2177 * resource pressure. Similarly if the object has been moved out of the
2178 * aperture, then pages mapped into userspace must be revoked. Removing the
2179 * mapping will then trigger a page fault on the next user access, allowing
2180 * fixup by i915_gem_fault().
2181 */
Eric Anholtd05ca302009-07-10 13:02:26 -07002182void
Chris Wilson05394f32010-11-08 19:18:58 +00002183i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01002184{
Chris Wilson349f2cc2016-04-13 17:35:12 +01002185 /* Serialisation between user GTT access and our code depends upon
2186 * revoking the CPU's PTE whilst the mutex is held. The next user
2187 * pagefault then has to wait until we release the mutex.
2188 */
2189 lockdep_assert_held(&obj->base.dev->struct_mutex);
2190
Chris Wilson6299f992010-11-24 12:23:44 +00002191 if (!obj->fault_mappable)
2192 return;
Chris Wilson901782b2009-07-10 08:18:50 +01002193
David Herrmann6796cb12014-01-03 14:24:19 +01002194 drm_vma_node_unmap(&obj->base.vma_node,
2195 obj->base.dev->anon_inode->i_mapping);
Chris Wilson349f2cc2016-04-13 17:35:12 +01002196
2197 /* Ensure that the CPU's PTE are revoked and there are not outstanding
2198 * memory transactions from userspace before we return. The TLB
2199 * flushing implied above by changing the PTE above *should* be
2200 * sufficient, an extra barrier here just provides us with a bit
2201 * of paranoid documentation about our requirement to serialise
2202 * memory writes before touching registers / GSM.
2203 */
2204 wmb();
2205
Chris Wilson6299f992010-11-24 12:23:44 +00002206 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01002207}
2208
Chris Wilsoneedd10f2014-06-16 08:57:44 +01002209void
2210i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
2211{
2212 struct drm_i915_gem_object *obj;
2213
2214 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
2215 i915_gem_release_mmap(obj);
2216}
2217
Imre Deak0fa87792013-01-07 21:47:35 +02002218uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07002219i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00002220{
Chris Wilsone28f8712011-07-18 13:11:49 -07002221 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002222
2223 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07002224 tiling_mode == I915_TILING_NONE)
2225 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002226
2227 /* Previous chips need a power-of-two fence region when tiling */
Tvrtko Ursulin7e22dbb2016-05-10 10:57:06 +01002228 if (IS_GEN3(dev))
Chris Wilsone28f8712011-07-18 13:11:49 -07002229 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002230 else
Chris Wilsone28f8712011-07-18 13:11:49 -07002231 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002232
Chris Wilsone28f8712011-07-18 13:11:49 -07002233 while (gtt_size < size)
2234 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002235
Chris Wilsone28f8712011-07-18 13:11:49 -07002236 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002237}
2238
Jesse Barnesde151cf2008-11-12 10:03:55 -08002239/**
2240 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002241 * @dev: drm device
2242 * @size: object size
2243 * @tiling_mode: tiling mode
2244 * @fenced: is fenced alignment required or not
Jesse Barnesde151cf2008-11-12 10:03:55 -08002245 *
2246 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01002247 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002248 */
Imre Deakd8651102013-01-07 21:47:33 +02002249uint32_t
2250i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2251 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002252{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002253 /*
2254 * Minimum alignment is 4k (GTT page size), but might be greater
2255 * if a fence register is needed for the object.
2256 */
Imre Deakd8651102013-01-07 21:47:33 +02002257 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07002258 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002259 return 4096;
2260
2261 /*
2262 * Previous chips need to be aligned to the size of the smallest
2263 * fence register that can contain the object.
2264 */
Chris Wilsone28f8712011-07-18 13:11:49 -07002265 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002266}
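/*
 * Worked example for the two helpers above: on gen3 a 1.5 MiB tiled object
 * starts from the 1 MiB minimum fence size and doubles once, so
 * i915_gem_get_gtt_size() reports 2 MiB and a fenced mapping must also be
 * 2 MiB aligned; on gen4+ (or for untiled objects) the object size is used
 * as-is and only the 4 KiB GTT page alignment applies.
 */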
2267
Chris Wilsond8cb5082012-08-11 15:41:03 +01002268static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2269{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002270 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsond8cb5082012-08-11 15:41:03 +01002271 int ret;
2272
Daniel Vetterda494d72012-12-20 15:11:16 +01002273 dev_priv->mm.shrinker_no_lock_stealing = true;
2274
Chris Wilsond8cb5082012-08-11 15:41:03 +01002275 ret = drm_gem_create_mmap_offset(&obj->base);
2276 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01002277 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002278
2279 /* Badly fragmented mmap space? The only way we can recover
2280 * space is by destroying unwanted objects. We can't randomly release
2281 * mmap_offsets as userspace expects them to be persistent for the
2282 * lifetime of the objects. The closest we can do is to release the
2283 * offsets on purgeable objects by truncating it and marking it purged,
2284 * which prevents userspace from ever using that object again.
2285 */
Chris Wilson21ab4e72014-09-09 11:16:08 +01002286 i915_gem_shrink(dev_priv,
2287 obj->base.size >> PAGE_SHIFT,
2288 I915_SHRINK_BOUND |
2289 I915_SHRINK_UNBOUND |
2290 I915_SHRINK_PURGEABLE);
Chris Wilsond8cb5082012-08-11 15:41:03 +01002291 ret = drm_gem_create_mmap_offset(&obj->base);
2292 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01002293 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002294
2295 i915_gem_shrink_all(dev_priv);
Daniel Vetterda494d72012-12-20 15:11:16 +01002296 ret = drm_gem_create_mmap_offset(&obj->base);
2297out:
2298 dev_priv->mm.shrinker_no_lock_stealing = false;
2299
2300 return ret;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002301}
2302
2303static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2304{
Chris Wilsond8cb5082012-08-11 15:41:03 +01002305 drm_gem_free_mmap_offset(&obj->base);
2306}
2307
Dave Airlieda6b51d2014-12-24 13:11:17 +10002308int
Dave Airlieff72145b2011-02-07 12:16:14 +10002309i915_gem_mmap_gtt(struct drm_file *file,
2310 struct drm_device *dev,
Dave Airlieda6b51d2014-12-24 13:11:17 +10002311 uint32_t handle,
Dave Airlieff72145b2011-02-07 12:16:14 +10002312 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002313{
Chris Wilson05394f32010-11-08 19:18:58 +00002314 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002315 int ret;
2316
Chris Wilson76c1dec2010-09-25 11:22:51 +01002317 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002318 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01002319 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002320
Chris Wilsona8ad0bd2016-05-09 11:04:54 +01002321 obj = to_intel_bo(drm_gem_object_lookup(file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00002322 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002323 ret = -ENOENT;
2324 goto unlock;
2325 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002326
Chris Wilson05394f32010-11-08 19:18:58 +00002327 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00002328 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00002329 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002330 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01002331 }
2332
Chris Wilsond8cb5082012-08-11 15:41:03 +01002333 ret = i915_gem_object_create_mmap_offset(obj);
2334 if (ret)
2335 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002336
David Herrmann0de23972013-07-24 21:07:52 +02002337 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002338
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002339out:
Chris Wilson05394f32010-11-08 19:18:58 +00002340 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002341unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002342 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002343 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002344}
2345
Dave Airlieff72145b2011-02-07 12:16:14 +10002346/**
2347 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2348 * @dev: DRM device
2349 * @data: GTT mapping ioctl data
2350 * @file: GEM object info
2351 *
2352 * Simply returns the fake offset to userspace so it can mmap it.
2353 * The mmap call will end up in drm_gem_mmap(), which will set things
2354 * up so we can get faults in the handler above.
2355 *
2356 * The fault handler will take care of binding the object into the GTT
2357 * (since it may have been evicted to make room for something), allocating
2358 * a fence register, and mapping the appropriate aperture address into
2359 * userspace.
2360 */
2361int
2362i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2363 struct drm_file *file)
2364{
2365 struct drm_i915_gem_mmap_gtt *args = data;
2366
Dave Airlieda6b51d2014-12-24 13:11:17 +10002367 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
Dave Airlieff72145b2011-02-07 12:16:14 +10002368}
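/*
 * Sketch of the two-step GTT mmap flow this ioctl enables: fetch the fake
 * offset here, then mmap() the DRM fd at that offset so the fault handler
 * above populates the PTEs on first access. Assumed names (illustrative
 * only): `fd`, `handle`, `size`, and the helper `gem_mmap_gtt`; error
 * handling elided.
 *
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static void *gem_mmap_gtt(int fd, uint32_t handle, uint64_t size)
 *	{
 *		struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
 *			return MAP_FAILED;
 *
 *		// arg.offset is a fake token into the DRM fd, not a real address.
 *		return mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, arg.offset);
 *	}
 */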
2369
Daniel Vetter225067e2012-08-20 10:23:20 +02002370/* Immediately discard the backing storage */
2371static void
2372i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01002373{
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002374 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02002375
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002376 if (obj->base.filp == NULL)
2377 return;
2378
Daniel Vetter225067e2012-08-20 10:23:20 +02002379 /* Our goal here is to return as much of the memory as
2380 * is possible back to the system as we are called from OOM.
2381 * To do this we must instruct the shmfs to drop all of its
2382 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01002383 */
Chris Wilson55372522014-03-25 13:23:06 +00002384 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
Daniel Vetter225067e2012-08-20 10:23:20 +02002385 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01002386}
Chris Wilsone5281cc2010-10-28 13:45:36 +01002387
Chris Wilson55372522014-03-25 13:23:06 +00002388/* Try to discard unwanted pages */
2389static void
2390i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
Daniel Vetter225067e2012-08-20 10:23:20 +02002391{
Chris Wilson55372522014-03-25 13:23:06 +00002392 struct address_space *mapping;
2393
2394 switch (obj->madv) {
2395 case I915_MADV_DONTNEED:
2396 i915_gem_object_truncate(obj);
2397 case __I915_MADV_PURGED:
2398 return;
2399 }
2400
2401 if (obj->base.filp == NULL)
2402 return;
2403
Al Viro93c76a32015-12-04 23:45:44 -05002404 mapping = obj->base.filp->f_mapping;
Chris Wilson55372522014-03-25 13:23:06 +00002405 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002406}
2407
Chris Wilson5cdf5882010-09-27 15:51:07 +01002408static void
Chris Wilson05394f32010-11-08 19:18:58 +00002409i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002410{
Dave Gordon85d12252016-05-20 11:54:06 +01002411 struct sgt_iter sgt_iter;
2412 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02002413 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02002414
Chris Wilson05394f32010-11-08 19:18:58 +00002415 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07002416
Chris Wilson6c085a72012-08-20 11:40:46 +02002417 ret = i915_gem_object_set_to_cpu_domain(obj, true);
Chris Wilsonf4457ae2016-04-13 17:35:08 +01002418 if (WARN_ON(ret)) {
Chris Wilson6c085a72012-08-20 11:40:46 +02002419 /* In the event of a disaster, abandon all caches and
2420 * hope for the best.
2421 */
Chris Wilson2c225692013-08-09 12:26:45 +01002422 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02002423 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2424 }
2425
Imre Deake2273302015-07-09 12:59:05 +03002426 i915_gem_gtt_finish_object(obj);
2427
Daniel Vetter6dacfd22011-09-12 21:30:02 +02002428 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07002429 i915_gem_object_save_bit_17_swizzle(obj);
2430
Chris Wilson05394f32010-11-08 19:18:58 +00002431 if (obj->madv == I915_MADV_DONTNEED)
2432 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01002433
Dave Gordon85d12252016-05-20 11:54:06 +01002434 for_each_sgt_page(page, sgt_iter, obj->pages) {
Chris Wilson05394f32010-11-08 19:18:58 +00002435 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01002436 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002437
Chris Wilson05394f32010-11-08 19:18:58 +00002438 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01002439 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002440
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002441 put_page(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002442 }
Chris Wilson05394f32010-11-08 19:18:58 +00002443 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002444
Chris Wilson9da3da62012-06-01 15:20:22 +01002445 sg_free_table(obj->pages);
2446 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01002447}
2448
Chris Wilsondd624af2013-01-15 12:39:35 +00002449int
Chris Wilson37e680a2012-06-07 15:38:42 +01002450i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2451{
2452 const struct drm_i915_gem_object_ops *ops = obj->ops;
2453
Chris Wilson2f745ad2012-09-04 21:02:58 +01002454 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01002455 return 0;
2456
Chris Wilsona5570172012-09-04 21:02:54 +01002457 if (obj->pages_pin_count)
2458 return -EBUSY;
2459
Ben Widawsky98438772013-07-31 17:00:12 -07002460 BUG_ON(i915_gem_obj_bound_any(obj));
Ben Widawsky3e123022013-07-31 17:00:04 -07002461
Chris Wilsona2165e32012-12-03 11:49:00 +00002462 /* ->put_pages might need to allocate memory for the bit17 swizzle
2463 * array, hence protect them from being reaped by removing them from gtt
2464 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07002465 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00002466
Chris Wilson0a798eb2016-04-08 12:11:11 +01002467 if (obj->mapping) {
Chris Wilsonfb8621d2016-04-08 12:11:14 +01002468 if (is_vmalloc_addr(obj->mapping))
2469 vunmap(obj->mapping);
2470 else
2471 kunmap(kmap_to_page(obj->mapping));
Chris Wilson0a798eb2016-04-08 12:11:11 +01002472 obj->mapping = NULL;
2473 }
2474
Chris Wilson37e680a2012-06-07 15:38:42 +01002475 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002476 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02002477
Chris Wilson55372522014-03-25 13:23:06 +00002478 i915_gem_object_invalidate(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02002479
2480 return 0;
2481}
2482
Chris Wilson37e680a2012-06-07 15:38:42 +01002483static int
Chris Wilson6c085a72012-08-20 11:40:46 +02002484i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002485{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002486 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002487 int page_count, i;
2488 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01002489 struct sg_table *st;
2490 struct scatterlist *sg;
Dave Gordon85d12252016-05-20 11:54:06 +01002491 struct sgt_iter sgt_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07002492 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02002493 unsigned long last_pfn = 0; /* suppress gcc warning */
Imre Deake2273302015-07-09 12:59:05 +03002494 int ret;
Chris Wilson6c085a72012-08-20 11:40:46 +02002495 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07002496
Chris Wilson6c085a72012-08-20 11:40:46 +02002497 /* Assert that the object is not currently in any GPU domain. As it
2498 * wasn't in the GTT, there shouldn't be any way it could have been in
2499 * a GPU cache
2500 */
2501 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2502 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2503
Chris Wilson9da3da62012-06-01 15:20:22 +01002504 st = kmalloc(sizeof(*st), GFP_KERNEL);
2505 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002506 return -ENOMEM;
2507
Chris Wilson9da3da62012-06-01 15:20:22 +01002508 page_count = obj->base.size / PAGE_SIZE;
2509 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01002510 kfree(st);
2511 return -ENOMEM;
2512 }
2513
2514 /* Get the list of pages out of our struct file. They'll be pinned
2515 * at this point until we release them.
2516 *
2517 * Fail silently without starting the shrinker
2518 */
Al Viro93c76a32015-12-04 23:45:44 -05002519 mapping = obj->base.filp->f_mapping;
Michal Hockoc62d2552015-11-06 16:28:49 -08002520 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
Mel Gormand0164ad2015-11-06 16:28:21 -08002521 gfp |= __GFP_NORETRY | __GFP_NOWARN;
Imre Deak90797e62013-02-18 19:28:03 +02002522 sg = st->sgl;
2523 st->nents = 0;
2524 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02002525 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2526 if (IS_ERR(page)) {
Chris Wilson21ab4e72014-09-09 11:16:08 +01002527 i915_gem_shrink(dev_priv,
2528 page_count,
2529 I915_SHRINK_BOUND |
2530 I915_SHRINK_UNBOUND |
2531 I915_SHRINK_PURGEABLE);
Chris Wilson6c085a72012-08-20 11:40:46 +02002532 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2533 }
2534 if (IS_ERR(page)) {
2535 /* We've tried hard to allocate the memory by reaping
2536 * our own buffer, now let the real VM do its job and
2537 * go down in flames if truly OOM.
2538 */
Chris Wilson6c085a72012-08-20 11:40:46 +02002539 i915_gem_shrink_all(dev_priv);
David Herrmannf461d1b2014-05-25 14:34:10 +02002540 page = shmem_read_mapping_page(mapping, i);
Imre Deake2273302015-07-09 12:59:05 +03002541 if (IS_ERR(page)) {
2542 ret = PTR_ERR(page);
Chris Wilson6c085a72012-08-20 11:40:46 +02002543 goto err_pages;
Imre Deake2273302015-07-09 12:59:05 +03002544 }
Chris Wilson6c085a72012-08-20 11:40:46 +02002545 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002546#ifdef CONFIG_SWIOTLB
2547 if (swiotlb_nr_tbl()) {
2548 st->nents++;
2549 sg_set_page(sg, page, PAGE_SIZE, 0);
2550 sg = sg_next(sg);
2551 continue;
2552 }
2553#endif
Imre Deak90797e62013-02-18 19:28:03 +02002554 if (!i || page_to_pfn(page) != last_pfn + 1) {
2555 if (i)
2556 sg = sg_next(sg);
2557 st->nents++;
2558 sg_set_page(sg, page, PAGE_SIZE, 0);
2559 } else {
2560 sg->length += PAGE_SIZE;
2561 }
2562 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03002563
2564 /* Check that the i965g/gm workaround works. */
2565 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07002566 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002567#ifdef CONFIG_SWIOTLB
2568 if (!swiotlb_nr_tbl())
2569#endif
2570 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01002571 obj->pages = st;
2572
Imre Deake2273302015-07-09 12:59:05 +03002573 ret = i915_gem_gtt_prepare_object(obj);
2574 if (ret)
2575 goto err_pages;
2576
Eric Anholt673a3942008-07-30 12:06:12 -07002577 if (i915_gem_object_needs_bit17_swizzle(obj))
2578 i915_gem_object_do_bit_17_swizzle(obj);
2579
Daniel Vetter656bfa32014-11-20 09:26:30 +01002580 if (obj->tiling_mode != I915_TILING_NONE &&
2581 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2582 i915_gem_object_pin_pages(obj);
2583
Eric Anholt673a3942008-07-30 12:06:12 -07002584 return 0;
2585
2586err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02002587 sg_mark_end(sg);
Dave Gordon85d12252016-05-20 11:54:06 +01002588 for_each_sgt_page(page, sgt_iter, st)
2589 put_page(page);
Chris Wilson9da3da62012-06-01 15:20:22 +01002590 sg_free_table(st);
2591 kfree(st);
Chris Wilson0820baf2014-03-25 13:23:03 +00002592
2593 /* shmemfs first checks if there is enough memory to allocate the page
2594 * and reports ENOSPC should there be insufficient, along with the usual
2595 * ENOMEM for a genuine allocation failure.
2596 *
2597 * We use ENOSPC in our driver to mean that we have run out of aperture
2598 * space and so want to translate the error from shmemfs back to our
2599 * usual understanding of ENOMEM.
2600 */
Imre Deake2273302015-07-09 12:59:05 +03002601 if (ret == -ENOSPC)
2602 ret = -ENOMEM;
2603
2604 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002605}
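/*
 * Note on the page-gathering loop above (illustrative commentary, not
 * driver code): physically contiguous pages are coalesced into a single
 * scatterlist entry, so pages at pfns 100, 101, 102 and 200 become two
 * sg entries, one of length 3 * PAGE_SIZE and one of PAGE_SIZE. The
 * exception is SWIOTLB, where every page keeps its own entry.
 */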
2606
Chris Wilson37e680a2012-06-07 15:38:42 +01002607/* Ensure that the associated pages are gathered from the backing storage
2608 * and pinned into our object. i915_gem_object_get_pages() may be called
2609 * multiple times before they are released by a single call to
2610 * i915_gem_object_put_pages() - once the pages are no longer referenced
2611 * either as a result of memory pressure (reaping pages under the shrinker)
2612 * or as the object is itself released.
2613 */
2614int
2615i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2616{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002617 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilson37e680a2012-06-07 15:38:42 +01002618 const struct drm_i915_gem_object_ops *ops = obj->ops;
2619 int ret;
2620
Chris Wilson2f745ad2012-09-04 21:02:58 +01002621 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01002622 return 0;
2623
Chris Wilson43e28f02013-01-08 10:53:09 +00002624 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00002625 DRM_DEBUG("Attempting to obtain a purgeable object\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00002626 return -EFAULT;
Chris Wilson43e28f02013-01-08 10:53:09 +00002627 }
2628
Chris Wilsona5570172012-09-04 21:02:54 +01002629 BUG_ON(obj->pages_pin_count);
2630
Chris Wilson37e680a2012-06-07 15:38:42 +01002631 ret = ops->get_pages(obj);
2632 if (ret)
2633 return ret;
2634
Ben Widawsky35c20a62013-05-31 11:28:48 -07002635 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilsonee286372015-04-07 16:20:25 +01002636
2637 obj->get_page.sg = obj->pages->sgl;
2638 obj->get_page.last = 0;
2639
Chris Wilson37e680a2012-06-07 15:38:42 +01002640 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002641}
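/*
 * Illustrative sketch (not driver code): a caller that needs temporary
 * CPU access to the backing store, with struct_mutex held. The
 * pin_pages/unpin_pages helpers are the pages_pin_count accessors used
 * throughout this file.
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... walk obj->pages (a struct sg_table) ...
 *	i915_gem_object_unpin_pages(obj);
 */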
2642
Dave Gordondd6034c2016-05-20 11:54:04 +01002643/* The 'mapping' part of i915_gem_object_pin_map() below */
2644static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
2645{
2646 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2647 struct sg_table *sgt = obj->pages;
Dave Gordon85d12252016-05-20 11:54:06 +01002648 struct sgt_iter sgt_iter;
2649 struct page *page;
Dave Gordonb338fa42016-05-20 11:54:05 +01002650 struct page *stack_pages[32];
2651 struct page **pages = stack_pages;
Dave Gordondd6034c2016-05-20 11:54:04 +01002652 unsigned long i = 0;
2653 void *addr;
2654
2655 /* A single page can always be kmapped */
2656 if (n_pages == 1)
2657 return kmap(sg_page(sgt->sgl));
2658
Dave Gordonb338fa42016-05-20 11:54:05 +01002659 if (n_pages > ARRAY_SIZE(stack_pages)) {
2660 /* Too big for stack -- allocate temporary array instead */
2661 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2662 if (!pages)
2663 return NULL;
2664 }
Dave Gordondd6034c2016-05-20 11:54:04 +01002665
Dave Gordon85d12252016-05-20 11:54:06 +01002666 for_each_sgt_page(page, sgt_iter, sgt)
2667 pages[i++] = page;
Dave Gordondd6034c2016-05-20 11:54:04 +01002668
2669 /* Check that we have the expected number of pages */
2670 GEM_BUG_ON(i != n_pages);
2671
2672 addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
2673
Dave Gordonb338fa42016-05-20 11:54:05 +01002674 if (pages != stack_pages)
2675 drm_free_large(pages);
Dave Gordondd6034c2016-05-20 11:54:04 +01002676
2677 return addr;
2678}
2679
2680/* get, pin, and map the pages of the object into kernel space */
Chris Wilson0a798eb2016-04-08 12:11:11 +01002681void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2682{
2683 int ret;
2684
2685 lockdep_assert_held(&obj->base.dev->struct_mutex);
2686
2687 ret = i915_gem_object_get_pages(obj);
2688 if (ret)
2689 return ERR_PTR(ret);
2690
2691 i915_gem_object_pin_pages(obj);
2692
Dave Gordondd6034c2016-05-20 11:54:04 +01002693 if (!obj->mapping) {
2694 obj->mapping = i915_gem_object_map(obj);
2695 if (!obj->mapping) {
Chris Wilson0a798eb2016-04-08 12:11:11 +01002696 i915_gem_object_unpin_pages(obj);
2697 return ERR_PTR(-ENOMEM);
2698 }
2699 }
2700
2701 return obj->mapping;
2702}
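/*
 * Illustrative sketch (not driver code): filling an object through the
 * pinned kernel mapping, with struct_mutex held. "data" and "len" are
 * placeholders, and i915_gem_object_unpin_map() is assumed to be the
 * matching helper that drops the pin taken here.
 *
 *	void *vaddr = i915_gem_object_pin_map(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 */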
2703
Ben Widawskye2d05a82013-09-24 09:57:58 -07002704void i915_vma_move_to_active(struct i915_vma *vma,
John Harrisonb2af0372015-05-29 17:43:50 +01002705 struct drm_i915_gem_request *req)
Ben Widawskye2d05a82013-09-24 09:57:58 -07002706{
Chris Wilsonb4716182015-04-27 13:41:17 +01002707 struct drm_i915_gem_object *obj = vma->obj;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002708 struct intel_engine_cs *engine;
John Harrisonb2af0372015-05-29 17:43:50 +01002709
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00002710 engine = i915_gem_request_get_engine(req);
Chris Wilsonb4716182015-04-27 13:41:17 +01002711
2712 /* Add a reference if we're newly entering the active list. */
2713 if (obj->active == 0)
2714 drm_gem_object_reference(&obj->base);
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00002715 obj->active |= intel_engine_flag(engine);
Chris Wilsonb4716182015-04-27 13:41:17 +01002716
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00002717 list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002718 i915_gem_request_assign(&obj->last_read_req[engine->id], req);
Chris Wilsonb4716182015-04-27 13:41:17 +01002719
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00002720 list_move_tail(&vma->vm_link, &vma->vm->active_list);
Ben Widawskye2d05a82013-09-24 09:57:58 -07002721}
2722
Chris Wilsoncaea7472010-11-12 13:53:37 +00002723static void
Chris Wilsonb4716182015-04-27 13:41:17 +01002724i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
2725{
Chris Wilsond501b1d2016-04-13 17:35:02 +01002726 GEM_BUG_ON(obj->last_write_req == NULL);
2727 GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
Chris Wilsonb4716182015-04-27 13:41:17 +01002728
2729 i915_gem_request_assign(&obj->last_write_req, NULL);
Rodrigo Vivide152b62015-07-07 16:28:51 -07002730 intel_fb_obj_flush(obj, true, ORIGIN_CS);
Chris Wilsonb4716182015-04-27 13:41:17 +01002731}
2732
2733static void
2734i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
Chris Wilsoncaea7472010-11-12 13:53:37 +00002735{
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002736 struct i915_vma *vma;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002737
Chris Wilsond501b1d2016-04-13 17:35:02 +01002738 GEM_BUG_ON(obj->last_read_req[ring] == NULL);
2739 GEM_BUG_ON(!(obj->active & (1 << ring)));
Chris Wilsonb4716182015-04-27 13:41:17 +01002740
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00002741 list_del_init(&obj->engine_list[ring]);
Chris Wilsonb4716182015-04-27 13:41:17 +01002742 i915_gem_request_assign(&obj->last_read_req[ring], NULL);
2743
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00002744 if (obj->last_write_req && obj->last_write_req->engine->id == ring)
Chris Wilsonb4716182015-04-27 13:41:17 +01002745 i915_gem_object_retire__write(obj);
2746
2747 obj->active &= ~(1 << ring);
2748 if (obj->active)
2749 return;
Chris Wilson65ce3022012-07-20 12:41:02 +01002750
Chris Wilson6c246952015-07-27 10:26:26 +01002751 /* Bump our place on the bound list to keep it roughly in LRU order
2752 * so that we don't steal from recently used but inactive objects
2753 * (unless we are forced to ofc!)
2754 */
2755 list_move_tail(&obj->global_list,
2756 &to_i915(obj->base.dev)->mm.bound_list);
2757
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00002758 list_for_each_entry(vma, &obj->vma_list, obj_link) {
2759 if (!list_empty(&vma->vm_link))
2760 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002761 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002762
John Harrison97b2a6a2014-11-24 18:49:26 +00002763 i915_gem_request_assign(&obj->last_fenced_req, NULL);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002764 drm_gem_object_unreference(&obj->base);
Chris Wilsonc8725f32014-03-17 12:21:55 +00002765}
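/*
 * Note (illustrative): obj->active is a bitmask with one bit per engine,
 * set via intel_engine_flag() in i915_vma_move_to_active() above and
 * cleared here one engine at a time. Only when the last bit drops does
 * the object lose the extra reference it gained on becoming active.
 */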
2766
Chris Wilson9d7730912012-11-27 16:22:52 +00002767static int
Chris Wilsonc0336662016-05-06 15:40:21 +01002768i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002769{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002770 struct intel_engine_cs *engine;
Chris Wilson29dcb572016-04-07 07:29:13 +01002771 int ret;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002772
Chris Wilson107f27a52012-12-10 13:56:17 +02002773 /* Carefully retire all requests without writing to the rings */
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002774 for_each_engine(engine, dev_priv) {
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00002775 ret = intel_engine_idle(engine);
Chris Wilson107f27a52012-12-10 13:56:17 +02002776 if (ret)
2777 return ret;
Chris Wilson9d7730912012-11-27 16:22:52 +00002778 }
Chris Wilsonc0336662016-05-06 15:40:21 +01002779 i915_gem_retire_requests(dev_priv);
Chris Wilson107f27a52012-12-10 13:56:17 +02002780
Chris Wilson688e6c72016-07-01 17:23:15 +01002781 /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
2782 if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
Chris Wilsonc81d4612016-07-01 17:23:25 +01002783 while (intel_kick_waiters(dev_priv) ||
2784 intel_kick_signalers(dev_priv))
Chris Wilson688e6c72016-07-01 17:23:15 +01002785 yield();
2786 }
Chris Wilson107f27a52012-12-10 13:56:17 +02002787
2788 /* Finally reset hw state */
Chris Wilson29dcb572016-04-07 07:29:13 +01002789 for_each_engine(engine, dev_priv)
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002790 intel_ring_init_seqno(engine, seqno);
Chris Wilson9d7730912012-11-27 16:22:52 +00002791
2792 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002793}
2794
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002795int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2796{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002797 struct drm_i915_private *dev_priv = to_i915(dev);
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002798 int ret;
2799
2800 if (seqno == 0)
2801 return -EINVAL;
2802
2803 /* HWS page needs to be set less than what we
2804 * will inject to ring
2805 */
Chris Wilsonc0336662016-05-06 15:40:21 +01002806 ret = i915_gem_init_seqno(dev_priv, seqno - 1);
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002807 if (ret)
2808 return ret;
2809
2810 /* Carefully set the last_seqno value so that wrap
2811 * detection still works
2812 */
2813 dev_priv->next_seqno = seqno;
2814 dev_priv->last_seqno = seqno - 1;
2815 if (dev_priv->last_seqno == 0)
2816 dev_priv->last_seqno--;
2817
2818 return 0;
2819}
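/*
 * Aside (illustrative): the wrap handling above relies on seqno
 * comparisons being done with signed subtraction. i915_seqno_passed()
 * lives in i915_drv.h and is assumed to be roughly:
 *
 *	static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 *	{
 *		return (s32)(seq1 - seq2) >= 0;
 *	}
 *
 * so keeping last_seqno just below the value injected into the ring
 * keeps wrap detection working.
 */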
2820
Chris Wilson9d7730912012-11-27 16:22:52 +00002821int
Chris Wilsonc0336662016-05-06 15:40:21 +01002822i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002823{
Chris Wilson9d7730912012-11-27 16:22:52 +00002824 /* reserve 0 for non-seqno */
2825 if (dev_priv->next_seqno == 0) {
Chris Wilsonc0336662016-05-06 15:40:21 +01002826 int ret = i915_gem_init_seqno(dev_priv, 0);
Chris Wilson9d7730912012-11-27 16:22:52 +00002827 if (ret)
2828 return ret;
2829
2830 dev_priv->next_seqno = 1;
2831 }
2832
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02002833 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
Chris Wilson9d7730912012-11-27 16:22:52 +00002834 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002835}
2836
Chris Wilson67d97da2016-07-04 08:08:31 +01002837static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
2838{
2839 struct drm_i915_private *dev_priv = engine->i915;
2840
2841 dev_priv->gt.active_engines |= intel_engine_flag(engine);
2842 if (dev_priv->gt.awake)
2843 return;
2844
2845 intel_runtime_pm_get_noresume(dev_priv);
2846 dev_priv->gt.awake = true;
2847
2848 i915_update_gfx_val(dev_priv);
2849 if (INTEL_GEN(dev_priv) >= 6)
2850 gen6_rps_busy(dev_priv);
2851
2852 queue_delayed_work(dev_priv->wq,
2853 &dev_priv->gt.retire_work,
2854 round_jiffies_up_relative(HZ));
2855}
2856
John Harrisonbf7dc5b2015-05-29 17:43:24 +01002857/*
 2858 * NB: This function is not allowed to fail. Doing so would mean the
2859 * request is not being tracked for completion but the work itself is
2860 * going to happen on the hardware. This would be a Bad Thing(tm).
2861 */
John Harrison75289872015-05-29 17:43:49 +01002862void __i915_add_request(struct drm_i915_gem_request *request,
John Harrison5b4a60c2015-05-29 17:43:34 +01002863 struct drm_i915_gem_object *obj,
2864 bool flush_caches)
Eric Anholt673a3942008-07-30 12:06:12 -07002865{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002866 struct intel_engine_cs *engine;
Oscar Mateo48e29f52014-07-24 17:04:29 +01002867 struct intel_ringbuffer *ringbuf;
Nick Hoath6d3d8272015-01-15 13:10:39 +00002868 u32 request_start;
Chris Wilson0251a962016-04-28 09:56:47 +01002869 u32 reserved_tail;
Chris Wilson3cce4692010-10-27 16:11:02 +01002870 int ret;
2871
Oscar Mateo48e29f52014-07-24 17:04:29 +01002872 if (WARN_ON(request == NULL))
John Harrisonbf7dc5b2015-05-29 17:43:24 +01002873 return;
Oscar Mateo48e29f52014-07-24 17:04:29 +01002874
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00002875 engine = request->engine;
John Harrison75289872015-05-29 17:43:49 +01002876 ringbuf = request->ringbuf;
2877
John Harrison29b1b412015-06-18 13:10:09 +01002878 /*
2879 * To ensure that this call will not fail, space for its emissions
2880 * should already have been reserved in the ring buffer. Let the ring
2881 * know that it is time to use that space up.
2882 */
Oscar Mateo48e29f52014-07-24 17:04:29 +01002883 request_start = intel_ring_get_tail(ringbuf);
Chris Wilson0251a962016-04-28 09:56:47 +01002884 reserved_tail = request->reserved_space;
2885 request->reserved_space = 0;
2886
Daniel Vettercc889e02012-06-13 20:45:19 +02002887 /*
2888 * Emit any outstanding flushes - execbuf can fail to emit the flush
2889 * after having emitted the batchbuffer command. Hence we need to fix
2890 * things up similar to emitting the lazy request. The difference here
2891 * is that the flush _must_ happen before the next request, no matter
2892 * what.
2893 */
John Harrison5b4a60c2015-05-29 17:43:34 +01002894 if (flush_caches) {
2895 if (i915.enable_execlists)
John Harrison4866d722015-05-29 17:43:55 +01002896 ret = logical_ring_flush_all_caches(request);
John Harrison5b4a60c2015-05-29 17:43:34 +01002897 else
John Harrison4866d722015-05-29 17:43:55 +01002898 ret = intel_ring_flush_all_caches(request);
John Harrison5b4a60c2015-05-29 17:43:34 +01002899 /* Not allowed to fail! */
2900 WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
2901 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002902
Chris Wilson7c90b7d2016-04-07 07:29:17 +01002903 trace_i915_gem_request_add(request);
Eric Anholt673a3942008-07-30 12:06:12 -07002904
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002905 request->head = request_start;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002906
2907 /* Whilst this request exists, batch_obj will be on the
2908 * active_list, and so will hold the active reference. Only when this
 2909 * request is retired will the batch_obj be moved onto the
2910 * inactive_list and lose its active reference. Hence we do not need
2911 * to explicitly hold another reference here.
2912 */
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002913 request->batch_obj = obj;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002914
Chris Wilson7c90b7d2016-04-07 07:29:17 +01002915 /* Seal the request and mark it as pending execution. Note that
2916 * we may inspect this state, without holding any locks, during
2917 * hangcheck. Hence we apply the barrier to ensure that we do not
2918 * see a more recent value in the hws than we are tracking.
2919 */
Eric Anholt673a3942008-07-30 12:06:12 -07002920 request->emitted_jiffies = jiffies;
Chris Wilson7c90b7d2016-04-07 07:29:17 +01002921 request->previous_seqno = engine->last_submitted_seqno;
2922 smp_store_mb(engine->last_submitted_seqno, request->seqno);
2923 list_add_tail(&request->list, &engine->request_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002924
Eric Anholt673a3942008-07-30 12:06:12 -07002925 /* Record the position of the start of the request so that
2926 * should we detect the updated seqno part-way through the
2927 * GPU processing the request, we never over-estimate the
2928 * position of the head.
2929 */
2930 request->postfix = intel_ring_get_tail(ringbuf);
Chris Wilsondb53a302011-02-03 11:57:46 +00002931
Eric Anholt673a3942008-07-30 12:06:12 -07002932 if (i915.enable_execlists)
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002933 ret = engine->emit_request(request);
Eric Anholt673a3942008-07-30 12:06:12 -07002934 else {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002935 ret = engine->add_request(request);
Eric Anholt673a3942008-07-30 12:06:12 -07002936
2937 request->tail = intel_ring_get_tail(ringbuf);
2938 }
2939 /* Not allowed to fail! */
2940 WARN(ret, "emit|add_request failed: %d!\n", ret);
John Harrison29b1b412015-06-18 13:10:09 +01002941 /* Sanity check that the reserved size was large enough. */
Chris Wilson0251a962016-04-28 09:56:47 +01002942 ret = intel_ring_get_tail(ringbuf) - request_start;
2943 if (ret < 0)
2944 ret += ringbuf->size;
2945 WARN_ONCE(ret > reserved_tail,
2946 "Not enough space reserved (%d bytes) "
2947 "for adding the request (%d bytes)\n",
2948 reserved_tail, ret);
Chris Wilson67d97da2016-07-04 08:08:31 +01002949
2950 i915_gem_mark_busy(engine);
Eric Anholt673a3942008-07-30 12:06:12 -07002951}
2952
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002953static bool i915_context_is_banned(const struct i915_gem_context *ctx)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002954{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002955 unsigned long elapsed;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002956
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002957 if (ctx->hang_stats.banned)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002958 return true;
2959
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002960 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
Chris Wilson676fa572014-12-24 08:13:39 -08002961 if (ctx->hang_stats.ban_period_seconds &&
2962 elapsed <= ctx->hang_stats.ban_period_seconds) {
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002963 DRM_DEBUG("context hanging too fast, banning!\n");
2964 return true;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002965 }
2966
2967 return false;
2968}
2969
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002970static void i915_set_reset_status(struct i915_gem_context *ctx,
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002971 const bool guilty)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002972{
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002973 struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002974
2975 if (guilty) {
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002976 hs->banned = i915_context_is_banned(ctx);
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002977 hs->batch_active++;
2978 hs->guilty_ts = get_seconds();
2979 } else {
2980 hs->batch_pending++;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002981 }
2982}
2983
John Harrisonabfe2622014-11-24 18:49:24 +00002984void i915_gem_request_free(struct kref *req_ref)
2985{
2986 struct drm_i915_gem_request *req = container_of(req_ref,
2987 typeof(*req), ref);
Chris Wilsonefab6d82015-04-07 16:20:57 +01002988 kmem_cache_free(req->i915->requests, req);
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002989}
2990
Dave Gordon26827082016-01-19 19:02:53 +00002991static inline int
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002992__i915_gem_request_alloc(struct intel_engine_cs *engine,
Chris Wilsone2efd132016-05-24 14:53:34 +01002993 struct i915_gem_context *ctx,
Dave Gordon26827082016-01-19 19:02:53 +00002994 struct drm_i915_gem_request **req_out)
John Harrison6689cb22015-03-19 12:30:08 +00002995{
Chris Wilsonc0336662016-05-06 15:40:21 +01002996 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilson299259a2016-04-13 17:35:06 +01002997 unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
Daniel Vettereed29a52015-05-21 14:21:25 +02002998 struct drm_i915_gem_request *req;
John Harrison6689cb22015-03-19 12:30:08 +00002999 int ret;
John Harrison6689cb22015-03-19 12:30:08 +00003000
John Harrison217e46b2015-05-29 17:43:29 +01003001 if (!req_out)
3002 return -EINVAL;
3003
John Harrisonbccca492015-05-29 17:44:11 +01003004 *req_out = NULL;
John Harrison6689cb22015-03-19 12:30:08 +00003005
Chris Wilsonf4457ae2016-04-13 17:35:08 +01003006 /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
3007 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
3008 * and restart.
3009 */
3010 ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
Chris Wilson299259a2016-04-13 17:35:06 +01003011 if (ret)
3012 return ret;
3013
Daniel Vettereed29a52015-05-21 14:21:25 +02003014 req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
3015 if (req == NULL)
John Harrison6689cb22015-03-19 12:30:08 +00003016 return -ENOMEM;
3017
Chris Wilsonc0336662016-05-06 15:40:21 +01003018 ret = i915_gem_get_seqno(engine->i915, &req->seqno);
Chris Wilson9a0c1e22015-05-21 21:01:45 +01003019 if (ret)
3020 goto err;
John Harrison6689cb22015-03-19 12:30:08 +00003021
John Harrison40e895c2015-05-29 17:43:26 +01003022 kref_init(&req->ref);
3023 req->i915 = dev_priv;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00003024 req->engine = engine;
John Harrison40e895c2015-05-29 17:43:26 +01003025 req->ctx = ctx;
3026 i915_gem_context_reference(req->ctx);
John Harrison6689cb22015-03-19 12:30:08 +00003027
John Harrison29b1b412015-06-18 13:10:09 +01003028 /*
3029 * Reserve space in the ring buffer for all the commands required to
3030 * eventually emit this request. This is to guarantee that the
3031 * i915_add_request() call can't fail. Note that the reserve may need
3032 * to be redone if the request is not actually submitted straight
3033 * away, e.g. because a GPU scheduler has deferred it.
John Harrison29b1b412015-06-18 13:10:09 +01003034 */
Chris Wilson0251a962016-04-28 09:56:47 +01003035 req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
Chris Wilsonbfa01202016-04-28 09:56:48 +01003036
John Harrisonccd98fe2015-05-29 17:44:09 +01003037 if (i915.enable_execlists)
Chris Wilsonbfa01202016-04-28 09:56:48 +01003038 ret = intel_logical_ring_alloc_request_extras(req);
John Harrisonccd98fe2015-05-29 17:44:09 +01003039 else
Chris Wilsonbfa01202016-04-28 09:56:48 +01003040 ret = intel_ring_alloc_request_extras(req);
3041 if (ret)
3042 goto err_ctx;
John Harrison29b1b412015-06-18 13:10:09 +01003043
John Harrisonbccca492015-05-29 17:44:11 +01003044 *req_out = req;
John Harrison6689cb22015-03-19 12:30:08 +00003045 return 0;
Chris Wilson9a0c1e22015-05-21 21:01:45 +01003046
Chris Wilsonbfa01202016-04-28 09:56:48 +01003047err_ctx:
3048 i915_gem_context_unreference(ctx);
Chris Wilson9a0c1e22015-05-21 21:01:45 +01003049err:
3050 kmem_cache_free(dev_priv->requests, req);
3051 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003052}
3053
Dave Gordon26827082016-01-19 19:02:53 +00003054/**
3055 * i915_gem_request_alloc - allocate a request structure
3056 *
3057 * @engine: engine that we wish to issue the request on.
3058 * @ctx: context that the request will be associated with.
3059 * This can be NULL if the request is not directly related to
3060 * any specific user context, in which case this function will
3061 * choose an appropriate context to use.
3062 *
3063 * Returns a pointer to the allocated request if successful,
3064 * or an error code if not.
3065 */
3066struct drm_i915_gem_request *
3067i915_gem_request_alloc(struct intel_engine_cs *engine,
Chris Wilsone2efd132016-05-24 14:53:34 +01003068 struct i915_gem_context *ctx)
Dave Gordon26827082016-01-19 19:02:53 +00003069{
3070 struct drm_i915_gem_request *req;
3071 int err;
3072
3073 if (ctx == NULL)
Chris Wilsonc0336662016-05-06 15:40:21 +01003074 ctx = engine->i915->kernel_context;
Dave Gordon26827082016-01-19 19:02:53 +00003075 err = __i915_gem_request_alloc(engine, ctx, &req);
3076 return err ? ERR_PTR(err) : req;
3077}
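/*
 * Illustrative caller sketch (an assumption of this commentary, not part
 * of the driver): allocate a request, emit work against it, then submit
 * it via __i915_add_request() above. A NULL ctx selects the kernel
 * context as described in the kerneldoc; NULL obj / flush_caches=true
 * mirrors the common "no batch object" case.
 *
 *	struct drm_i915_gem_request *req;
 *
 *	req = i915_gem_request_alloc(engine, NULL);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... emit commands into req->ringbuf ...
 *	__i915_add_request(req, NULL, true);
 */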
3078
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02003079struct drm_i915_gem_request *
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003080i915_gem_find_active_request(struct intel_engine_cs *engine)
Chris Wilson9375e442010-09-19 12:21:28 +01003081{
Chris Wilson4db080f2013-12-04 11:37:09 +00003082 struct drm_i915_gem_request *request;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03003083
Chris Wilsonf69a02c2016-07-01 17:23:16 +01003084 /* We are called by the error capture and reset at a random
3085 * point in time. In particular, note that neither is crucially
3086 * ordered with an interrupt. After a hang, the GPU is dead and we
3087 * assume that no more writes can happen (we waited long enough for
3088 * all writes that were in transaction to be flushed) - adding an
3089 * extra delay for a recent interrupt is pointless. Hence, we do
3090 * not need an engine->irq_seqno_barrier() before the seqno reads.
3091 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003092 list_for_each_entry(request, &engine->request_list, list) {
Chris Wilsonf69a02c2016-07-01 17:23:16 +01003093 if (i915_gem_request_completed(request))
Chris Wilson4db080f2013-12-04 11:37:09 +00003094 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03003095
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02003096 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00003097 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02003098
3099 return NULL;
3100}
3101
Chris Wilson7b4d3a12016-07-04 08:08:37 +01003102static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02003103{
3104 struct drm_i915_gem_request *request;
3105 bool ring_hung;
3106
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003107 request = i915_gem_find_active_request(engine);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02003108 if (request == NULL)
3109 return;
3110
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003111 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02003112
Chris Wilson7b4d3a12016-07-04 08:08:37 +01003113 i915_set_reset_status(request->ctx, ring_hung);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003114 list_for_each_entry_continue(request, &engine->request_list, list)
Chris Wilson7b4d3a12016-07-04 08:08:37 +01003115 i915_set_reset_status(request->ctx, false);
Chris Wilson4db080f2013-12-04 11:37:09 +00003116}
3117
Chris Wilson7b4d3a12016-07-04 08:08:37 +01003118static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
Chris Wilson4db080f2013-12-04 11:37:09 +00003119{
Chris Wilson608c1a52015-09-03 13:01:40 +01003120 struct intel_ringbuffer *buffer;
3121
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003122 while (!list_empty(&engine->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00003123 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003124
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003125 obj = list_first_entry(&engine->active_list,
Chris Wilson05394f32010-11-08 19:18:58 +00003126 struct drm_i915_gem_object,
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00003127 engine_list[engine->id]);
Eric Anholt673a3942008-07-30 12:06:12 -07003128
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003129 i915_gem_object_retire__read(obj, engine->id);
Eric Anholt673a3942008-07-30 12:06:12 -07003130 }
Ben Widawsky1d62bee2014-01-01 10:15:13 -08003131
3132 /*
Oscar Mateodcb4c122014-11-13 10:28:10 +00003133	 * Clear the execlists queue before freeing the requests, as those
3134 * are the ones that keep the context and ringbuffer backing objects
3135 * pinned in place.
3136 */
Oscar Mateodcb4c122014-11-13 10:28:10 +00003137
Tomas Elf7de16912015-10-19 16:32:32 +01003138 if (i915.enable_execlists) {
Tvrtko Ursulin27af5ee2016-04-04 12:11:56 +01003139 /* Ensure irq handler finishes or is cancelled. */
3140 tasklet_kill(&engine->irq_tasklet);
Mika Kuoppala1197b4f2015-01-13 11:32:24 +02003141
Tvrtko Ursuline39d42f2016-04-28 09:56:58 +01003142 intel_execlists_cancel_requests(engine);
Oscar Mateodcb4c122014-11-13 10:28:10 +00003143 }
3144
3145 /*
Ben Widawsky1d62bee2014-01-01 10:15:13 -08003146 * We must free the requests after all the corresponding objects have
 3147	 * been moved off active lists, which is the same order the normal
 3148	 * retire_requests function uses. This is important if objects hold
3149 * implicit references on things like e.g. ppgtt address spaces through
3150 * the request.
3151 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003152 while (!list_empty(&engine->request_list)) {
Ben Widawsky1d62bee2014-01-01 10:15:13 -08003153 struct drm_i915_gem_request *request;
3154
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003155 request = list_first_entry(&engine->request_list,
Ben Widawsky1d62bee2014-01-01 10:15:13 -08003156 struct drm_i915_gem_request,
3157 list);
3158
Chris Wilsonb4716182015-04-27 13:41:17 +01003159 i915_gem_request_retire(request);
Ben Widawsky1d62bee2014-01-01 10:15:13 -08003160 }
Chris Wilson608c1a52015-09-03 13:01:40 +01003161
3162 /* Having flushed all requests from all queues, we know that all
3163 * ringbuffers must now be empty. However, since we do not reclaim
3164 * all space when retiring the request (to prevent HEADs colliding
3165 * with rapid ringbuffer wraparound) the amount of available space
3166 * upon reset is less than when we start. Do one more pass over
3167 * all the ringbuffers to reset last_retired_head.
3168 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003169 list_for_each_entry(buffer, &engine->buffers, link) {
Chris Wilson608c1a52015-09-03 13:01:40 +01003170 buffer->last_retired_head = buffer->tail;
3171 intel_ring_update_space(buffer);
3172 }
Chris Wilson2ed53a92016-04-07 07:29:11 +01003173
3174 intel_ring_init_seqno(engine, engine->last_submitted_seqno);
Chris Wilsonb06bc7e2016-07-13 09:10:31 +01003175
3176 engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
Eric Anholt673a3942008-07-30 12:06:12 -07003177}
3178
Chris Wilson069efc12010-09-30 16:53:18 +01003179void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07003180{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003181 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003182 struct intel_engine_cs *engine;
Eric Anholt673a3942008-07-30 12:06:12 -07003183
Chris Wilson4db080f2013-12-04 11:37:09 +00003184 /*
3185 * Before we free the objects from the requests, we need to inspect
3186 * them for finding the guilty party. As the requests only borrow
3187 * their reference to the objects, the inspection must be done first.
3188 */
Dave Gordonb4ac5af2016-03-24 11:20:38 +00003189 for_each_engine(engine, dev_priv)
Chris Wilson7b4d3a12016-07-04 08:08:37 +01003190 i915_gem_reset_engine_status(engine);
Chris Wilson4db080f2013-12-04 11:37:09 +00003191
Dave Gordonb4ac5af2016-03-24 11:20:38 +00003192 for_each_engine(engine, dev_priv)
Chris Wilson7b4d3a12016-07-04 08:08:37 +01003193 i915_gem_reset_engine_cleanup(engine);
Chris Wilsonb06bc7e2016-07-13 09:10:31 +01003194 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
Chris Wilsondfaae392010-09-22 10:31:52 +01003195
Ben Widawskyacce9ff2013-12-06 14:11:03 -08003196 i915_gem_context_reset(dev);
3197
Chris Wilson19b2dbd2013-06-12 10:15:12 +01003198 i915_gem_restore_fences(dev);
Chris Wilsonb4716182015-04-27 13:41:17 +01003199
3200 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003201}
3202
3203/**
 3204 * i915_gem_retire_requests_ring - retire requests whose sequence numbers have passed
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003205 * @engine: engine to retire requests on
Eric Anholt673a3942008-07-30 12:06:12 -07003206 */
Chris Wilson1cf0ba12014-05-05 09:07:33 +01003207void
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003208i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
Eric Anholt673a3942008-07-30 12:06:12 -07003209{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003210 WARN_ON(i915_verify_lists(engine->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003211
Chris Wilson832a3aa2015-03-18 18:19:22 +00003212 /* Retire requests first as we use it above for the early return.
3213 * If we retire requests last, we may use a later seqno and so clear
3214 * the requests lists without clearing the active list, leading to
3215 * confusion.
Chris Wilsone9103032014-01-07 11:45:14 +00003216 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003217 while (!list_empty(&engine->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003218 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07003219
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003220 request = list_first_entry(&engine->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07003221 struct drm_i915_gem_request,
3222 list);
Eric Anholt673a3942008-07-30 12:06:12 -07003223
Chris Wilsonf69a02c2016-07-01 17:23:16 +01003224 if (!i915_gem_request_completed(request))
Eric Anholt673a3942008-07-30 12:06:12 -07003225 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01003226
Chris Wilsonb4716182015-04-27 13:41:17 +01003227 i915_gem_request_retire(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01003228 }
3229
Chris Wilson832a3aa2015-03-18 18:19:22 +00003230 /* Move any buffers on the active list that are no longer referenced
3231 * by the ringbuffer to the flushing/inactive lists as appropriate,
3232 * before we free the context associated with the requests.
3233 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003234 while (!list_empty(&engine->active_list)) {
Chris Wilson832a3aa2015-03-18 18:19:22 +00003235 struct drm_i915_gem_object *obj;
3236
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003237 obj = list_first_entry(&engine->active_list,
3238 struct drm_i915_gem_object,
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00003239 engine_list[engine->id]);
Chris Wilson832a3aa2015-03-18 18:19:22 +00003240
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003241 if (!list_empty(&obj->last_read_req[engine->id]->list))
Chris Wilson832a3aa2015-03-18 18:19:22 +00003242 break;
3243
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003244 i915_gem_object_retire__read(obj, engine->id);
Chris Wilson832a3aa2015-03-18 18:19:22 +00003245 }
3246
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00003247 WARN_ON(i915_verify_lists(engine->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003248}
3249
Chris Wilson67d97da2016-07-04 08:08:31 +01003250void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01003251{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003252 struct intel_engine_cs *engine;
Chris Wilson67d97da2016-07-04 08:08:31 +01003253
Chris Wilson91c8a322016-07-05 10:40:23 +01003254 lockdep_assert_held(&dev_priv->drm.struct_mutex);
Chris Wilson67d97da2016-07-04 08:08:31 +01003255
3256 if (dev_priv->gt.active_engines == 0)
3257 return;
3258
3259 GEM_BUG_ON(!dev_priv->gt.awake);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01003260
Dave Gordonb4ac5af2016-03-24 11:20:38 +00003261 for_each_engine(engine, dev_priv) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003262 i915_gem_retire_requests_ring(engine);
Chris Wilson67d97da2016-07-04 08:08:31 +01003263 if (list_empty(&engine->request_list))
3264 dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003265 }
3266
Chris Wilson67d97da2016-07-04 08:08:31 +01003267 if (dev_priv->gt.active_engines == 0)
Chris Wilson1b51bce2016-07-04 08:08:32 +01003268 queue_delayed_work(dev_priv->wq,
3269 &dev_priv->gt.idle_work,
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003270 msecs_to_jiffies(100));
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01003271}
3272
Daniel Vetter75ef9da2010-08-21 00:25:16 +02003273static void
Eric Anholt673a3942008-07-30 12:06:12 -07003274i915_gem_retire_work_handler(struct work_struct *work)
3275{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003276 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01003277 container_of(work, typeof(*dev_priv), gt.retire_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01003278 struct drm_device *dev = &dev_priv->drm;
Eric Anholt673a3942008-07-30 12:06:12 -07003279
Chris Wilson891b48c2010-09-29 12:26:37 +01003280 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003281 if (mutex_trylock(&dev->struct_mutex)) {
Chris Wilson67d97da2016-07-04 08:08:31 +01003282 i915_gem_retire_requests(dev_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003283 mutex_unlock(&dev->struct_mutex);
3284 }
Chris Wilson67d97da2016-07-04 08:08:31 +01003285
3286 /* Keep the retire handler running until we are finally idle.
3287 * We do not need to do this test under locking as in the worst-case
3288 * we queue the retire worker once too often.
3289 */
Chris Wilsonb1379d42016-07-05 08:54:36 +01003290 if (READ_ONCE(dev_priv->gt.awake))
Chris Wilson67d97da2016-07-04 08:08:31 +01003291 queue_delayed_work(dev_priv->wq,
3292 &dev_priv->gt.retire_work,
Chris Wilsonbcb45082012-10-05 17:02:57 +01003293 round_jiffies_up_relative(HZ));
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003294}
Chris Wilson891b48c2010-09-29 12:26:37 +01003295
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003296static void
3297i915_gem_idle_work_handler(struct work_struct *work)
3298{
3299 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01003300 container_of(work, typeof(*dev_priv), gt.idle_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01003301 struct drm_device *dev = &dev_priv->drm;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00003302 struct intel_engine_cs *engine;
Chris Wilson67d97da2016-07-04 08:08:31 +01003303 unsigned int stuck_engines;
3304 bool rearm_hangcheck;
3305
3306 if (!READ_ONCE(dev_priv->gt.awake))
3307 return;
3308
3309 if (READ_ONCE(dev_priv->gt.active_engines))
3310 return;
3311
3312 rearm_hangcheck =
3313 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
3314
3315 if (!mutex_trylock(&dev->struct_mutex)) {
3316 /* Currently busy, come back later */
3317 mod_delayed_work(dev_priv->wq,
3318 &dev_priv->gt.idle_work,
3319 msecs_to_jiffies(50));
3320 goto out_rearm;
3321 }
3322
3323 if (dev_priv->gt.active_engines)
3324 goto out_unlock;
Zou Nan haid1b851f2010-05-21 09:08:57 +08003325
Dave Gordonb4ac5af2016-03-24 11:20:38 +00003326 for_each_engine(engine, dev_priv)
Chris Wilson67d97da2016-07-04 08:08:31 +01003327 i915_gem_batch_pool_fini(&engine->batch_pool);
Zou Nan hai852835f2010-05-21 09:08:56 +08003328
Chris Wilson67d97da2016-07-04 08:08:31 +01003329 GEM_BUG_ON(!dev_priv->gt.awake);
3330 dev_priv->gt.awake = false;
3331 rearm_hangcheck = false;
Daniel Vetter30ecad72015-12-09 09:29:36 +01003332
Chris Wilson67d97da2016-07-04 08:08:31 +01003333 stuck_engines = intel_kick_waiters(dev_priv);
3334 if (unlikely(stuck_engines)) {
3335 DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
3336 dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
3337 }
Chris Wilson35c94182015-04-07 16:20:37 +01003338
Chris Wilson67d97da2016-07-04 08:08:31 +01003339 if (INTEL_GEN(dev_priv) >= 6)
3340 gen6_rps_idle(dev_priv);
3341 intel_runtime_pm_put(dev_priv);
3342out_unlock:
3343 mutex_unlock(&dev->struct_mutex);
Chris Wilson35c94182015-04-07 16:20:37 +01003344
Chris Wilson67d97da2016-07-04 08:08:31 +01003345out_rearm:
3346 if (rearm_hangcheck) {
3347 GEM_BUG_ON(!dev_priv->gt.awake);
3348 i915_queue_hangcheck(dev_priv);
Chris Wilson35c94182015-04-07 16:20:37 +01003349 }
Eric Anholt673a3942008-07-30 12:06:12 -07003350}
3351
Ben Widawsky5816d642012-04-11 11:18:19 -07003352/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02003353 * i915_gem_object_flush_active - ensure an object will eventually get non-busy
 3354 * by flushing any required write domains, emitting any outstanding lazy
 3355 * request and retiring any completed requests.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003356 * @obj: object to flush
Daniel Vetter30dfebf2012-06-01 15:21:23 +02003357 */
3358static int
3359i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
3360{
John Harrisona5ac0f92015-05-29 17:44:15 +01003361 int i;
Daniel Vetter30dfebf2012-06-01 15:21:23 +02003362
Chris Wilsonb4716182015-04-27 13:41:17 +01003363 if (!obj->active)
3364 return 0;
John Harrison41c52412014-11-24 18:49:43 +00003365
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00003366 for (i = 0; i < I915_NUM_ENGINES; i++) {
Chris Wilsonb4716182015-04-27 13:41:17 +01003367 struct drm_i915_gem_request *req;
3368
3369 req = obj->last_read_req[i];
3370 if (req == NULL)
3371 continue;
3372
Chris Wilsonf69a02c2016-07-01 17:23:16 +01003373 if (i915_gem_request_completed(req))
Chris Wilsonb4716182015-04-27 13:41:17 +01003374 i915_gem_object_retire__read(obj, i);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02003375 }
3376
3377 return 0;
3378}
3379
3380/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003381 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003382 * @dev: drm device pointer
3383 * @data: ioctl data blob
3384 * @file: drm file pointer
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003385 *
3386 * Returns 0 if successful, else an error is returned with the remaining time in
3387 * the timeout parameter.
3388 * -ETIME: object is still busy after timeout
3389 * -ERESTARTSYS: signal interrupted the wait
 3390 * -ENOENT: object doesn't exist
3391 * Also possible, but rare:
3392 * -EAGAIN: GPU wedged
3393 * -ENOMEM: damn
3394 * -ENODEV: Internal IRQ fail
3395 * -E?: The add request failed
3396 *
3397 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3398 * non-zero timeout parameter the wait ioctl will wait for the given number of
3399 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3400 * without holding struct_mutex the object may become re-busied before this
3401 * function completes. A similar but shorter * race condition exists in the busy
3402 * ioctl
3403 */
3404int
3405i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3406{
3407 struct drm_i915_gem_wait *args = data;
3408 struct drm_i915_gem_object *obj;
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00003409 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
Chris Wilsonb4716182015-04-27 13:41:17 +01003410 int i, n = 0;
3411 int ret;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003412
Daniel Vetter11b5d512014-09-29 15:31:26 +02003413 if (args->flags != 0)
3414 return -EINVAL;
3415
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003416 ret = i915_mutex_lock_interruptible(dev);
3417 if (ret)
3418 return ret;
3419
Chris Wilsona8ad0bd2016-05-09 11:04:54 +01003420 obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003421 if (&obj->base == NULL) {
3422 mutex_unlock(&dev->struct_mutex);
3423 return -ENOENT;
3424 }
3425
Daniel Vetter30dfebf2012-06-01 15:21:23 +02003426 /* Need to make sure the object gets inactive eventually. */
3427 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003428 if (ret)
3429 goto out;
3430
Chris Wilsonb4716182015-04-27 13:41:17 +01003431 if (!obj->active)
John Harrison97b2a6a2014-11-24 18:49:26 +00003432 goto out;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003433
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003434 /* Do this after OLR check to make sure we make forward progress polling
Chris Wilson762e4582015-03-04 18:09:26 +00003435 * on this IOCTL with a timeout == 0 (like busy ioctl)
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003436 */
Chris Wilson762e4582015-03-04 18:09:26 +00003437 if (args->timeout_ns == 0) {
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003438 ret = -ETIME;
3439 goto out;
3440 }
3441
3442 drm_gem_object_unreference(&obj->base);
Chris Wilsonb4716182015-04-27 13:41:17 +01003443
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00003444 for (i = 0; i < I915_NUM_ENGINES; i++) {
Chris Wilsonb4716182015-04-27 13:41:17 +01003445 if (obj->last_read_req[i] == NULL)
3446 continue;
3447
3448 req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
3449 }
3450
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003451 mutex_unlock(&dev->struct_mutex);
3452
Chris Wilsonb4716182015-04-27 13:41:17 +01003453 for (i = 0; i < n; i++) {
3454 if (ret == 0)
Chris Wilson299259a2016-04-13 17:35:06 +01003455 ret = __i915_wait_request(req[i], true,
Chris Wilsonb4716182015-04-27 13:41:17 +01003456 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
Chris Wilsonb6aa0872015-12-02 09:13:46 +00003457 to_rps_client(file));
Chris Wilson73db04c2016-04-28 09:56:55 +01003458 i915_gem_request_unreference(req[i]);
Chris Wilsonb4716182015-04-27 13:41:17 +01003459 }
John Harrisonff865882014-11-24 18:49:28 +00003460 return ret;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003461
3462out:
3463 drm_gem_object_unreference(&obj->base);
3464 mutex_unlock(&dev->struct_mutex);
3465 return ret;
3466}
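/*
 * Illustrative userspace sketch (an assumption of this commentary, not
 * driver code): waiting up to one second for a buffer to go idle via a
 * libdrm-style drmIoctl() wrapper. Field names mirror the args usage
 * above.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = 1000000000,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) && errno == ETIME)
 *		... object was still busy after one second ...
 */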
3467
Chris Wilsonb4716182015-04-27 13:41:17 +01003468static int
3469__i915_gem_object_sync(struct drm_i915_gem_object *obj,
3470 struct intel_engine_cs *to,
John Harrison91af1272015-06-18 13:14:56 +01003471 struct drm_i915_gem_request *from_req,
3472 struct drm_i915_gem_request **to_req)
Chris Wilsonb4716182015-04-27 13:41:17 +01003473{
3474 struct intel_engine_cs *from;
3475 int ret;
3476
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00003477 from = i915_gem_request_get_engine(from_req);
Chris Wilsonb4716182015-04-27 13:41:17 +01003478 if (to == from)
3479 return 0;
3480
Chris Wilsonf69a02c2016-07-01 17:23:16 +01003481 if (i915_gem_request_completed(from_req))
Chris Wilsonb4716182015-04-27 13:41:17 +01003482 return 0;
3483
Chris Wilsonc0336662016-05-06 15:40:21 +01003484 if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
Chris Wilsona6f766f2015-04-27 13:41:20 +01003485 struct drm_i915_private *i915 = to_i915(obj->base.dev);
John Harrison91af1272015-06-18 13:14:56 +01003486 ret = __i915_wait_request(from_req,
Chris Wilsona6f766f2015-04-27 13:41:20 +01003487 i915->mm.interruptible,
3488 NULL,
3489 &i915->rps.semaphores);
Chris Wilsonb4716182015-04-27 13:41:17 +01003490 if (ret)
3491 return ret;
3492
John Harrison91af1272015-06-18 13:14:56 +01003493 i915_gem_object_retire_request(obj, from_req);
Chris Wilsonb4716182015-04-27 13:41:17 +01003494 } else {
3495 int idx = intel_ring_sync_index(from, to);
John Harrison91af1272015-06-18 13:14:56 +01003496 u32 seqno = i915_gem_request_get_seqno(from_req);
3497
3498 WARN_ON(!to_req);
Chris Wilsonb4716182015-04-27 13:41:17 +01003499
3500 if (seqno <= from->semaphore.sync_seqno[idx])
3501 return 0;
3502
John Harrison91af1272015-06-18 13:14:56 +01003503 if (*to_req == NULL) {
Dave Gordon26827082016-01-19 19:02:53 +00003504 struct drm_i915_gem_request *req;
3505
3506 req = i915_gem_request_alloc(to, NULL);
3507 if (IS_ERR(req))
3508 return PTR_ERR(req);
3509
3510 *to_req = req;
John Harrison91af1272015-06-18 13:14:56 +01003511 }
3512
John Harrison599d9242015-05-29 17:44:04 +01003513 trace_i915_gem_ring_sync_to(*to_req, from, from_req);
3514 ret = to->semaphore.sync_to(*to_req, from, seqno);
Chris Wilsonb4716182015-04-27 13:41:17 +01003515 if (ret)
3516 return ret;
3517
3518 /* We use last_read_req because sync_to()
3519 * might have just caused seqno wrap under
3520 * the radar.
3521 */
3522 from->semaphore.sync_seqno[idx] =
3523 i915_gem_request_get_seqno(obj->last_read_req[from->id]);
3524 }
3525
3526 return 0;
3527}
3528
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003529/**
Ben Widawsky5816d642012-04-11 11:18:19 -07003530 * i915_gem_object_sync - sync an object to a ring.
3531 *
3532 * @obj: object which may be in use on another ring.
3533 * @to: ring we wish to use the object on. May be NULL.
John Harrison91af1272015-06-18 13:14:56 +01003534 * @to_req: request we wish to use the object for. See below.
3535 * This will be allocated and returned if a request is
3536 * required but not passed in.
Ben Widawsky5816d642012-04-11 11:18:19 -07003537 *
3538 * This code is meant to abstract object synchronization with the GPU.
3539 * Calling with NULL implies synchronizing the object with the CPU
Chris Wilsonb4716182015-04-27 13:41:17 +01003540 * rather than a particular GPU ring. Conceptually we serialise writes
John Harrison91af1272015-06-18 13:14:56 +01003541 * between engines inside the GPU. We only allow one engine to write
Chris Wilsonb4716182015-04-27 13:41:17 +01003542 * into a buffer at any time, but multiple readers. To ensure each has
3543 * a coherent view of memory, we must:
3544 *
3545 * - If there is an outstanding write request to the object, the new
3546 * request must wait for it to complete (either CPU or in hw, requests
3547 * on the same ring will be naturally ordered).
3548 *
3549 * - If we are a write request (pending_write_domain is set), the new
3550 * request must wait for outstanding read requests to complete.
Ben Widawsky5816d642012-04-11 11:18:19 -07003551 *
John Harrison91af1272015-06-18 13:14:56 +01003552 * For CPU synchronisation (NULL to) no request is required. For syncing with
3553 * rings to_req must be non-NULL. However, a request does not have to be
3554 * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
3555 * request will be allocated automatically and returned through *to_req. Note
3556 * that it is not guaranteed that commands will be emitted (because the system
3557 * might already be idle). Hence there is no need to create a request that
3558 * might never have any work submitted. Note further that if a request is
3559 * returned in *to_req, it is the responsibility of the caller to submit
3560 * that request (after potentially adding more work to it).
3561 *
Ben Widawsky5816d642012-04-11 11:18:19 -07003562 * Returns 0 if successful, else propagates up the lower layer error.
3563 */
Ben Widawsky2911a352012-04-05 14:47:36 -07003564int
3565i915_gem_object_sync(struct drm_i915_gem_object *obj,
John Harrison91af1272015-06-18 13:14:56 +01003566 struct intel_engine_cs *to,
3567 struct drm_i915_gem_request **to_req)
Ben Widawsky2911a352012-04-05 14:47:36 -07003568{
Chris Wilsonb4716182015-04-27 13:41:17 +01003569 const bool readonly = obj->base.pending_write_domain == 0;
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00003570 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
Chris Wilsonb4716182015-04-27 13:41:17 +01003571 int ret, i, n;
Ben Widawsky2911a352012-04-05 14:47:36 -07003572
Chris Wilsonb4716182015-04-27 13:41:17 +01003573 if (!obj->active)
Ben Widawsky2911a352012-04-05 14:47:36 -07003574 return 0;
3575
Chris Wilsonb4716182015-04-27 13:41:17 +01003576 if (to == NULL)
3577 return i915_gem_object_wait_rendering(obj, readonly);
Ben Widawsky2911a352012-04-05 14:47:36 -07003578
Chris Wilsonb4716182015-04-27 13:41:17 +01003579 n = 0;
3580 if (readonly) {
3581 if (obj->last_write_req)
3582 req[n++] = obj->last_write_req;
3583 } else {
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00003584 for (i = 0; i < I915_NUM_ENGINES; i++)
Chris Wilsonb4716182015-04-27 13:41:17 +01003585 if (obj->last_read_req[i])
3586 req[n++] = obj->last_read_req[i];
3587 }
3588 for (i = 0; i < n; i++) {
John Harrison91af1272015-06-18 13:14:56 +01003589 ret = __i915_gem_object_sync(obj, to, req[i], to_req);
Chris Wilsonb4716182015-04-27 13:41:17 +01003590 if (ret)
3591 return ret;
3592 }
Ben Widawsky2911a352012-04-05 14:47:36 -07003593
Chris Wilsonb4716182015-04-27 13:41:17 +01003594 return 0;
Ben Widawsky2911a352012-04-05 14:47:36 -07003595}
3596
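/*
 * Editorial illustration, not part of the driver: a minimal sketch of how a
 * caller could honour the to_req contract documented above. It assumes
 * struct_mutex is already held and that i915_add_request() is the submission
 * helper available at this point in the tree; both are assumptions taken from
 * context. Kept inside #if 0 so the sketch is never compiled.
 */
#if 0
static int example_sync_then_submit(struct drm_i915_gem_object *obj,
				    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req = NULL;
	int ret;

	/* May allocate and hand back a request if sync commands are emitted. */
	ret = i915_gem_object_sync(obj, engine, &req);
	if (ret)
		return ret;

	/* Any request returned is now the caller's responsibility to submit. */
	if (req)
		i915_add_request(req);

	return 0;
}
#endif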
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01003597static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3598{
3599 u32 old_write_domain, old_read_domains;
3600
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01003601 /* Force a pagefault for domain tracking on next user access */
3602 i915_gem_release_mmap(obj);
3603
Keith Packardb97c3d92011-06-24 21:02:59 -07003604 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3605 return;
3606
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01003607 old_read_domains = obj->base.read_domains;
3608 old_write_domain = obj->base.write_domain;
3609
3610 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
3611 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
3612
3613 trace_i915_gem_object_change_domain(obj,
3614 old_read_domains,
3615 old_write_domain);
3616}
3617
Chris Wilson8ef85612016-04-28 09:56:39 +01003618static void __i915_vma_iounmap(struct i915_vma *vma)
3619{
3620 GEM_BUG_ON(vma->pin_count);
3621
3622 if (vma->iomap == NULL)
3623 return;
3624
3625 io_mapping_unmap(vma->iomap);
3626 vma->iomap = NULL;
3627}
3628
Tvrtko Ursuline9f24d52015-10-05 13:26:36 +01003629static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
Eric Anholt673a3942008-07-30 12:06:12 -07003630{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003631 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilsonfac5e232016-07-04 11:34:36 +01003632 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilson43e28f02013-01-08 10:53:09 +00003633 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003634
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003635 if (list_empty(&vma->obj_link))
Eric Anholt673a3942008-07-30 12:06:12 -07003636 return 0;
3637
Daniel Vetter0ff501c2013-08-29 19:50:31 +02003638 if (!drm_mm_node_allocated(&vma->node)) {
3639 i915_gem_vma_destroy(vma);
Daniel Vetter0ff501c2013-08-29 19:50:31 +02003640 return 0;
3641 }
Ben Widawsky433544b2013-08-13 18:09:06 -07003642
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003643 if (vma->pin_count)
Chris Wilson31d8d652012-05-24 19:11:20 +01003644 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07003645
Chris Wilsonc4670ad2012-08-20 10:23:27 +01003646 BUG_ON(obj->pages == NULL);
3647
Tvrtko Ursuline9f24d52015-10-05 13:26:36 +01003648 if (wait) {
3649 ret = i915_gem_object_wait_rendering(obj, false);
3650 if (ret)
3651 return ret;
3652 }
Chris Wilsona8198ee2011-04-13 22:04:09 +01003653
Chris Wilson596c5922016-02-26 11:03:20 +00003654 if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01003655 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01003656
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01003657 /* release the fence reg _after_ flushing */
3658 ret = i915_gem_object_put_fence(obj);
3659 if (ret)
3660 return ret;
Chris Wilson8ef85612016-04-28 09:56:39 +01003661
3662 __i915_vma_iounmap(vma);
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01003663 }
Daniel Vetter96b47b62009-12-15 17:50:00 +01003664
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003665 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00003666
Daniel Vetter777dc5b2015-04-14 17:35:12 +02003667 vma->vm->unbind_vma(vma);
Mika Kuoppala5e562f12015-04-30 11:02:31 +03003668 vma->bound = 0;
Ben Widawsky6f65e292013-12-06 14:10:56 -08003669
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003670 list_del_init(&vma->vm_link);
Chris Wilson596c5922016-02-26 11:03:20 +00003671 if (vma->is_ggtt) {
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003672 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3673 obj->map_and_fenceable = false;
3674 } else if (vma->ggtt_view.pages) {
3675 sg_free_table(vma->ggtt_view.pages);
3676 kfree(vma->ggtt_view.pages);
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003677 }
Chris Wilson016a65a2015-06-11 08:06:08 +01003678 vma->ggtt_view.pages = NULL;
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003679 }
Eric Anholt673a3942008-07-30 12:06:12 -07003680
Ben Widawsky2f633152013-07-17 12:19:03 -07003681 drm_mm_remove_node(&vma->node);
3682 i915_gem_vma_destroy(vma);
3683
3684 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02003685 * no more VMAs exist. */
Imre Deake2273302015-07-09 12:59:05 +03003686 if (list_empty(&obj->vma_list))
Ben Widawsky2f633152013-07-17 12:19:03 -07003687 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003688
Chris Wilson70903c32013-12-04 09:59:09 +00003689 /* And finally now the object is completely decoupled from this vma,
3690 * we can drop its hold on the backing storage and allow it to be
3691 * reaped by the shrinker.
3692 */
3693 i915_gem_object_unpin_pages(obj);
3694
Chris Wilson88241782011-01-07 17:09:48 +00003695 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00003696}
3697
Tvrtko Ursuline9f24d52015-10-05 13:26:36 +01003698int i915_vma_unbind(struct i915_vma *vma)
3699{
3700 return __i915_vma_unbind(vma, true);
3701}
3702
3703int __i915_vma_unbind_no_wait(struct i915_vma *vma)
3704{
3705 return __i915_vma_unbind(vma, false);
3706}
3707
Chris Wilson6e5a5be2016-06-24 14:55:57 +01003708int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003709{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003710 struct intel_engine_cs *engine;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00003711 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003712
Chris Wilson91c8a322016-07-05 10:40:23 +01003713 lockdep_assert_held(&dev_priv->drm.struct_mutex);
Chris Wilson6e5a5be2016-06-24 14:55:57 +01003714
Dave Gordonb4ac5af2016-03-24 11:20:38 +00003715 for_each_engine(engine, dev_priv) {
Chris Wilson62e63002016-06-24 14:55:52 +01003716 if (engine->last_context == NULL)
3717 continue;
Ben Widawskyb6c74882012-08-14 14:35:14 -07003718
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00003719 ret = intel_engine_idle(engine);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003720 if (ret)
3721 return ret;
3722 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003723
Chris Wilsonb4716182015-04-27 13:41:17 +01003724 WARN_ON(i915_verify_lists(dev));
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01003725 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003726}
3727
Chris Wilson4144f9b2014-09-11 08:43:48 +01003728static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
Chris Wilson42d6ab42012-07-26 11:49:32 +01003729 unsigned long cache_level)
3730{
Chris Wilson4144f9b2014-09-11 08:43:48 +01003731 struct drm_mm_node *gtt_space = &vma->node;
Chris Wilson42d6ab42012-07-26 11:49:32 +01003732 struct drm_mm_node *other;
3733
Chris Wilson4144f9b2014-09-11 08:43:48 +01003734 /*
3735 * On some machines we have to be careful when putting differing types
3736 * of snoopable memory together to avoid the prefetcher crossing memory
3737 * domains and dying. During vm initialisation, we decide whether or not
3738 * these constraints apply and set the drm_mm.color_adjust
3739 * appropriately.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003740 */
Chris Wilson4144f9b2014-09-11 08:43:48 +01003741 if (vma->vm->mm.color_adjust == NULL)
Chris Wilson42d6ab42012-07-26 11:49:32 +01003742 return true;
3743
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003744 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003745 return true;
3746
3747 if (list_empty(&gtt_space->node_list))
3748 return true;
3749
3750 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3751 if (other->allocated && !other->hole_follows && other->color != cache_level)
3752 return false;
3753
3754 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3755 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3756 return false;
3757
3758 return true;
3759}
3760
Jesse Barnesde151cf2008-11-12 10:03:55 -08003761/**
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003762 * Finds free space in the GTT aperture and binds the object or a view of it
3763 * there.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003764 * @obj: object to bind
3765 * @vm: address space to bind into
3766 * @ggtt_view: global gtt view if applicable
3767 * @alignment: requested alignment
3768 * @flags: mask of PIN_* flags to use
Eric Anholt673a3942008-07-30 12:06:12 -07003769 */
Daniel Vetter262de142014-02-14 14:01:20 +01003770static struct i915_vma *
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003771i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3772 struct i915_address_space *vm,
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003773 const struct i915_ggtt_view *ggtt_view,
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003774 unsigned alignment,
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003775 uint64_t flags)
Eric Anholt673a3942008-07-30 12:06:12 -07003776{
Chris Wilson05394f32010-11-08 19:18:58 +00003777 struct drm_device *dev = obj->base.dev;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003778 struct drm_i915_private *dev_priv = to_i915(dev);
3779 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Michel Thierry65bd3422015-07-29 17:23:58 +01003780 u32 fence_alignment, unfenced_alignment;
Michel Thierry101b5062015-10-01 13:33:57 +01003781 u32 search_flag, alloc_flag;
3782 u64 start, end;
Michel Thierry65bd3422015-07-29 17:23:58 +01003783 u64 size, fence_size;
Ben Widawsky2f633152013-07-17 12:19:03 -07003784 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003785 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003786
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003787 if (i915_is_ggtt(vm)) {
3788 u32 view_size;
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003789
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003790 if (WARN_ON(!ggtt_view))
3791 return ERR_PTR(-EINVAL);
3792
3793 view_size = i915_ggtt_view_size(obj, ggtt_view);
3794
3795 fence_size = i915_gem_get_gtt_size(dev,
3796 view_size,
3797 obj->tiling_mode);
3798 fence_alignment = i915_gem_get_gtt_alignment(dev,
3799 view_size,
3800 obj->tiling_mode,
3801 true);
3802 unfenced_alignment = i915_gem_get_gtt_alignment(dev,
3803 view_size,
3804 obj->tiling_mode,
3805 false);
3806 size = flags & PIN_MAPPABLE ? fence_size : view_size;
3807 } else {
3808 fence_size = i915_gem_get_gtt_size(dev,
3809 obj->base.size,
3810 obj->tiling_mode);
3811 fence_alignment = i915_gem_get_gtt_alignment(dev,
3812 obj->base.size,
3813 obj->tiling_mode,
3814 true);
3815 unfenced_alignment =
3816 i915_gem_get_gtt_alignment(dev,
3817 obj->base.size,
3818 obj->tiling_mode,
3819 false);
3820 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3821 }
Chris Wilsona00b10c2010-09-24 21:15:47 +01003822
Michel Thierry101b5062015-10-01 13:33:57 +01003823 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3824 end = vm->total;
3825 if (flags & PIN_MAPPABLE)
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003826 end = min_t(u64, end, ggtt->mappable_end);
Michel Thierry101b5062015-10-01 13:33:57 +01003827 if (flags & PIN_ZONE_4G)
Michel Thierry48ea1e32016-01-11 11:39:27 +00003828 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
Michel Thierry101b5062015-10-01 13:33:57 +01003829
Eric Anholt673a3942008-07-30 12:06:12 -07003830 if (alignment == 0)
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003831 alignment = flags & PIN_MAPPABLE ? fence_alignment :
Daniel Vetter5e783302010-11-14 22:32:36 +01003832 unfenced_alignment;
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003833 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003834 DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
3835 ggtt_view ? ggtt_view->type : 0,
3836 alignment);
Daniel Vetter262de142014-02-14 14:01:20 +01003837 return ERR_PTR(-EINVAL);
Eric Anholt673a3942008-07-30 12:06:12 -07003838 }
3839
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003840 /* If binding the object/GGTT view requires more space than the entire
3841 * aperture has, reject it early before evicting everything in a vain
3842 * attempt to find space.
Chris Wilson654fc602010-05-27 13:18:21 +01003843 */
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003844 if (size > end) {
Michel Thierry65bd3422015-07-29 17:23:58 +01003845 DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003846 ggtt_view ? ggtt_view->type : 0,
3847 size,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003848 flags & PIN_MAPPABLE ? "mappable" : "total",
Chris Wilsond23db882014-05-23 08:48:08 +02003849 end);
Daniel Vetter262de142014-02-14 14:01:20 +01003850 return ERR_PTR(-E2BIG);
Chris Wilson654fc602010-05-27 13:18:21 +01003851 }
3852
Chris Wilson37e680a2012-06-07 15:38:42 +01003853 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003854 if (ret)
Daniel Vetter262de142014-02-14 14:01:20 +01003855 return ERR_PTR(ret);
Chris Wilson6c085a72012-08-20 11:40:46 +02003856
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003857 i915_gem_object_pin_pages(obj);
3858
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003859 vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3860 i915_gem_obj_lookup_or_create_vma(obj, vm);
3861
Daniel Vetter262de142014-02-14 14:01:20 +01003862 if (IS_ERR(vma))
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003863 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003864
Chris Wilson506a8e82015-12-08 11:55:07 +00003865 if (flags & PIN_OFFSET_FIXED) {
3866 uint64_t offset = flags & PIN_OFFSET_MASK;
3867
3868 if (offset & (alignment - 1) || offset + size > end) {
3869 ret = -EINVAL;
3870 goto err_free_vma;
3871 }
3872 vma->node.start = offset;
3873 vma->node.size = size;
3874 vma->node.color = obj->cache_level;
3875 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3876 if (ret) {
3877 ret = i915_gem_evict_for_vma(vma);
3878 if (ret == 0)
3879 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3880 }
3881 if (ret)
3882 goto err_free_vma;
Michel Thierry101b5062015-10-01 13:33:57 +01003883 } else {
Chris Wilson506a8e82015-12-08 11:55:07 +00003884 if (flags & PIN_HIGH) {
3885 search_flag = DRM_MM_SEARCH_BELOW;
3886 alloc_flag = DRM_MM_CREATE_TOP;
3887 } else {
3888 search_flag = DRM_MM_SEARCH_DEFAULT;
3889 alloc_flag = DRM_MM_CREATE_DEFAULT;
3890 }
Michel Thierry101b5062015-10-01 13:33:57 +01003891
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003892search_free:
Chris Wilson506a8e82015-12-08 11:55:07 +00003893 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3894 size, alignment,
3895 obj->cache_level,
3896 start, end,
3897 search_flag,
3898 alloc_flag);
3899 if (ret) {
3900 ret = i915_gem_evict_something(dev, vm, size, alignment,
3901 obj->cache_level,
3902 start, end,
3903 flags);
3904 if (ret == 0)
3905 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003906
Chris Wilson506a8e82015-12-08 11:55:07 +00003907 goto err_free_vma;
3908 }
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003909 }
Chris Wilson4144f9b2014-09-11 08:43:48 +01003910 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003911 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003912 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003913 }
3914
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003915 trace_i915_vma_bind(vma, flags);
Daniel Vetter08755462015-04-20 09:04:05 -07003916 ret = i915_vma_bind(vma, obj->cache_level, flags);
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003917 if (ret)
Imre Deake2273302015-07-09 12:59:05 +03003918 goto err_remove_node;
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003919
Ben Widawsky35c20a62013-05-31 11:28:48 -07003920 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003921 list_add_tail(&vma->vm_link, &vm->inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003922
Daniel Vetter262de142014-02-14 14:01:20 +01003923 return vma;
Ben Widawsky2f633152013-07-17 12:19:03 -07003924
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003925err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003926 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003927err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003928 i915_gem_vma_destroy(vma);
Daniel Vetter262de142014-02-14 14:01:20 +01003929 vma = ERR_PTR(ret);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003930err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003931 i915_gem_object_unpin_pages(obj);
Daniel Vetter262de142014-02-14 14:01:20 +01003932 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003933}
3934
Chris Wilson000433b2013-08-08 14:41:09 +01003935bool
Chris Wilson2c225692013-08-09 12:26:45 +01003936i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3937 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003938{
Eric Anholt673a3942008-07-30 12:06:12 -07003939 /* If we don't have a page list set up, then we're not pinned
3940	 * to the GPU, and we can ignore the cache flush because it'll happen
3941 * again at bind time.
3942 */
Chris Wilson05394f32010-11-08 19:18:58 +00003943 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003944 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003945
Imre Deak769ce462013-02-13 21:56:05 +02003946 /*
3947 * Stolen memory is always coherent with the GPU as it is explicitly
3948 * marked as wc by the system, or the system is cache-coherent.
3949 */
Chris Wilson6a2c4232014-11-04 04:51:40 -08003950 if (obj->stolen || obj->phys_handle)
Chris Wilson000433b2013-08-08 14:41:09 +01003951 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003952
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003953 /* If the GPU is snooping the contents of the CPU cache,
3954 * we do not need to manually clear the CPU cache lines. However,
3955 * the caches are only snooped when the render cache is
3956 * flushed/invalidated. As we always have to emit invalidations
3957 * and flushes when moving into and out of the RENDER domain, correct
3958 * snooping behaviour occurs naturally as the result of our domain
3959 * tracking.
3960 */
Chris Wilson0f719792015-01-13 13:32:52 +00003961 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3962 obj->cache_dirty = true;
Chris Wilson000433b2013-08-08 14:41:09 +01003963 return false;
Chris Wilson0f719792015-01-13 13:32:52 +00003964 }
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003965
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003966 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003967 drm_clflush_sg(obj->pages);
Chris Wilson0f719792015-01-13 13:32:52 +00003968 obj->cache_dirty = false;
Chris Wilson000433b2013-08-08 14:41:09 +01003969
3970 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003971}
3972
3973/** Flushes the GTT write domain for the object if it's dirty. */
3974static void
Chris Wilson05394f32010-11-08 19:18:58 +00003975i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003976{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003977 uint32_t old_write_domain;
3978
Chris Wilson05394f32010-11-08 19:18:58 +00003979 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003980 return;
3981
Chris Wilson63256ec2011-01-04 18:42:07 +00003982 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003983 * to it immediately go to main memory as far as we know, so there's
3984 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003985 *
3986 * However, we do have to enforce the order so that all writes through
3987 * the GTT land before any writes to the device, such as updates to
3988 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08003989 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003990 wmb();
3991
Chris Wilson05394f32010-11-08 19:18:58 +00003992 old_write_domain = obj->base.write_domain;
3993 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003994
Rodrigo Vivide152b62015-07-07 16:28:51 -07003995 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
Daniel Vetterf99d7062014-06-19 16:01:59 +02003996
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003997 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003998 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003999 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08004000}
4001
4002/** Flushes the CPU write domain for the object if it's dirty. */
4003static void
Daniel Vettere62b59e2015-01-21 14:53:48 +01004004i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08004005{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004006 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08004007
Chris Wilson05394f32010-11-08 19:18:58 +00004008 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08004009 return;
4010
Daniel Vettere62b59e2015-01-21 14:53:48 +01004011 if (i915_gem_clflush_object(obj, obj->pin_display))
Chris Wilsonc0336662016-05-06 15:40:21 +01004012 i915_gem_chipset_flush(to_i915(obj->base.dev));
Chris Wilson000433b2013-08-08 14:41:09 +01004013
Chris Wilson05394f32010-11-08 19:18:58 +00004014 old_write_domain = obj->base.write_domain;
4015 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004016
Rodrigo Vivide152b62015-07-07 16:28:51 -07004017 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
Daniel Vetterf99d7062014-06-19 16:01:59 +02004018
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004019 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00004020 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004021 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08004022}
4023
Eric Anholt2ef7eea2008-11-10 10:53:25 -08004024/**
4025 * Moves a single object to the GTT read, and possibly write domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01004026 * @obj: object to act on
4027 * @write: ask for write access or read only
Eric Anholt2ef7eea2008-11-10 10:53:25 -08004028 *
4029 * This function returns when the move is complete, including waiting on
4030 * flushes to occur.
4031 */
Jesse Barnes79e53942008-11-07 14:24:08 -08004032int
Chris Wilson20217462010-11-23 15:26:33 +00004033i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08004034{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03004035 struct drm_device *dev = obj->base.dev;
4036 struct drm_i915_private *dev_priv = to_i915(dev);
4037 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004038 uint32_t old_write_domain, old_read_domains;
Chris Wilson43566de2015-01-02 16:29:29 +05304039 struct i915_vma *vma;
Eric Anholte47c68e2008-11-14 13:35:19 -08004040 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08004041
Chris Wilson8d7e3de2011-02-07 15:23:02 +00004042 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
4043 return 0;
4044
Chris Wilson0201f1e2012-07-20 12:41:01 +01004045 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00004046 if (ret)
4047 return ret;
4048
Chris Wilson43566de2015-01-02 16:29:29 +05304049 /* Flush and acquire obj->pages so that we are coherent through
4050 * direct access in memory with previous cached writes through
4051 * shmemfs and that our cache domain tracking remains valid.
4052 * For example, if the obj->filp was moved to swap without us
4053 * being notified and releasing the pages, we would mistakenly
4054 * continue to assume that the obj remained out of the CPU cached
4055 * domain.
4056 */
4057 ret = i915_gem_object_get_pages(obj);
4058 if (ret)
4059 return ret;
4060
Daniel Vettere62b59e2015-01-21 14:53:48 +01004061 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004062
Chris Wilsond0a57782012-10-09 19:24:37 +01004063 /* Serialise direct access to this object with the barriers for
4064 * coherent writes from the GPU, by effectively invalidating the
4065 * GTT domain upon first access.
4066 */
4067 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
4068 mb();
4069
Chris Wilson05394f32010-11-08 19:18:58 +00004070 old_write_domain = obj->base.write_domain;
4071 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08004072
Eric Anholt2ef7eea2008-11-10 10:53:25 -08004073 /* It should now be out of any other write domains, and we can update
4074 * the domain values for our changes.
4075 */
Chris Wilson05394f32010-11-08 19:18:58 +00004076 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
4077 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08004078 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00004079 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
4080 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
4081 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08004082 }
4083
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004084 trace_i915_gem_object_change_domain(obj,
4085 old_read_domains,
4086 old_write_domain);
4087
Chris Wilson8325a092012-04-24 15:52:35 +01004088 /* And bump the LRU for this access */
Chris Wilson43566de2015-01-02 16:29:29 +05304089 vma = i915_gem_obj_to_ggtt(obj);
4090 if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00004091 list_move_tail(&vma->vm_link,
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03004092 &ggtt->base.inactive_list);
Chris Wilson8325a092012-04-24 15:52:35 +01004093
Eric Anholte47c68e2008-11-14 13:35:19 -08004094 return 0;
4095}
4096
Chris Wilsonef55f922015-10-09 14:11:27 +01004097/**
4098 * Changes the cache-level of an object across all VMA.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01004099 * @obj: object to act on
4100 * @cache_level: new cache level to set for the object
Chris Wilsonef55f922015-10-09 14:11:27 +01004101 *
4102 * After this function returns, the object will be in the new cache-level
4103 * across all GTT and the contents of the backing storage will be coherent,
4104 * with respect to the new cache-level. In order to keep the backing storage
4105 * coherent for all users, we only allow a single cache level to be set
4106 * globally on the object and prevent it from being changed whilst the
4107 * hardware is reading from the object. That is if the object is currently
4108 * on the scanout it will be set to uncached (or equivalent display
4109 * cache coherency) and all non-MOCS GPU access will also be uncached so
4110 * that all direct access to the scanout remains coherent.
4111 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01004112int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
4113 enum i915_cache_level cache_level)
4114{
Daniel Vetter7bddb012012-02-09 17:15:47 +01004115 struct drm_device *dev = obj->base.dev;
Chris Wilsondf6f7832014-03-21 07:40:56 +00004116 struct i915_vma *vma, *next;
Chris Wilsonef55f922015-10-09 14:11:27 +01004117 bool bound = false;
Ville Syrjäläed75a552015-08-11 19:47:10 +03004118 int ret = 0;
Chris Wilsone4ffd172011-04-04 09:44:39 +01004119
4120 if (obj->cache_level == cache_level)
Ville Syrjäläed75a552015-08-11 19:47:10 +03004121 goto out;
Chris Wilsone4ffd172011-04-04 09:44:39 +01004122
Chris Wilsonef55f922015-10-09 14:11:27 +01004123 /* Inspect the list of currently bound VMA and unbind any that would
4124 * be invalid given the new cache-level. This is principally to
4125 * catch the issue of the CS prefetch crossing page boundaries and
4126 * reading an invalid PTE on older architectures.
4127 */
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00004128 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01004129 if (!drm_mm_node_allocated(&vma->node))
4130 continue;
4131
4132 if (vma->pin_count) {
4133 DRM_DEBUG("can not change the cache level of pinned objects\n");
4134 return -EBUSY;
4135 }
4136
Chris Wilson4144f9b2014-09-11 08:43:48 +01004137 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004138 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07004139 if (ret)
4140 return ret;
Chris Wilsonef55f922015-10-09 14:11:27 +01004141 } else
4142 bound = true;
Chris Wilson42d6ab42012-07-26 11:49:32 +01004143 }
4144
Chris Wilsonef55f922015-10-09 14:11:27 +01004145 /* We can reuse the existing drm_mm nodes but need to change the
4146 * cache-level on the PTE. We could simply unbind them all and
4147 * rebind with the correct cache-level on next use. However since
4148 * we already have a valid slot, dma mapping, pages etc, we may as
4149 * rewrite the PTE in the belief that doing so tramples upon less
4150 * state and so involves less work.
4151 */
4152 if (bound) {
4153 /* Before we change the PTE, the GPU must not be accessing it.
4154 * If we wait upon the object, we know that all the bound
4155 * VMA are no longer active.
4156 */
Chris Wilson2e2f3512015-04-27 13:41:14 +01004157 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsone4ffd172011-04-04 09:44:39 +01004158 if (ret)
4159 return ret;
4160
Chris Wilsonef55f922015-10-09 14:11:27 +01004161 if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
4162 /* Access to snoopable pages through the GTT is
4163 * incoherent and on some machines causes a hard
4164		 * lockup. Relinquish the CPU mmapping to force
4165 * userspace to refault in the pages and we can
4166 * then double check if the GTT mapping is still
4167 * valid for that pointer access.
4168 */
4169 i915_gem_release_mmap(obj);
Chris Wilsone4ffd172011-04-04 09:44:39 +01004170
Chris Wilsonef55f922015-10-09 14:11:27 +01004171 /* As we no longer need a fence for GTT access,
4172 * we can relinquish it now (and so prevent having
4173 * to steal a fence from someone else on the next
4174 * fence request). Note GPU activity would have
4175 * dropped the fence as all snoopable access is
4176 * supposed to be linear.
4177 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01004178 ret = i915_gem_object_put_fence(obj);
4179 if (ret)
4180 return ret;
Chris Wilsonef55f922015-10-09 14:11:27 +01004181 } else {
4182 /* We either have incoherent backing store and
4183 * so no GTT access or the architecture is fully
4184 * coherent. In such cases, existing GTT mmaps
4185 * ignore the cache bit in the PTE and we can
4186 * rewrite it without confusing the GPU or having
4187 * to force userspace to fault back in its mmaps.
4188 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01004189 }
4190
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00004191 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01004192 if (!drm_mm_node_allocated(&vma->node))
4193 continue;
4194
4195 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
4196 if (ret)
4197 return ret;
4198 }
Chris Wilsone4ffd172011-04-04 09:44:39 +01004199 }
4200
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00004201 list_for_each_entry(vma, &obj->vma_list, obj_link)
Chris Wilson2c225692013-08-09 12:26:45 +01004202 vma->node.color = cache_level;
4203 obj->cache_level = cache_level;
4204
Ville Syrjäläed75a552015-08-11 19:47:10 +03004205out:
Chris Wilsonef55f922015-10-09 14:11:27 +01004206 /* Flush the dirty CPU caches to the backing storage so that the
4207 * object is now coherent at its new cache level (with respect
4208 * to the access domain).
4209 */
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05304210 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
Chris Wilson0f719792015-01-13 13:32:52 +00004211 if (i915_gem_clflush_object(obj, true))
Chris Wilsonc0336662016-05-06 15:40:21 +01004212 i915_gem_chipset_flush(to_i915(obj->base.dev));
Chris Wilsone4ffd172011-04-04 09:44:39 +01004213 }
4214
Chris Wilsone4ffd172011-04-04 09:44:39 +01004215 return 0;
4216}
4217
Ben Widawsky199adf42012-09-21 17:01:20 -07004218int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
4219 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01004220{
Ben Widawsky199adf42012-09-21 17:01:20 -07004221 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01004222 struct drm_i915_gem_object *obj;
Chris Wilsone6994ae2012-07-10 10:27:08 +01004223
Chris Wilsona8ad0bd2016-05-09 11:04:54 +01004224 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
Chris Wilson432be692015-05-07 12:14:55 +01004225 if (&obj->base == NULL)
4226 return -ENOENT;
Chris Wilsone6994ae2012-07-10 10:27:08 +01004227
Chris Wilson651d7942013-08-08 14:41:10 +01004228 switch (obj->cache_level) {
4229 case I915_CACHE_LLC:
4230 case I915_CACHE_L3_LLC:
4231 args->caching = I915_CACHING_CACHED;
4232 break;
4233
Chris Wilson4257d3b2013-08-08 14:41:11 +01004234 case I915_CACHE_WT:
4235 args->caching = I915_CACHING_DISPLAY;
4236 break;
4237
Chris Wilson651d7942013-08-08 14:41:10 +01004238 default:
4239 args->caching = I915_CACHING_NONE;
4240 break;
4241 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01004242
Chris Wilson432be692015-05-07 12:14:55 +01004243 drm_gem_object_unreference_unlocked(&obj->base);
4244 return 0;
Chris Wilsone6994ae2012-07-10 10:27:08 +01004245}
4246
Ben Widawsky199adf42012-09-21 17:01:20 -07004247int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
4248 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01004249{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004250 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawsky199adf42012-09-21 17:01:20 -07004251 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01004252 struct drm_i915_gem_object *obj;
4253 enum i915_cache_level level;
4254 int ret;
4255
Ben Widawsky199adf42012-09-21 17:01:20 -07004256 switch (args->caching) {
4257 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01004258 level = I915_CACHE_NONE;
4259 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07004260 case I915_CACHING_CACHED:
Imre Deake5756c12015-08-14 18:43:30 +03004261 /*
4262 * Due to a HW issue on BXT A stepping, GPU stores via a
4263 * snooped mapping may leave stale data in a corresponding CPU
4264 * cacheline, whereas normally such cachelines would get
4265 * invalidated.
4266 */
Tvrtko Ursulinca377802016-03-02 12:10:31 +00004267 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
Imre Deake5756c12015-08-14 18:43:30 +03004268 return -ENODEV;
4269
Chris Wilsone6994ae2012-07-10 10:27:08 +01004270 level = I915_CACHE_LLC;
4271 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01004272 case I915_CACHING_DISPLAY:
4273 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
4274 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01004275 default:
4276 return -EINVAL;
4277 }
4278
Imre Deakfd0fe6a2015-11-04 21:25:32 +02004279 intel_runtime_pm_get(dev_priv);
4280
Ben Widawsky3bc29132012-09-26 16:15:20 -07004281 ret = i915_mutex_lock_interruptible(dev);
4282 if (ret)
Imre Deakfd0fe6a2015-11-04 21:25:32 +02004283 goto rpm_put;
Ben Widawsky3bc29132012-09-26 16:15:20 -07004284
Chris Wilsona8ad0bd2016-05-09 11:04:54 +01004285 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
Chris Wilsone6994ae2012-07-10 10:27:08 +01004286 if (&obj->base == NULL) {
4287 ret = -ENOENT;
4288 goto unlock;
4289 }
4290
4291 ret = i915_gem_object_set_cache_level(obj, level);
4292
4293 drm_gem_object_unreference(&obj->base);
4294unlock:
4295 mutex_unlock(&dev->struct_mutex);
Imre Deakfd0fe6a2015-11-04 21:25:32 +02004296rpm_put:
4297 intel_runtime_pm_put(dev_priv);
4298
Chris Wilsone6994ae2012-07-10 10:27:08 +01004299 return ret;
4300}
4301
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08004302/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01004303 * Prepare buffer for display plane (scanout, cursors, etc).
4304 * Can be called from an uninterruptible phase (modesetting) and allows
4305 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08004306 */
4307int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01004308i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
4309 u32 alignment,
Tvrtko Ursuline6617332015-03-23 11:10:33 +00004310 const struct i915_ggtt_view *view)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08004311{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01004312 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08004313 int ret;
4314
Chris Wilsoncc98b412013-08-09 12:25:09 +01004315 /* Mark the pin_display early so that we account for the
4316 * display coherency whilst setting up the cache domains.
4317 */
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01004318 obj->pin_display++;
Chris Wilsoncc98b412013-08-09 12:25:09 +01004319
Eric Anholta7ef0642011-03-29 16:59:54 -07004320 /* The display engine is not coherent with the LLC cache on gen6. As
4321 * a result, we make sure that the pinning that is about to occur is
4322	 * done with uncached PTEs. This is the lowest common denominator for all
4323 * chipsets.
4324 *
4325 * However for gen6+, we could do better by using the GFDT bit instead
4326 * of uncaching, which would allow us to flush all the LLC-cached data
4327 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
4328 */
Chris Wilson651d7942013-08-08 14:41:10 +01004329 ret = i915_gem_object_set_cache_level(obj,
4330 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Eric Anholta7ef0642011-03-29 16:59:54 -07004331 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01004332 goto err_unpin_display;
Eric Anholta7ef0642011-03-29 16:59:54 -07004333
Chris Wilson2da3b9b2011-04-14 09:41:17 +01004334 /* As the user may map the buffer once pinned in the display plane
4335 * (e.g. libkms for the bootup splash), we have to ensure that we
4336 * always use map_and_fenceable for all scanout buffers.
4337 */
Tvrtko Ursulin50470bb2015-03-23 11:10:36 +00004338 ret = i915_gem_object_ggtt_pin(obj, view, alignment,
4339 view->type == I915_GGTT_VIEW_NORMAL ?
4340 PIN_MAPPABLE : 0);
Chris Wilson2da3b9b2011-04-14 09:41:17 +01004341 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01004342 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01004343
Daniel Vettere62b59e2015-01-21 14:53:48 +01004344 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01004345
Chris Wilson2da3b9b2011-04-14 09:41:17 +01004346 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00004347 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01004348
4349 /* It should now be out of any other write domains, and we can update
4350 * the domain values for our changes.
4351 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01004352 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00004353 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08004354
4355 trace_i915_gem_object_change_domain(obj,
4356 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01004357 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08004358
4359 return 0;
Chris Wilsoncc98b412013-08-09 12:25:09 +01004360
4361err_unpin_display:
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01004362 obj->pin_display--;
Chris Wilsoncc98b412013-08-09 12:25:09 +01004363 return ret;
4364}
4365
4366void
Tvrtko Ursuline6617332015-03-23 11:10:33 +00004367i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
4368 const struct i915_ggtt_view *view)
Chris Wilsoncc98b412013-08-09 12:25:09 +01004369{
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01004370 if (WARN_ON(obj->pin_display == 0))
4371 return;
4372
Tvrtko Ursuline6617332015-03-23 11:10:33 +00004373 i915_gem_object_ggtt_unpin_view(obj, view);
4374
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01004375 obj->pin_display--;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08004376}
4377
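/*
 * Editorial illustration, not part of the driver: the pin/unpin pairing that
 * the two helpers above expect from a display client, using the normal GGTT
 * view. A compiled-out sketch only; the real modeset paths add alignment and
 * error handling that are omitted here.
 */
#if 0
static int example_scanout_pin(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_pin_to_display_plane(obj, 0,
						   &i915_ggtt_view_normal);
	if (ret)
		return ret;

	/* ... point the plane at the pinned GGTT offset and flip ... */

	i915_gem_object_unpin_from_display_plane(obj, &i915_ggtt_view_normal);
	return 0;
}
#endif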
Eric Anholte47c68e2008-11-14 13:35:19 -08004378/**
4379 * Moves a single object to the CPU read, and possibly write domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01004380 * @obj: object to act on
4381 * @write: requesting write or read-only access
Eric Anholte47c68e2008-11-14 13:35:19 -08004382 *
4383 * This function returns when the move is complete, including waiting on
4384 * flushes to occur.
4385 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02004386int
Chris Wilson919926a2010-11-12 13:42:53 +00004387i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08004388{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004389 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08004390 int ret;
4391
Chris Wilson8d7e3de2011-02-07 15:23:02 +00004392 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
4393 return 0;
4394
Chris Wilson0201f1e2012-07-20 12:41:01 +01004395 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00004396 if (ret)
4397 return ret;
4398
Eric Anholte47c68e2008-11-14 13:35:19 -08004399 i915_gem_object_flush_gtt_write_domain(obj);
4400
Chris Wilson05394f32010-11-08 19:18:58 +00004401 old_write_domain = obj->base.write_domain;
4402 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004403
Eric Anholte47c68e2008-11-14 13:35:19 -08004404 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00004405 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01004406 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08004407
Chris Wilson05394f32010-11-08 19:18:58 +00004408 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08004409 }
4410
4411 /* It should now be out of any other write domains, and we can update
4412 * the domain values for our changes.
4413 */
Chris Wilson05394f32010-11-08 19:18:58 +00004414 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08004415
4416 /* If we're writing through the CPU, then the GPU read domains will
4417 * need to be invalidated at next use.
4418 */
4419 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00004420 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4421 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08004422 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08004423
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004424 trace_i915_gem_object_change_domain(obj,
4425 old_read_domains,
4426 old_write_domain);
4427
Eric Anholt2ef7eea2008-11-10 10:53:25 -08004428 return 0;
4429}
4430
Eric Anholt673a3942008-07-30 12:06:12 -07004431/* Throttle our rendering by waiting until the ring has completed our requests
4432 * emitted over 20 msec ago.
4433 *
Eric Anholtb9624422009-06-03 07:27:35 +00004434 * Note that if we were to use the current jiffies each time around the loop,
4435 * we wouldn't escape the function with any frames outstanding if the time to
4436 * render a frame was over 20ms.
4437 *
Eric Anholt673a3942008-07-30 12:06:12 -07004438 * This should get us reasonable parallelism between CPU and GPU but also
4439 * relatively low latency when blocking on a particular request to finish.
4440 */
4441static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004442i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004443{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004444 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004445 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilsond0bc54f2015-05-21 21:01:48 +01004446 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
John Harrison54fb2412014-11-24 18:49:27 +00004447 struct drm_i915_gem_request *request, *target = NULL;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004448 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004449
Daniel Vetter308887a2012-11-14 17:14:06 +01004450 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4451 if (ret)
4452 return ret;
4453
Chris Wilsonf4457ae2016-04-13 17:35:08 +01004454 /* ABI: return -EIO if already wedged */
4455 if (i915_terminally_wedged(&dev_priv->gpu_error))
4456 return -EIO;
Chris Wilsone110e8d2011-01-26 15:39:14 +00004457
Chris Wilson1c255952010-09-26 11:03:27 +01004458 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004459 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00004460 if (time_after_eq(request->emitted_jiffies, recent_enough))
4461 break;
4462
John Harrisonfcfa423c2015-05-29 17:44:12 +01004463 /*
4464		 * Note that the request might not have been submitted yet,
4465		 * in which case emitted_jiffies will be zero.
4466 */
4467 if (!request->emitted_jiffies)
4468 continue;
4469
John Harrison54fb2412014-11-24 18:49:27 +00004470 target = request;
Eric Anholtb9624422009-06-03 07:27:35 +00004471 }
John Harrisonff865882014-11-24 18:49:28 +00004472 if (target)
4473 i915_gem_request_reference(target);
Chris Wilson1c255952010-09-26 11:03:27 +01004474 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004475
John Harrison54fb2412014-11-24 18:49:27 +00004476 if (target == NULL)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004477 return 0;
4478
Chris Wilson299259a2016-04-13 17:35:06 +01004479 ret = __i915_wait_request(target, true, NULL, NULL);
Chris Wilson73db04c2016-04-28 09:56:55 +01004480 i915_gem_request_unreference(target);
John Harrisonff865882014-11-24 18:49:28 +00004481
Eric Anholt673a3942008-07-30 12:06:12 -07004482 return ret;
4483}
4484
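/*
 * Editorial note: the 20 msec window described above is expected to come from
 * DRM_I915_THROTTLE_JIFFIES, conventionally defined as msecs_to_jiffies(20)
 * in i915_drv.h; that definition is an assumption from the comment and has
 * not been re-checked against this tree.
 */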
Chris Wilsond23db882014-05-23 08:48:08 +02004485static bool
4486i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4487{
4488 struct drm_i915_gem_object *obj = vma->obj;
4489
4490 if (alignment &&
4491 vma->node.start & (alignment - 1))
4492 return true;
4493
4494 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4495 return true;
4496
4497 if (flags & PIN_OFFSET_BIAS &&
4498 vma->node.start < (flags & PIN_OFFSET_MASK))
4499 return true;
4500
Chris Wilson506a8e82015-12-08 11:55:07 +00004501 if (flags & PIN_OFFSET_FIXED &&
4502 vma->node.start != (flags & PIN_OFFSET_MASK))
4503 return true;
4504
Chris Wilsond23db882014-05-23 08:48:08 +02004505 return false;
4506}
4507
Chris Wilsond0710ab2015-11-20 14:16:39 +00004508void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
4509{
4510 struct drm_i915_gem_object *obj = vma->obj;
4511 bool mappable, fenceable;
4512 u32 fence_size, fence_alignment;
4513
4514 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4515 obj->base.size,
4516 obj->tiling_mode);
4517 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4518 obj->base.size,
4519 obj->tiling_mode,
4520 true);
4521
4522 fenceable = (vma->node.size == fence_size &&
4523 (vma->node.start & (fence_alignment - 1)) == 0);
4524
4525 mappable = (vma->node.start + fence_size <=
Joonas Lahtinen62106b42016-03-18 10:42:57 +02004526 to_i915(obj->base.dev)->ggtt.mappable_end);
Chris Wilsond0710ab2015-11-20 14:16:39 +00004527
4528 obj->map_and_fenceable = mappable && fenceable;
4529}
4530
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004531static int
4532i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4533 struct i915_address_space *vm,
4534 const struct i915_ggtt_view *ggtt_view,
4535 uint32_t alignment,
4536 uint64_t flags)
Eric Anholt673a3942008-07-30 12:06:12 -07004537{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004538 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004539 struct i915_vma *vma;
Chris Wilsonef79e172014-10-31 13:53:52 +00004540 unsigned bound;
Eric Anholt673a3942008-07-30 12:06:12 -07004541 int ret;
4542
Ben Widawsky6e7186a2014-05-06 22:21:36 -07004543 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4544 return -ENODEV;
4545
Daniel Vetterbf3d1492014-02-14 14:01:12 +01004546 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
Daniel Vetter1ec9e262014-02-14 14:01:11 +01004547 return -EINVAL;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004548
Chris Wilsonc826c442014-10-31 13:53:53 +00004549 if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4550 return -EINVAL;
4551
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004552 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
4553 return -EINVAL;
4554
4555 vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
4556 i915_gem_obj_to_vma(obj, vm);
4557
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004558 if (vma) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004559 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4560 return -EBUSY;
4561
Chris Wilsond23db882014-05-23 08:48:08 +02004562 if (i915_vma_misplaced(vma, alignment, flags)) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004563 WARN(vma->pin_count,
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004564 "bo is already pinned in %s with incorrect alignment:"
Michel Thierry088e0df2015-08-07 17:40:17 +01004565 " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
Daniel Vetter75e9e912010-11-04 17:11:09 +01004566 " obj->map_and_fenceable=%d\n",
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004567 ggtt_view ? "ggtt" : "ppgtt",
Michel Thierry088e0df2015-08-07 17:40:17 +01004568 upper_32_bits(vma->node.start),
4569 lower_32_bits(vma->node.start),
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00004570 alignment,
Chris Wilsond23db882014-05-23 08:48:08 +02004571 !!(flags & PIN_MAPPABLE),
Chris Wilson05394f32010-11-08 19:18:58 +00004572 obj->map_and_fenceable);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004573 ret = i915_vma_unbind(vma);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01004574 if (ret)
4575 return ret;
Daniel Vetter8ea99c92014-02-14 14:01:21 +01004576
4577 vma = NULL;
Chris Wilsonac0c6b52010-05-27 13:18:18 +01004578 }
4579 }
4580
Chris Wilsonef79e172014-10-31 13:53:52 +00004581 bound = vma ? vma->bound : 0;
Daniel Vetter8ea99c92014-02-14 14:01:21 +01004582 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004583 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
4584 flags);
Daniel Vetter262de142014-02-14 14:01:20 +01004585 if (IS_ERR(vma))
4586 return PTR_ERR(vma);
Daniel Vetter08755462015-04-20 09:04:05 -07004587 } else {
4588 ret = i915_vma_bind(vma, obj->cache_level, flags);
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00004589 if (ret)
4590 return ret;
4591 }
Daniel Vetter74898d72012-02-15 23:50:22 +01004592
Joonas Lahtinen91e67112015-05-06 14:33:58 +03004593 if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
4594 (bound ^ vma->bound) & GLOBAL_BIND) {
Chris Wilsond0710ab2015-11-20 14:16:39 +00004595 __i915_vma_set_map_and_fenceable(vma);
Joonas Lahtinen91e67112015-05-06 14:33:58 +03004596 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4597 }
Chris Wilsonef79e172014-10-31 13:53:52 +00004598
Daniel Vetter8ea99c92014-02-14 14:01:21 +01004599 vma->pin_count++;
Eric Anholt673a3942008-07-30 12:06:12 -07004600 return 0;
4601}
4602
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004603int
4604i915_gem_object_pin(struct drm_i915_gem_object *obj,
4605 struct i915_address_space *vm,
4606 uint32_t alignment,
4607 uint64_t flags)
4608{
4609 return i915_gem_object_do_pin(obj, vm,
4610 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
4611 alignment, flags);
4612}
4613
4614int
4615i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4616 const struct i915_ggtt_view *view,
4617 uint32_t alignment,
4618 uint64_t flags)
4619{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03004620 struct drm_device *dev = obj->base.dev;
4621 struct drm_i915_private *dev_priv = to_i915(dev);
4622 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004623
Matthew Auldade7daa2016-03-24 15:54:20 +00004624 BUG_ON(!view);
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004625
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03004626 return i915_gem_object_do_pin(obj, &ggtt->base, view,
Tvrtko Ursulin6fafab72015-03-17 15:36:51 +00004627 alignment, flags | PIN_GLOBAL);
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004628}
4629
Eric Anholt673a3942008-07-30 12:06:12 -07004630void
Tvrtko Ursuline6617332015-03-23 11:10:33 +00004631i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
4632 const struct i915_ggtt_view *view)
Eric Anholt673a3942008-07-30 12:06:12 -07004633{
Tvrtko Ursuline6617332015-03-23 11:10:33 +00004634 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
Eric Anholt673a3942008-07-30 12:06:12 -07004635
Tvrtko Ursuline6617332015-03-23 11:10:33 +00004636 WARN_ON(vma->pin_count == 0);
Joonas Lahtinen9abc4642015-03-27 13:09:22 +02004637 WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004638
Chris Wilson30154652015-04-07 17:28:24 +01004639 --vma->pin_count;
Eric Anholt673a3942008-07-30 12:06:12 -07004640}
4641
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto unref;

	args->busy = 0;
	if (obj->active) {
		int i;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read_req[i];
			if (req)
				args->busy |= 1 << (16 + req->engine->exec_id);
		}
		if (obj->last_write_req)
			args->busy |= obj->last_write_req->engine->exec_id;
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
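
/*
 * Illustrative note, not used by the driver: with the encoding above, a
 * caller of the busy ioctl can decode the result roughly as
 *
 *	last_write_engine = args.busy & 0xffff;
 *	reading_engines   = args.busy >> 16;
 *
 * assuming engine->exec_id matches the I915_EXEC_* execbuffer constants;
 * args.busy == 0 means the object has become idle.
 */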

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (i915_gem_obj_is_pinned(obj)) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->pages &&
	    obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->madv == I915_MADV_WILLNEED)
			i915_gem_object_unpin_pages(obj);
		if (args->madv == I915_MADV_WILLNEED)
			i915_gem_object_pin_pages(obj);
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
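
/*
 * Illustrative sketch of the expected userspace pattern, not part of the
 * driver: a buffer cache marks idle buffers purgeable and, on reuse, checks
 * whether the kernel discarded the pages in the meantime:
 *
 *	struct drm_i915_gem_madvise arg = { .handle = handle,
 *					    .madv = I915_MADV_DONTNEED };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	...
 *	arg.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	if (!arg.retained)
 *		reupload the contents, the backing storage was purged
 */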

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	int i;

	INIT_LIST_HEAD(&obj->global_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&obj->engine_list[i]);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
						   size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);

	return ERR_PTR(ret);
}
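
/*
 * i915_gem_object_create() reports failure through ERR_PTR() rather than
 * NULL, so the typical call site looks like:
 *
 *	obj = i915_gem_object_create(dev, size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */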

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		int ret;

		vma->pin_count = 0;
		ret = i915_vma_unbind(vma);
		if (WARN_ON(ret == -ERESTARTSYS)) {
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			WARN_ON(i915_vma_unbind(vma));

			dev_priv->mm.interruptible = was_interruptible;
		}
	}

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(obj->frontbuffer_bits);

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
		    vma->vm == vm)
			return vma;
	}
	return NULL;
}

struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
					   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!view);

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma;
	return NULL;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

	if (!vma->is_ggtt)
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	list_del(&vma->obj_link);

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

static void
i915_gem_stop_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.stop_engine(engine);
}

int
i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_wait_for_idle(dev_priv);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev_priv);

	i915_gem_stop_engines(dev);
	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
	flush_delayed_work(&dev_priv->gt.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static void init_unused_ring(struct drm_device *dev, u32 base)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_device *dev)
{
	if (IS_I830(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
		init_unused_ring(dev, SRB2_BASE);
		init_unused_ring(dev, SRB3_BASE);
	} else if (IS_GEN2(dev)) {
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
	} else if (IS_GEN3(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, PRB2_BASE);
	}
}

int i915_gem_init_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = intel_init_bsd2_ring_buffer(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	return 0;

cleanup_vebox_ring:
	intel_cleanup_engine(&dev_priv->engine[VECS]);
cleanup_blt_ring:
	intel_cleanup_engine(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
	intel_cleanup_engine(&dev_priv->engine[VCS]);
cleanup_render_ring:
	intel_cleanup_engine(&dev_priv->engine[RCS]);

	return ret;
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	int ret;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume, which
	 * will prevent C3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev);

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_guc_setup(dev);
	if (ret)
		goto out;

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}

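/*
 * Descriptive summary of the function below: i915_gem_init() selects the
 * submission backend (legacy ringbuffer vs. execlists) by filling in the
 * dev_priv->gt vtable, holds forcewake as a workaround while bringing up
 * userptr, the GGTT, contexts and engines, and then calls i915_gem_init_hw().
 * A -EIO from the hardware init marks the GPU as wedged instead of failing
 * driver load.
 */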
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (!i915.enable_execlists) {
		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
		dev_priv->gt.init_engines = i915_gem_init_engines;
		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
		dev_priv->gt.stop_engine = intel_stop_engine;
	} else {
		dev_priv->gt.execbuf_submit = intel_execlists_submission;
		dev_priv->gt.init_engines = intel_logical_rings_init;
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
		dev_priv->gt.stop_engine = intel_logical_ring_stop;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	i915_gem_init_userptr(dev_priv);
	i915_gem_init_ggtt(dev);

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = dev_priv->gt.init_engines(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow ring initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry;
		 * for all other failures, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

void
i915_gem_cleanup_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.cleanup_engine(engine);
}

static void
init_engine_lists(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->active_list);
	INIT_LIST_HEAD(&engine->request_list);
}

void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
}

void
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	dev_priv->objects =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_engine_lists(&dev_priv->engine[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	INIT_LIST_HEAD(&dev_priv->mm.fence_list);

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	mutex_init(&dev_priv->fb_tracking.lock);
}

void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
}

int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 */

	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_ring = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}
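
/*
 * Illustrative use, simplified and not taken from an actual call site: when
 * a plane switches from one framebuffer to another, the caller transfers the
 * plane's frontbuffer bit, e.g. (assuming the INTEL_FRONTBUFFER_PRIMARY()
 * helper) for the primary plane on pipe A:
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(PIPE_A));
 *
 * The real call sites live in the modesetting code.
 */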

/* All the new VM stuff */
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}

u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma->node.start;

	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
	return -1;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;
	}

	return false;
}

bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
		    drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	GEM_BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
			return vma->node.size;
	}

	return 0;
}

bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->pin_count > 0)
			return true;

	return false;
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference(&obj->base);
	return ERR_PTR(ret);
}
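
/*
 * Illustrative use, simplified; names are placeholders rather than an actual
 * call site: wrapping a blob (for example firmware data) in a GEM object so
 * the GPU can read it:
 *
 *	obj = i915_gem_object_create_from_data(dev, blob->data, blob->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */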