/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

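/*
 * A CPU write only needs a manual clflush when the object is not already
 * tracked as cache_dirty and its cache is not coherent for CPU writes;
 * globally pinned objects (currently in use by the HW) are kept flushed.
 */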
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_global; /* currently in use by HW, keep flushed */
}

static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	might_sleep();

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_backoff(error),
					       I915_RESET_TIMEOUT);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

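/*
 * Take struct_mutex on behalf of an ioctl, but first wait for any pending
 * GPU reset to complete so that the lock is never held across a reset;
 * bail out with -EIO if the reset itself fails to complete in time.
 */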
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	pinned = ggtt->base.reserved;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

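/*
 * "phys" objects keep their contents in a single contiguous DMA buffer
 * rather than in shmemfs pages. get_pages copies the shmem backing store
 * into that buffer and describes it with a one-entry sg_table, so the rest
 * of the GEM code can treat it like any other set of pages.
 */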
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	drm_dma_handle_t *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static const struct drm_i915_gem_object_ops i915_gem_object_ops;

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

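/*
 * Wait on a single dma-fence. Foreign fences are simply waited upon; for
 * our own requests we may also boost the GPU frequency (waitboost) on
 * behalf of the client so that the stall is resolved sooner.
 */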
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps_client)
{
	struct drm_i915_gem_request *rq;

	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps_client) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq, rps_client);
		else
			rps_client = NULL;
	}

	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

	return timeout;
}

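/*
 * Wait on the fences tracked by a reservation object: all shared fences
 * when I915_WAIT_ALL is given, otherwise just the exclusive fence. If
 * everything signaled without the object being touched in the meantime,
 * opportunistically clear out the stale fences.
 */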
static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps_client)
{
	unsigned int seq = __read_seqcount_begin(&resv->seq);
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps_client);
			if (timeout < 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);

		prune_fences = count && timeout >= 0;
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (excl && timeout >= 0) {
		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
						     rps_client);
		prune_fences = timeout >= 0;
	}

	dma_fence_put(excl);

	/* Opportunistically prune the fences iff we know they have *all* been
	 * signaled and that the reservation object has not been changed (i.e.
	 * no new fences have been added).
	 */
	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
		if (reservation_object_trylock(resv)) {
			if (!__read_seqcount_retry(&resv->seq, seq))
				reservation_object_add_excl_fence(resv, NULL);
			reservation_object_unlock(resv);
		}
	}

	return timeout;
}

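/*
 * Priority bumping: ask the engine scheduler (where available) to
 * reprioritise the request backing a fence, recursing one level into
 * fence arrays.
 */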
static void __fence_set_priority(struct dma_fence *fence, int prio)
{
	struct drm_i915_gem_request *rq;
	struct intel_engine_cs *engine;

	if (!dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;
	if (!engine->schedule)
		return;

	engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], prio);
	} else {
		__fence_set_priority(fence, prio);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      int prio)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], prio);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, prio);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps_client: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps_client)
{
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps_client);
	return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps_client;
}

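/*
 * pwrite for "phys" objects: copy straight into the contiguous backing
 * store and clflush it, pretending the object stayed coherent in the GTT
 * domain just like the shmem pwrite path does.
 */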
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       args->size, &args->handle);
}

static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       args->size, &args->handle);
}

static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	if (!(obj->base.write_domain & flush_domains))
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it "immediately" go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
	 */
	wmb();

	switch (obj->base.write_domain) {
	case I915_GEM_DOMAIN_GTT:
		if (!HAS_LLC(dev_priv)) {
			intel_runtime_pm_get(dev_priv);
			spin_lock_irq(&dev_priv->uncore.lock);
			POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
			spin_unlock_irq(&dev_priv->uncore.lock);
			intel_runtime_pm_put(dev_priv);
		}

		intel_fb_obj_flush(obj,
				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
	}

	obj->base.write_domain = 0;
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!obj->cache_dirty &&
	    !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = CLFLUSH_BEFORE;

out:
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (!obj->cache_dirty) {
		*needs_clflush |= CLFLUSH_AFTER;

		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
		if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
			*needs_clflush |= CLFLUSH_BEFORE;
	}

out:
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int offset, int length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
	else
		ret = __copy_to_user(user_data, vaddr + offset, length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

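/*
 * Per-page pread: try the atomic kmap fast path first and fall back to the
 * sleeping, bit17-swizzle-aware slow path on failure.
 */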
static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

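/*
 * Copy from a GGTT (write-combined) mapping back to userspace, preferring
 * the atomic mapping and retrying with a full page mapping if the copy
 * faults.
 */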
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

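/*
 * Fallback pread through the GGTT, used when the shmem path cannot handle
 * the object: either pin the whole object into the mappable aperture or
 * step through it one page at a time via a temporary PTE.
 */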
1041static int
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001042i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
1043 const struct drm_i915_gem_pread *args)
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301044{
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001045 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1046 struct i915_ggtt *ggtt = &i915->ggtt;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301047 struct drm_mm_node node;
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001048 struct i915_vma *vma;
1049 void __user *user_data;
1050 u64 remain, offset;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301051 int ret;
1052
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001053 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1054 if (ret)
1055 return ret;
1056
1057 intel_runtime_pm_get(i915);
1058 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
Chris Wilsona3259ca2017-10-09 09:44:00 +01001059 PIN_MAPPABLE |
1060 PIN_NONFAULT |
1061 PIN_NONBLOCK);
Chris Wilson18034582016-08-18 17:16:45 +01001062 if (!IS_ERR(vma)) {
1063 node.start = i915_ggtt_offset(vma);
1064 node.allocated = false;
Chris Wilson49ef5292016-08-18 17:17:00 +01001065 ret = i915_vma_put_fence(vma);
Chris Wilson18034582016-08-18 17:16:45 +01001066 if (ret) {
1067 i915_vma_unpin(vma);
1068 vma = ERR_PTR(ret);
1069 }
1070 }
Chris Wilson058d88c2016-08-15 10:49:06 +01001071 if (IS_ERR(vma)) {
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001072 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301073 if (ret)
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001074 goto out_unlock;
1075 GEM_BUG_ON(!node.allocated);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301076 }
1077
1078 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1079 if (ret)
1080 goto out_unpin;
1081
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001082 mutex_unlock(&i915->drm.struct_mutex);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301083
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001084 user_data = u64_to_user_ptr(args->data_ptr);
1085 remain = args->size;
1086 offset = args->offset;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301087
1088 while (remain > 0) {
1089 /* Operation in this page
1090 *
1091 * page_base = page offset within aperture
1092 * page_offset = offset within page
1093 * page_length = bytes to copy for this page
1094 */
1095 u32 page_base = node.start;
1096 unsigned page_offset = offset_in_page(offset);
1097 unsigned page_length = PAGE_SIZE - page_offset;
1098 page_length = remain < page_length ? remain : page_length;
1099 if (node.allocated) {
1100 wmb();
1101 ggtt->base.insert_page(&ggtt->base,
1102 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001103 node.start, I915_CACHE_NONE, 0);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301104 wmb();
1105 } else {
1106 page_base += offset & PAGE_MASK;
1107 }
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001108
1109 if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
1110 user_data, page_length)) {
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301111 ret = -EFAULT;
1112 break;
1113 }
1114
1115 remain -= page_length;
1116 user_data += page_length;
1117 offset += page_length;
1118 }
1119
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001120 mutex_lock(&i915->drm.struct_mutex);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301121out_unpin:
1122 if (node.allocated) {
1123 wmb();
1124 ggtt->base.clear_range(&ggtt->base,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02001125 node.start, node.size);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301126 remove_mappable_node(&node);
1127 } else {
Chris Wilson058d88c2016-08-15 10:49:06 +01001128 i915_vma_unpin(vma);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301129 }
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001130out_unlock:
1131 intel_runtime_pm_put(i915);
1132 mutex_unlock(&i915->drm.struct_mutex);
Chris Wilsonf60d7f02012-09-04 21:02:56 +01001133
Eric Anholteb014592009-03-10 11:44:52 -07001134 return ret;
1135}
1136
Eric Anholt673a3942008-07-30 12:06:12 -07001137/**
1138 * Reads data from the object referenced by handle.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001139 * @dev: drm device pointer
1140 * @data: ioctl data blob
1141 * @file: drm file pointer
Eric Anholt673a3942008-07-30 12:06:12 -07001142 *
1143 * On error, the contents of *data are undefined.
1144 */
1145int
1146i915_gem_pread_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001147 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001148{
1149 struct drm_i915_gem_pread *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001150 struct drm_i915_gem_object *obj;
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001151 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001152
Chris Wilson51311d02010-11-17 09:10:42 +00001153 if (args->size == 0)
1154 return 0;
1155
1156 if (!access_ok(VERIFY_WRITE,
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001157 u64_to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +00001158 args->size))
1159 return -EFAULT;
1160
Chris Wilson03ac0642016-07-20 13:31:51 +01001161 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001162 if (!obj)
1163 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001164
Chris Wilson7dcd2492010-09-26 20:21:44 +01001165 /* Bounds check source. */
Matthew Auld966d5bf2016-12-13 20:32:22 +00001166 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
Chris Wilsonce9d4192010-09-26 20:50:05 +01001167 ret = -EINVAL;
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001168 goto out;
Chris Wilsonce9d4192010-09-26 20:50:05 +01001169 }
1170
Chris Wilsondb53a302011-02-03 11:57:46 +00001171 trace_i915_gem_object_pread(obj, args->offset, args->size);
1172
Chris Wilsone95433c2016-10-28 13:58:27 +01001173 ret = i915_gem_object_wait(obj,
1174 I915_WAIT_INTERRUPTIBLE,
1175 MAX_SCHEDULE_TIMEOUT,
1176 to_rps_client(file));
Chris Wilson258a5ed2016-08-05 10:14:16 +01001177 if (ret)
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001178 goto out;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001179
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001180 ret = i915_gem_object_pin_pages(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001181 if (ret)
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001182 goto out;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001183
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001184 ret = i915_gem_shmem_pread(obj, args);
Chris Wilson9c870d02016-10-24 13:42:15 +01001185 if (ret == -EFAULT || ret == -ENODEV)
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001186 ret = i915_gem_gtt_pread(obj, args);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301187
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001188 i915_gem_object_unpin_pages(obj);
1189out:
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001190 i915_gem_object_put(obj);
Eric Anholteb014592009-03-10 11:44:52 -07001191 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001192}
1193
Keith Packard0839ccb2008-10-30 19:38:48 -07001194/* This is the fast write path which cannot handle
1195 * page faults in the source data
Linus Torvalds9b7530cc2008-10-20 14:16:43 -07001196 */
Linus Torvalds9b7530cc2008-10-20 14:16:43 -07001197
Chris Wilsonfe115622016-10-28 13:58:40 +01001198static inline bool
1199ggtt_write(struct io_mapping *mapping,
1200 loff_t base, int offset,
1201 char __user *user_data, int length)
Keith Packard0839ccb2008-10-30 19:38:48 -07001202{
Ville Syrjäläafe722b2017-09-01 20:12:52 +03001203 void __iomem *vaddr;
Keith Packard0839ccb2008-10-30 19:38:48 -07001204 unsigned long unwritten;
1205
Ben Widawsky4f0c7cf2012-04-16 14:07:47 -07001206 /* We can use the cpu mem copy function because this is X86. */
Ville Syrjäläafe722b2017-09-01 20:12:52 +03001207 vaddr = io_mapping_map_atomic_wc(mapping, base);
1208 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
Keith Packard0839ccb2008-10-30 19:38:48 -07001209 user_data, length);
Chris Wilsonfe115622016-10-28 13:58:40 +01001210 io_mapping_unmap_atomic(vaddr);
1211 if (unwritten) {
Ville Syrjäläafe722b2017-09-01 20:12:52 +03001212 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1213 unwritten = copy_from_user((void __force *)vaddr + offset,
1214 user_data, length);
Chris Wilsonfe115622016-10-28 13:58:40 +01001215 io_mapping_unmap(vaddr);
1216 }
Keith Packard0839ccb2008-10-30 19:38:48 -07001217
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001218 return unwritten;
1219}
1220
Eric Anholt3de09aa2009-03-09 09:42:23 -07001221/**
1222 * This is the fast pwrite path, where we copy the data directly from the
1223 * user into the GTT, uncached.
Chris Wilsonfe115622016-10-28 13:58:40 +01001224 * @obj: i915 GEM object
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001225 * @args: pwrite arguments structure
Eric Anholt3de09aa2009-03-09 09:42:23 -07001226 */
Eric Anholt673a3942008-07-30 12:06:12 -07001227static int
Chris Wilsonfe115622016-10-28 13:58:40 +01001228i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1229 const struct drm_i915_gem_pwrite *args)
Eric Anholt673a3942008-07-30 12:06:12 -07001230{
Chris Wilsonfe115622016-10-28 13:58:40 +01001231 struct drm_i915_private *i915 = to_i915(obj->base.dev);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301232 struct i915_ggtt *ggtt = &i915->ggtt;
1233 struct drm_mm_node node;
Chris Wilsonfe115622016-10-28 13:58:40 +01001234 struct i915_vma *vma;
1235 u64 remain, offset;
1236 void __user *user_data;
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301237 int ret;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301238
Chris Wilsonfe115622016-10-28 13:58:40 +01001239 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1240 if (ret)
1241 return ret;
Daniel Vetter935aaa62012-03-25 19:47:35 +02001242
Chris Wilson8bd81812017-10-19 07:37:33 +01001243 if (i915_gem_object_has_struct_page(obj)) {
1244 /*
1245 * Avoid waking the device up if we can fallback, as
1246 * waking/resuming is very slow (worst-case 10-100 ms
1247 * depending on PCI sleeps and our own resume time).
1248 * This easily dwarfs any performance advantage from
1249 * using the cache bypass of indirect GGTT access.
1250 */
1251 if (!intel_runtime_pm_get_if_in_use(i915)) {
1252 ret = -EFAULT;
1253 goto out_unlock;
1254 }
1255 } else {
1256 /* No backing pages, no fallback, we must force GGTT access */
1257 intel_runtime_pm_get(i915);
1258 }
1259
Chris Wilson058d88c2016-08-15 10:49:06 +01001260 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
Chris Wilsona3259ca2017-10-09 09:44:00 +01001261 PIN_MAPPABLE |
1262 PIN_NONFAULT |
1263 PIN_NONBLOCK);
Chris Wilson18034582016-08-18 17:16:45 +01001264 if (!IS_ERR(vma)) {
1265 node.start = i915_ggtt_offset(vma);
1266 node.allocated = false;
Chris Wilson49ef5292016-08-18 17:17:00 +01001267 ret = i915_vma_put_fence(vma);
Chris Wilson18034582016-08-18 17:16:45 +01001268 if (ret) {
1269 i915_vma_unpin(vma);
1270 vma = ERR_PTR(ret);
1271 }
1272 }
Chris Wilson058d88c2016-08-15 10:49:06 +01001273 if (IS_ERR(vma)) {
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001274 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301275 if (ret)
Chris Wilson8bd81812017-10-19 07:37:33 +01001276 goto out_rpm;
Chris Wilsonfe115622016-10-28 13:58:40 +01001277 GEM_BUG_ON(!node.allocated);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301278 }
Daniel Vetter935aaa62012-03-25 19:47:35 +02001279
1280 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1281 if (ret)
1282 goto out_unpin;
1283
Chris Wilsonfe115622016-10-28 13:58:40 +01001284 mutex_unlock(&i915->drm.struct_mutex);
1285
Chris Wilsonb19482d2016-08-18 17:16:43 +01001286 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
Paulo Zanoni063e4e62015-02-13 17:23:45 -02001287
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301288 user_data = u64_to_user_ptr(args->data_ptr);
1289 offset = args->offset;
1290 remain = args->size;
1291 while (remain) {
Eric Anholt673a3942008-07-30 12:06:12 -07001292 /* Operation in this page
1293 *
Keith Packard0839ccb2008-10-30 19:38:48 -07001294 * page_base = page offset within aperture
1295 * page_offset = offset within page
1296 * page_length = bytes to copy for this page
Eric Anholt673a3942008-07-30 12:06:12 -07001297 */
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301298 u32 page_base = node.start;
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001299 unsigned int page_offset = offset_in_page(offset);
1300 unsigned int page_length = PAGE_SIZE - page_offset;
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301301 page_length = remain < page_length ? remain : page_length;
1302 if (node.allocated) {
1303 wmb(); /* flush the write before we modify the GGTT */
1304 ggtt->base.insert_page(&ggtt->base,
1305 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1306 node.start, I915_CACHE_NONE, 0);
1307 wmb(); /* flush modifications to the GGTT (insert_page) */
1308 } else {
1309 page_base += offset & PAGE_MASK;
1310 }
Keith Packard0839ccb2008-10-30 19:38:48 -07001311 /* If we get a fault while copying data, then (presumably) our
Eric Anholt3de09aa2009-03-09 09:42:23 -07001312 * source page isn't available. Return the error and we'll
1313 * retry in the slow path.
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301314 * If the object is non-shmem backed, we retry with the
1315 * path that handles page faults.
Keith Packard0839ccb2008-10-30 19:38:48 -07001316 */
Chris Wilsonfe115622016-10-28 13:58:40 +01001317 if (ggtt_write(&ggtt->mappable, page_base, page_offset,
1318 user_data, page_length)) {
1319 ret = -EFAULT;
1320 break;
Daniel Vetter935aaa62012-03-25 19:47:35 +02001321 }
Eric Anholt673a3942008-07-30 12:06:12 -07001322
Keith Packard0839ccb2008-10-30 19:38:48 -07001323 remain -= page_length;
1324 user_data += page_length;
1325 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -07001326 }
Chris Wilsond59b21e2017-02-22 11:40:49 +00001327 intel_fb_obj_flush(obj, ORIGIN_CPU);
Chris Wilsonfe115622016-10-28 13:58:40 +01001328
1329 mutex_lock(&i915->drm.struct_mutex);
Daniel Vetter935aaa62012-03-25 19:47:35 +02001330out_unpin:
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301331 if (node.allocated) {
1332 wmb();
1333 ggtt->base.clear_range(&ggtt->base,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02001334 node.start, node.size);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301335 remove_mappable_node(&node);
1336 } else {
Chris Wilson058d88c2016-08-15 10:49:06 +01001337 i915_vma_unpin(vma);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301338 }
Chris Wilson8bd81812017-10-19 07:37:33 +01001339out_rpm:
Chris Wilson9c870d02016-10-24 13:42:15 +01001340 intel_runtime_pm_put(i915);
Chris Wilson8bd81812017-10-19 07:37:33 +01001341out_unlock:
Chris Wilsonfe115622016-10-28 13:58:40 +01001342 mutex_unlock(&i915->drm.struct_mutex);
Eric Anholt3de09aa2009-03-09 09:42:23 -07001343 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001344}
1345
Eric Anholt673a3942008-07-30 12:06:12 -07001346static int
Chris Wilsonfe115622016-10-28 13:58:40 +01001347shmem_pwrite_slow(struct page *page, int offset, int length,
Daniel Vetterd174bd62012-03-25 19:47:40 +02001348 char __user *user_data,
1349 bool page_do_bit17_swizzling,
1350 bool needs_clflush_before,
1351 bool needs_clflush_after)
Eric Anholt673a3942008-07-30 12:06:12 -07001352{
Daniel Vetterd174bd62012-03-25 19:47:40 +02001353 char *vaddr;
1354 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001355
Daniel Vetterd174bd62012-03-25 19:47:40 +02001356 vaddr = kmap(page);
Daniel Vettere7e58eb2012-03-25 19:47:43 +02001357 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
Chris Wilsonfe115622016-10-28 13:58:40 +01001358 shmem_clflush_swizzled_range(vaddr + offset, length,
Daniel Vetter23c18c72012-03-25 19:47:42 +02001359 page_do_bit17_swizzling);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001360 if (page_do_bit17_swizzling)
Chris Wilsonfe115622016-10-28 13:58:40 +01001361 ret = __copy_from_user_swizzled(vaddr, offset, user_data,
1362 length);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001363 else
Chris Wilsonfe115622016-10-28 13:58:40 +01001364 ret = __copy_from_user(vaddr + offset, user_data, length);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001365 if (needs_clflush_after)
Chris Wilsonfe115622016-10-28 13:58:40 +01001366 shmem_clflush_swizzled_range(vaddr + offset, length,
Daniel Vetter23c18c72012-03-25 19:47:42 +02001367 page_do_bit17_swizzling);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001368 kunmap(page);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001369
Chris Wilson755d2212012-09-04 21:02:55 +01001370 return ret ? -EFAULT : 0;
Eric Anholt40123c12009-03-09 13:42:30 -07001371}
1372
Chris Wilsonfe115622016-10-28 13:58:40 +01001373/* Per-page copy function for the shmem pwrite fastpath.
1374 * Flushes invalid cachelines before writing to the target if
1375 * needs_clflush_before is set and flushes out any written cachelines after
1376 * writing if needs_clflush is set.
1377 */
Eric Anholt40123c12009-03-09 13:42:30 -07001378static int
Chris Wilsonfe115622016-10-28 13:58:40 +01001379shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1380 bool page_do_bit17_swizzling,
1381 bool needs_clflush_before,
1382 bool needs_clflush_after)
Eric Anholt40123c12009-03-09 13:42:30 -07001383{
Chris Wilsonfe115622016-10-28 13:58:40 +01001384 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001385
Chris Wilsonfe115622016-10-28 13:58:40 +01001386 ret = -ENODEV;
1387 if (!page_do_bit17_swizzling) {
1388 char *vaddr = kmap_atomic(page);
1389
1390 if (needs_clflush_before)
1391 drm_clflush_virt_range(vaddr + offset, len);
1392 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
1393 if (needs_clflush_after)
1394 drm_clflush_virt_range(vaddr + offset, len);
1395
1396 kunmap_atomic(vaddr);
1397 }
1398 if (ret == 0)
1399 return ret;
1400
1401 return shmem_pwrite_slow(page, offset, len, user_data,
1402 page_do_bit17_swizzling,
1403 needs_clflush_before,
1404 needs_clflush_after);
1405}
1406
1407static int
1408i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1409 const struct drm_i915_gem_pwrite *args)
1410{
1411 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1412 void __user *user_data;
1413 u64 remain;
1414 unsigned int obj_do_bit17_swizzling;
1415 unsigned int partial_cacheline_write;
1416 unsigned int needs_clflush;
1417 unsigned int offset, idx;
1418 int ret;
1419
1420 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
Chris Wilson43394c72016-08-18 17:16:47 +01001421 if (ret)
1422 return ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001423
Chris Wilsonfe115622016-10-28 13:58:40 +01001424 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1425 mutex_unlock(&i915->drm.struct_mutex);
1426 if (ret)
1427 return ret;
1428
1429 obj_do_bit17_swizzling = 0;
1430 if (i915_gem_object_needs_bit17_swizzle(obj))
1431 obj_do_bit17_swizzling = BIT(17);
1432
1433 /* If we don't overwrite a cacheline completely we need to be
1434 * careful to have up-to-date data by first clflushing. Don't
1435 * overcomplicate things and flush the entire patch.
1436 */
1437 partial_cacheline_write = 0;
1438 if (needs_clflush & CLFLUSH_BEFORE)
1439 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
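 /*
  * Worked example (illustrative): with 64-byte cachelines the mask becomes
  * 63 (0x3f), so at the call site below (offset | length) &
  * partial_cacheline_write is non-zero whenever either the start offset or
  * the length within a page is not cacheline aligned, i.e. the write touches
  * a cacheline it does not fully overwrite and the stale data must be
  * clflushed first.
  */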
1440
Chris Wilson43394c72016-08-18 17:16:47 +01001441 user_data = u64_to_user_ptr(args->data_ptr);
Chris Wilson43394c72016-08-18 17:16:47 +01001442 remain = args->size;
Chris Wilsonfe115622016-10-28 13:58:40 +01001443 offset = offset_in_page(args->offset);
1444 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1445 struct page *page = i915_gem_object_get_page(obj, idx);
1446 int length;
Eric Anholt40123c12009-03-09 13:42:30 -07001447
Chris Wilsonfe115622016-10-28 13:58:40 +01001448 length = remain;
1449 if (offset + length > PAGE_SIZE)
1450 length = PAGE_SIZE - offset;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001451
Chris Wilsonfe115622016-10-28 13:58:40 +01001452 ret = shmem_pwrite(page, offset, length, user_data,
1453 page_to_phys(page) & obj_do_bit17_swizzling,
1454 (offset | length) & partial_cacheline_write,
1455 needs_clflush & CLFLUSH_AFTER);
1456 if (ret)
Chris Wilson9da3da62012-06-01 15:20:22 +01001457 break;
1458
Chris Wilsonfe115622016-10-28 13:58:40 +01001459 remain -= length;
1460 user_data += length;
1461 offset = 0;
Eric Anholt40123c12009-03-09 13:42:30 -07001462 }
1463
Chris Wilsond59b21e2017-02-22 11:40:49 +00001464 intel_fb_obj_flush(obj, ORIGIN_CPU);
Chris Wilsonfe115622016-10-28 13:58:40 +01001465 i915_gem_obj_finish_shmem_access(obj);
Eric Anholt40123c12009-03-09 13:42:30 -07001466 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001467}
1468
1469/**
1470 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001471 * @dev: drm device
1472 * @data: ioctl data blob
1473 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001474 *
1475 * On error, the contents of the buffer that were to be modified are undefined.
1476 */
1477int
1478i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001479 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001480{
1481 struct drm_i915_gem_pwrite *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001482 struct drm_i915_gem_object *obj;
Chris Wilson51311d02010-11-17 09:10:42 +00001483 int ret;
1484
1485 if (args->size == 0)
1486 return 0;
1487
1488 if (!access_ok(VERIFY_READ,
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001489 u64_to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +00001490 args->size))
1491 return -EFAULT;
1492
Chris Wilson03ac0642016-07-20 13:31:51 +01001493 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001494 if (!obj)
1495 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001496
Chris Wilson7dcd2492010-09-26 20:21:44 +01001497 /* Bounds check destination. */
Matthew Auld966d5bf2016-12-13 20:32:22 +00001498 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
Chris Wilsonce9d4192010-09-26 20:50:05 +01001499 ret = -EINVAL;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001500 goto err;
Chris Wilsonce9d4192010-09-26 20:50:05 +01001501 }
1502
Chris Wilsondb53a302011-02-03 11:57:46 +00001503 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1504
Chris Wilson7c55e2c2017-03-07 12:03:38 +00001505 ret = -ENODEV;
1506 if (obj->ops->pwrite)
1507 ret = obj->ops->pwrite(obj, args);
1508 if (ret != -ENODEV)
1509 goto err;
1510
Chris Wilsone95433c2016-10-28 13:58:27 +01001511 ret = i915_gem_object_wait(obj,
1512 I915_WAIT_INTERRUPTIBLE |
1513 I915_WAIT_ALL,
1514 MAX_SCHEDULE_TIMEOUT,
1515 to_rps_client(file));
Chris Wilson258a5ed2016-08-05 10:14:16 +01001516 if (ret)
1517 goto err;
1518
Chris Wilsonfe115622016-10-28 13:58:40 +01001519 ret = i915_gem_object_pin_pages(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001520 if (ret)
Chris Wilsonfe115622016-10-28 13:58:40 +01001521 goto err;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001522
Daniel Vetter935aaa62012-03-25 19:47:35 +02001523 ret = -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -07001524 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1525 * it would end up going through the fenced access, and we'll get
1526 * different detiling behavior between reading and writing.
1527 * pread/pwrite currently are reading and writing from the CPU
1528 * perspective, requiring manual detiling by the client.
1529 */
Chris Wilson6eae0052016-06-20 15:05:52 +01001530 if (!i915_gem_object_has_struct_page(obj) ||
Chris Wilson9c870d02016-10-24 13:42:15 +01001531 cpu_write_needs_clflush(obj))
Daniel Vetter935aaa62012-03-25 19:47:35 +02001532 /* Note that the gtt paths might fail with non-page-backed user
1533 * pointers (e.g. gtt mappings when moving data between
Chris Wilson9c870d02016-10-24 13:42:15 +01001534 * textures). Fall back to the shmem path in that case.
1535 */
Chris Wilsonfe115622016-10-28 13:58:40 +01001536 ret = i915_gem_gtt_pwrite_fast(obj, args);
Eric Anholt673a3942008-07-30 12:06:12 -07001537
Chris Wilsond1054ee2016-07-16 18:42:36 +01001538 if (ret == -EFAULT || ret == -ENOSPC) {
Chris Wilson6a2c4232014-11-04 04:51:40 -08001539 if (obj->phys_handle)
1540 ret = i915_gem_phys_pwrite(obj, args, file);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301541 else
Chris Wilsonfe115622016-10-28 13:58:40 +01001542 ret = i915_gem_shmem_pwrite(obj, args);
Chris Wilson6a2c4232014-11-04 04:51:40 -08001543 }
Daniel Vetter5c0480f2011-12-14 13:57:30 +01001544
Chris Wilsonfe115622016-10-28 13:58:40 +01001545 i915_gem_object_unpin_pages(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001546err:
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001547 i915_gem_object_put(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001548 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001549}
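/*
 * Illustrative userspace sketch (not part of this file): a minimal caller of
 * the pwrite ioctl above. The structure layout and ioctl number come from the
 * i915 uapi header; drmIoctl() is libdrm's restart-on-EINTR wrapper; the
 * example_* helper name is made up. Assumes <stdint.h>, <xf86drm.h> and
 * <drm/i915_drm.h>.
 */
#if 0	/* example only, never compiled as part of the driver */
static int example_gem_pwrite(int drm_fd, uint32_t handle,
			      const void *data, uint64_t len)
{
	struct drm_i915_gem_pwrite pwrite = {
		.handle = handle,
		.offset = 0,			/* byte offset into the object */
		.size = len,			/* number of bytes to copy */
		.data_ptr = (uintptr_t)data,	/* user pointer to the source */
	};

	/* The kernel chooses the GTT, shmem or phys path internally. */
	return drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}
#endif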
1550
Chris Wilson40e62d52016-10-28 13:58:41 +01001551static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1552{
1553 struct drm_i915_private *i915;
1554 struct list_head *list;
1555 struct i915_vma *vma;
1556
Chris Wilsonf2123812017-10-16 12:40:37 +01001557 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1558
Chris Wilson40e62d52016-10-28 13:58:41 +01001559 list_for_each_entry(vma, &obj->vma_list, obj_link) {
1560 if (!i915_vma_is_ggtt(vma))
Chris Wilson28f412e2016-12-23 14:57:55 +00001561 break;
Chris Wilson40e62d52016-10-28 13:58:41 +01001562
1563 if (i915_vma_is_active(vma))
1564 continue;
1565
1566 if (!drm_mm_node_allocated(&vma->node))
1567 continue;
1568
1569 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1570 }
1571
1572 i915 = to_i915(obj->base.dev);
Chris Wilsonf2123812017-10-16 12:40:37 +01001573 spin_lock(&i915->mm.obj_lock);
Chris Wilson40e62d52016-10-28 13:58:41 +01001574 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
Chris Wilsonf2123812017-10-16 12:40:37 +01001575 list_move_tail(&obj->mm.link, list);
1576 spin_unlock(&i915->mm.obj_lock);
Chris Wilson40e62d52016-10-28 13:58:41 +01001577}
1578
Eric Anholt673a3942008-07-30 12:06:12 -07001579/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001580 * i915_gem_set_domain_ioctl - Called when user space prepares to use an object with the CPU, either
1581 * through the mmap ioctl's mapping or a GTT mapping.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001582 * @dev: drm device
1583 * @data: ioctl data blob
1584 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001585 */
1586int
1587i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001588 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001589{
1590 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001591 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001592 uint32_t read_domains = args->read_domains;
1593 uint32_t write_domain = args->write_domain;
Chris Wilson40e62d52016-10-28 13:58:41 +01001594 int err;
Eric Anholt673a3942008-07-30 12:06:12 -07001595
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001596 /* Only handle setting domains to types used by the CPU. */
Chris Wilsonb8f90962016-08-05 10:14:07 +01001597 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001598 return -EINVAL;
1599
1600 /* Having something in the write domain implies it's in the read
1601 * domain, and only that read domain. Enforce that in the request.
1602 */
1603 if (write_domain != 0 && read_domains != write_domain)
1604 return -EINVAL;
1605
Chris Wilson03ac0642016-07-20 13:31:51 +01001606 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001607 if (!obj)
1608 return -ENOENT;
Jesse Barnes652c3932009-08-17 13:31:43 -07001609
Chris Wilson3236f572012-08-24 09:35:09 +01001610 /* Try to flush the object off the GPU without holding the lock.
1611 * We will repeat the flush holding the lock in the normal manner
1612 * to catch cases where we are gazumped.
1613 */
Chris Wilson40e62d52016-10-28 13:58:41 +01001614 err = i915_gem_object_wait(obj,
Chris Wilsone95433c2016-10-28 13:58:27 +01001615 I915_WAIT_INTERRUPTIBLE |
1616 (write_domain ? I915_WAIT_ALL : 0),
1617 MAX_SCHEDULE_TIMEOUT,
1618 to_rps_client(file));
Chris Wilson40e62d52016-10-28 13:58:41 +01001619 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001620 goto out;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001621
Chris Wilson40e62d52016-10-28 13:58:41 +01001622 /* Flush and acquire obj->pages so that we are coherent through
1623 * direct access in memory with previous cached writes through
1624 * shmemfs and that our cache domain tracking remains valid.
1625 * For example, if the obj->filp was moved to swap without us
1626 * being notified and releasing the pages, we would mistakenly
1627 * continue to assume that the obj remained out of the CPU cached
1628 * domain.
1629 */
1630 err = i915_gem_object_pin_pages(obj);
1631 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001632 goto out;
Chris Wilson40e62d52016-10-28 13:58:41 +01001633
1634 err = i915_mutex_lock_interruptible(dev);
1635 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001636 goto out_unpin;
Chris Wilson3236f572012-08-24 09:35:09 +01001637
Chris Wilsone22d8e32017-04-12 12:01:11 +01001638 if (read_domains & I915_GEM_DOMAIN_WC)
1639 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
1640 else if (read_domains & I915_GEM_DOMAIN_GTT)
1641 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
Chris Wilson43566de2015-01-02 16:29:29 +05301642 else
Chris Wilsone22d8e32017-04-12 12:01:11 +01001643 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
Chris Wilson40e62d52016-10-28 13:58:41 +01001644
1645 /* And bump the LRU for this access */
1646 i915_gem_object_bump_inactive_ggtt(obj);
1647
1648 mutex_unlock(&dev->struct_mutex);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001649
Daniel Vetter031b6982015-06-26 19:35:16 +02001650 if (write_domain != 0)
Chris Wilsonef749212017-04-12 12:01:10 +01001651 intel_fb_obj_invalidate(obj,
1652 fb_write_origin(obj, write_domain));
Daniel Vetter031b6982015-06-26 19:35:16 +02001653
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001654out_unpin:
Chris Wilson40e62d52016-10-28 13:58:41 +01001655 i915_gem_object_unpin_pages(obj);
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001656out:
1657 i915_gem_object_put(obj);
Chris Wilson40e62d52016-10-28 13:58:41 +01001658 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07001659}
1660
1661/**
1662 * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001663 * @dev: drm device
1664 * @data: ioctl data blob
1665 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001666 */
1667int
1668i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001669 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001670{
1671 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001672 struct drm_i915_gem_object *obj;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001673
Chris Wilson03ac0642016-07-20 13:31:51 +01001674 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonc21724c2016-08-05 10:14:19 +01001675 if (!obj)
1676 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001677
Eric Anholt673a3942008-07-30 12:06:12 -07001678 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson5a97bcc2017-02-22 11:40:46 +00001679 i915_gem_object_flush_if_display(obj);
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001680 i915_gem_object_put(obj);
Chris Wilson5a97bcc2017-02-22 11:40:46 +00001681
1682 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001683}
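/*
 * Illustrative userspace sketch (not part of this file): the CPU access
 * sequence built on the two ioctls above: move the object into the CPU domain
 * before writing through a CPU mmap, then signal completion with SW_FINISH so
 * any required display cache flushing happens. The example_* helper is
 * hypothetical; 'map'/'len' describe an existing CPU mapping. Assumes
 * <string.h>, <xf86drm.h> and <drm/i915_drm.h>.
 */
#if 0	/* example only, never compiled as part of the driver */
static int example_cpu_write(int drm_fd, uint32_t handle, void *map, size_t len)
{
	struct drm_i915_gem_set_domain set_domain = {
		.handle = handle,
		.read_domains = I915_GEM_DOMAIN_CPU,
		.write_domain = I915_GEM_DOMAIN_CPU,
	};
	struct drm_i915_gem_sw_finish sw_finish = { .handle = handle };
	int ret;

	ret = drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
	if (ret)
		return ret;

	memset(map, 0, len);	/* CPU writes via the existing mapping */

	return drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_SW_FINISH, &sw_finish);
}
#endif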
1684
1685/**
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001686 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1687 * it is mapped to.
1688 * @dev: drm device
1689 * @data: ioctl data blob
1690 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001691 *
1692 * While the mapping holds a reference on the contents of the object, it doesn't
1693 * imply a ref on the object itself.
Daniel Vetter34367382014-10-16 12:28:18 +02001694 *
1695 * IMPORTANT:
1696 *
1697 * DRM driver writers who look a this function as an example for how to do GEM
1698 * mmap support, please don't implement mmap support like here. The modern way
1699 * to implement DRM mmap support is with an mmap offset ioctl (like
1700 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1701 * That way debug tooling like valgrind will understand what's going on, hiding
1702 * the mmap call in a driver private ioctl will break that. The i915 driver only
1703 * does cpu mmaps this way because we didn't know better.
Eric Anholt673a3942008-07-30 12:06:12 -07001704 */
1705int
1706i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001707 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001708{
1709 struct drm_i915_gem_mmap *args = data;
Chris Wilson03ac0642016-07-20 13:31:51 +01001710 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001711 unsigned long addr;
1712
Akash Goel1816f922015-01-02 16:29:30 +05301713 if (args->flags & ~(I915_MMAP_WC))
1714 return -EINVAL;
1715
Borislav Petkov568a58e2016-03-29 17:42:01 +02001716 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
Akash Goel1816f922015-01-02 16:29:30 +05301717 return -ENODEV;
1718
Chris Wilson03ac0642016-07-20 13:31:51 +01001719 obj = i915_gem_object_lookup(file, args->handle);
1720 if (!obj)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001721 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001722
Daniel Vetter1286ff72012-05-10 15:25:09 +02001723 /* prime objects have no backing filp to GEM mmap
1724 * pages from.
1725 */
Chris Wilson03ac0642016-07-20 13:31:51 +01001726 if (!obj->base.filp) {
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001727 i915_gem_object_put(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001728 return -EINVAL;
1729 }
1730
Chris Wilson03ac0642016-07-20 13:31:51 +01001731 addr = vm_mmap(obj->base.filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001732 PROT_READ | PROT_WRITE, MAP_SHARED,
1733 args->offset);
Akash Goel1816f922015-01-02 16:29:30 +05301734 if (args->flags & I915_MMAP_WC) {
1735 struct mm_struct *mm = current->mm;
1736 struct vm_area_struct *vma;
1737
Michal Hocko80a89a52016-05-23 16:26:11 -07001738 if (down_write_killable(&mm->mmap_sem)) {
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001739 i915_gem_object_put(obj);
Michal Hocko80a89a52016-05-23 16:26:11 -07001740 return -EINTR;
1741 }
Akash Goel1816f922015-01-02 16:29:30 +05301742 vma = find_vma(mm, addr);
1743 if (vma)
1744 vma->vm_page_prot =
1745 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1746 else
1747 addr = -ENOMEM;
1748 up_write(&mm->mmap_sem);
Chris Wilsonaeecc962016-06-17 14:46:39 -03001749
1750 /* This may race, but that's ok, it only gets set */
Chris Wilson50349242016-08-18 17:17:04 +01001751 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
Akash Goel1816f922015-01-02 16:29:30 +05301752 }
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001753 i915_gem_object_put(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001754 if (IS_ERR((void *)addr))
1755 return addr;
1756
1757 args->addr_ptr = (uint64_t) addr;
1758
1759 return 0;
1760}
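/*
 * Illustrative userspace sketch (not part of this file): obtaining a CPU
 * pointer via the legacy mmap ioctl documented above, optionally requesting a
 * write-combining mapping. The example_* helper is hypothetical; error
 * handling is reduced to a NULL return. Assumes <stdint.h>, <xf86drm.h> and
 * <drm/i915_drm.h>.
 */
#if 0	/* example only, never compiled as part of the driver */
static void *example_cpu_mmap(int drm_fd, uint32_t handle, uint64_t size,
			      uint64_t flags)	/* 0 or I915_MMAP_WC */
{
	struct drm_i915_gem_mmap arg = {
		.handle = handle,
		.size = size,
		.flags = flags,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;

	return (void *)(uintptr_t)arg.addr_ptr;
}
#endif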
1761
Chris Wilson03af84f2016-08-18 17:17:01 +01001762static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1763{
Chris Wilson6649a0b2017-01-09 16:16:08 +00001764 return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
Chris Wilson03af84f2016-08-18 17:17:01 +01001765}
1766
Jesse Barnesde151cf2008-11-12 10:03:55 -08001767/**
Chris Wilson4cc69072016-08-25 19:05:19 +01001768 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1769 *
1770 * A history of the GTT mmap interface:
1771 *
1772 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1773 * aligned and suitable for fencing, and still fit into the available
1774 * mappable space left by the pinned display objects. A classic problem
1775 * we called the page-fault-of-doom where we would ping-pong between
1776 * two objects that could not fit inside the GTT and so the memcpy
1777 * would page one object in at the expense of the other between every
1778 * single byte.
1779 *
1780 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1781 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1782 * object is too large for the available space (or simply too large
1783 * for the mappable aperture!), a view is created instead and faulted
1784 * into userspace. (This view is aligned and sized appropriately for
1785 * fenced access.)
1786 *
Chris Wilsone22d8e32017-04-12 12:01:11 +01001787 * 2 - Recognise WC as a separate cache domain so that we can flush the
1788 * delayed writes via GTT before performing direct access via WC.
1789 *
Chris Wilson4cc69072016-08-25 19:05:19 +01001790 * Restrictions:
1791 *
1792 * * snoopable objects cannot be accessed via the GTT. It can cause machine
1793 * hangs on some architectures, corruption on others. An attempt to service
1794 * a GTT page fault from a snoopable object will generate a SIGBUS.
1795 *
1796 * * the object must be able to fit into RAM (physical memory, though not
1797 * limited to the mappable aperture).
1798 *
1799 *
1800 * Caveats:
1801 *
1802 * * a new GTT page fault will synchronize rendering from the GPU and flush
1803 * all data to system memory. Subsequent access will not be synchronized.
1804 *
1805 * * all mappings are revoked on runtime device suspend.
1806 *
1807 * * there are only 8, 16 or 32 fence registers to share between all users
1808 * (older machines require fence register for display and blitter access
1809 * as well). Contention of the fence registers will cause the previous users
1810 * to be unmapped and any new access will generate new page faults.
1811 *
1812 * * running out of memory while servicing a fault may generate a SIGBUS,
1813 * rather than the expected SIGSEGV.
1814 */
1815int i915_gem_mmap_gtt_version(void)
1816{
Chris Wilsone22d8e32017-04-12 12:01:11 +01001817 return 2;
Chris Wilson4cc69072016-08-25 19:05:19 +01001818}
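/*
 * Illustrative userspace sketch (not part of this file): how userspace
 * discovers the GTT mmap feature level documented above. Kernels that predate
 * the parameter are treated as version 0. The example_* helper is
 * hypothetical. Assumes <xf86drm.h> and <drm/i915_drm.h>.
 */
#if 0	/* example only, never compiled as part of the driver */
static int example_mmap_gtt_version(int drm_fd)
{
	int version = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_MMAP_GTT_VERSION,
		.value = &version,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* parameter not recognised: assume version 0 */

	return version;
}
#endif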
1819
Chris Wilson2d4281b2017-01-10 09:56:32 +00001820static inline struct i915_ggtt_view
1821compute_partial_view(struct drm_i915_gem_object *obj,
Chris Wilson2d4281b2017-01-10 09:56:32 +00001822 pgoff_t page_offset,
1823 unsigned int chunk)
1824{
1825 struct i915_ggtt_view view;
1826
1827 if (i915_gem_object_is_tiled(obj))
1828 chunk = roundup(chunk, tile_row_pages(obj));
1829
Chris Wilson2d4281b2017-01-10 09:56:32 +00001830 view.type = I915_GGTT_VIEW_PARTIAL;
Chris Wilson8bab11932017-01-14 00:28:25 +00001831 view.partial.offset = rounddown(page_offset, chunk);
1832 view.partial.size =
Chris Wilson2d4281b2017-01-10 09:56:32 +00001833 min_t(unsigned int, chunk,
Chris Wilson8bab11932017-01-14 00:28:25 +00001834 (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
Chris Wilson2d4281b2017-01-10 09:56:32 +00001835
1836 /* If the partial covers the entire object, just create a normal VMA. */
1837 if (chunk >= obj->base.size >> PAGE_SHIFT)
1838 view.type = I915_GGTT_VIEW_NORMAL;
1839
1840 return view;
1841}
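/*
 * Worked example (illustrative): for an untiled 16 MiB object (4096 pages)
 * faulting at page_offset 1000 with chunk == MIN_CHUNK_PAGES (256 pages),
 * the view becomes partial.offset = rounddown(1000, 256) = 768 and
 * partial.size = 256, so only that 1 MiB window needs to be bound into the
 * mappable aperture to service the fault.
 */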
1842
Chris Wilson4cc69072016-08-25 19:05:19 +01001843/**
Jesse Barnesde151cf2008-11-12 10:03:55 -08001844 * i915_gem_fault - fault a page into the GTT
Geliang Tangd9072a32015-09-15 05:58:44 -07001845 * @vmf: fault info
Jesse Barnesde151cf2008-11-12 10:03:55 -08001846 *
1847 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1848 * from userspace. The fault handler takes care of binding the object to
1849 * the GTT (if needed), allocating and programming a fence register (again,
1850 * only if needed based on whether the old reg is still valid or the object
1851 * is tiled) and inserting a new PTE into the faulting process.
1852 *
1853 * Note that the faulting process may involve evicting existing objects
1854 * from the GTT and/or fence registers to make room. So performance may
1855 * suffer if the GTT working set is large or there are few fence registers
1856 * left.
Chris Wilson4cc69072016-08-25 19:05:19 +01001857 *
1858 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1859 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
Jesse Barnesde151cf2008-11-12 10:03:55 -08001860 */
Dave Jiang11bac802017-02-24 14:56:41 -08001861int i915_gem_fault(struct vm_fault *vmf)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001862{
Chris Wilson03af84f2016-08-18 17:17:01 +01001863#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
Dave Jiang11bac802017-02-24 14:56:41 -08001864 struct vm_area_struct *area = vmf->vma;
Chris Wilson058d88c2016-08-15 10:49:06 +01001865 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
Chris Wilson05394f32010-11-08 19:18:58 +00001866 struct drm_device *dev = obj->base.dev;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001867 struct drm_i915_private *dev_priv = to_i915(dev);
1868 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001869 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Chris Wilson058d88c2016-08-15 10:49:06 +01001870 struct i915_vma *vma;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001871 pgoff_t page_offset;
Chris Wilson82118872016-08-18 17:17:05 +01001872 unsigned int flags;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001873 int ret;
Paulo Zanonif65c9162013-11-27 18:20:34 -02001874
Jesse Barnesde151cf2008-11-12 10:03:55 -08001875 /* We don't use vmf->pgoff since that has the fake offset */
Jan Kara1a29d852016-12-14 15:07:01 -08001876 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001877
Chris Wilsondb53a302011-02-03 11:57:46 +00001878 trace_i915_gem_object_fault(obj, page_offset, true, write);
1879
Chris Wilson6e4930f2014-02-07 18:37:06 -02001880 /* Try to flush the object off the GPU first without holding the lock.
Chris Wilsonb8f90962016-08-05 10:14:07 +01001881 * Upon acquiring the lock, we will perform our sanity checks and then
Chris Wilson6e4930f2014-02-07 18:37:06 -02001882 * repeat the flush holding the lock in the normal manner to catch cases
1883 * where we are gazumped.
1884 */
Chris Wilsone95433c2016-10-28 13:58:27 +01001885 ret = i915_gem_object_wait(obj,
1886 I915_WAIT_INTERRUPTIBLE,
1887 MAX_SCHEDULE_TIMEOUT,
1888 NULL);
Chris Wilson6e4930f2014-02-07 18:37:06 -02001889 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001890 goto err;
1891
Chris Wilson40e62d52016-10-28 13:58:41 +01001892 ret = i915_gem_object_pin_pages(obj);
1893 if (ret)
1894 goto err;
1895
Chris Wilsonb8f90962016-08-05 10:14:07 +01001896 intel_runtime_pm_get(dev_priv);
1897
1898 ret = i915_mutex_lock_interruptible(dev);
1899 if (ret)
1900 goto err_rpm;
Chris Wilson6e4930f2014-02-07 18:37:06 -02001901
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001902 /* Access to snoopable pages through the GTT is incoherent. */
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00001903 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
Chris Wilsonddeff6e2014-05-28 16:16:41 +01001904 ret = -EFAULT;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001905 goto err_unlock;
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001906 }
1907
Chris Wilson82118872016-08-18 17:17:05 +01001908 /* If the object is smaller than a couple of partial vma, it is
1909 * not worth only creating a single partial vma - we may as well
1910 * clear enough space for the full object.
1911 */
1912 flags = PIN_MAPPABLE;
1913 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1914 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1915
Chris Wilsona61007a2016-08-18 17:17:02 +01001916 /* Now pin it into the GTT as needed */
Chris Wilson82118872016-08-18 17:17:05 +01001917 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
Chris Wilsona61007a2016-08-18 17:17:02 +01001918 if (IS_ERR(vma)) {
Chris Wilsona61007a2016-08-18 17:17:02 +01001919 /* Use a partial view if it is bigger than available space */
Chris Wilson2d4281b2017-01-10 09:56:32 +00001920 struct i915_ggtt_view view =
Chris Wilson8201c1f2017-01-10 09:56:33 +00001921 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
Chris Wilsonaa136d92016-08-18 17:17:03 +01001922
Chris Wilson50349242016-08-18 17:17:04 +01001923 /* Userspace is now writing through an untracked VMA, abandon
1924 * all hope that the hardware is able to track future writes.
1925 */
1926 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1927
Chris Wilsona61007a2016-08-18 17:17:02 +01001928 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1929 }
Chris Wilson058d88c2016-08-15 10:49:06 +01001930 if (IS_ERR(vma)) {
1931 ret = PTR_ERR(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001932 goto err_unlock;
Chris Wilson058d88c2016-08-15 10:49:06 +01001933 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001934
Chris Wilsonc9839302012-11-20 10:45:17 +00001935 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1936 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001937 goto err_unpin;
Chris Wilsonc9839302012-11-20 10:45:17 +00001938
Chris Wilson3bd40732017-10-09 09:43:56 +01001939 ret = i915_vma_pin_fence(vma);
Chris Wilsonc9839302012-11-20 10:45:17 +00001940 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001941 goto err_unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001942
Chris Wilsonb90b91d2014-06-10 12:14:40 +01001943 /* Finally, remap it using the new GTT offset */
Chris Wilsonc58305a2016-08-19 16:54:28 +01001944 ret = remap_io_mapping(area,
Chris Wilson8bab11932017-01-14 00:28:25 +00001945 area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
Chris Wilsonc58305a2016-08-19 16:54:28 +01001946 (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1947 min_t(u64, vma->size, area->vm_end - area->vm_start),
1948 &ggtt->mappable);
Chris Wilsona65adaf2017-10-09 09:43:57 +01001949 if (ret)
1950 goto err_fence;
Chris Wilsona61007a2016-08-18 17:17:02 +01001951
Chris Wilsona65adaf2017-10-09 09:43:57 +01001952 /* Mark as being mmapped into userspace for later revocation */
1953 assert_rpm_wakelock_held(dev_priv);
1954 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
1955 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1956 GEM_BUG_ON(!obj->userfault_count);
1957
1958err_fence:
Chris Wilson3bd40732017-10-09 09:43:56 +01001959 i915_vma_unpin_fence(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001960err_unpin:
Chris Wilson058d88c2016-08-15 10:49:06 +01001961 __i915_vma_unpin(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001962err_unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001963 mutex_unlock(&dev->struct_mutex);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001964err_rpm:
1965 intel_runtime_pm_put(dev_priv);
Chris Wilson40e62d52016-10-28 13:58:41 +01001966 i915_gem_object_unpin_pages(obj);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001967err:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001968 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001969 case -EIO:
Daniel Vetter2232f032014-09-04 09:36:18 +02001970 /*
1971 * We eat errors when the gpu is terminally wedged to avoid
1972 * userspace unduly crashing (gl has no provisions for mmaps to
1973 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1974 * and so needs to be reported.
1975 */
1976 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
Paulo Zanonif65c9162013-11-27 18:20:34 -02001977 ret = VM_FAULT_SIGBUS;
1978 break;
1979 }
Chris Wilson045e7692010-11-07 09:18:22 +00001980 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001981 /*
1982 * EAGAIN means the gpu is hung and we'll wait for the error
1983 * handler to reset everything when re-faulting in
1984 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001985 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001986 case 0:
1987 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001988 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001989 case -EBUSY:
1990 /*
1991 * EBUSY is ok: this just means that another thread
1992 * already did the job.
1993 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001994 ret = VM_FAULT_NOPAGE;
1995 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001996 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001997 ret = VM_FAULT_OOM;
1998 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001999 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00002000 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02002001 ret = VM_FAULT_SIGBUS;
2002 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002003 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02002004 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02002005 ret = VM_FAULT_SIGBUS;
2006 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002007 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02002008 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002009}
2010
Chris Wilsona65adaf2017-10-09 09:43:57 +01002011static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
2012{
2013 struct i915_vma *vma;
2014
2015 GEM_BUG_ON(!obj->userfault_count);
2016
2017 obj->userfault_count = 0;
2018 list_del(&obj->userfault_link);
2019 drm_vma_node_unmap(&obj->base.vma_node,
2020 obj->base.dev->anon_inode->i_mapping);
2021
2022 list_for_each_entry(vma, &obj->vma_list, obj_link) {
2023 if (!i915_vma_is_ggtt(vma))
2024 break;
2025
2026 i915_vma_unset_userfault(vma);
2027 }
2028}
2029
Jesse Barnesde151cf2008-11-12 10:03:55 -08002030/**
Chris Wilson901782b2009-07-10 08:18:50 +01002031 * i915_gem_release_mmap - remove physical page mappings
2032 * @obj: obj in question
2033 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002034 * Preserve the reservation of the mmap offset with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01002035 * relinquish ownership of the pages back to the system.
2036 *
2037 * It is vital that we remove the page mapping if we have mapped a tiled
2038 * object through the GTT and then lose the fence register due to
2039 * resource pressure. Similarly if the object has been moved out of the
2040 * aperture, then pages mapped into userspace must be revoked. Removing the
2041 * mapping will then trigger a page fault on the next user access, allowing
2042 * fixup by i915_gem_fault().
2043 */
Eric Anholtd05ca302009-07-10 13:02:26 -07002044void
Chris Wilson05394f32010-11-08 19:18:58 +00002045i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01002046{
Chris Wilson275f0392016-10-24 13:42:14 +01002047 struct drm_i915_private *i915 = to_i915(obj->base.dev);
Chris Wilson275f0392016-10-24 13:42:14 +01002048
Chris Wilson349f2cc2016-04-13 17:35:12 +01002049 /* Serialisation between user GTT access and our code depends upon
2050 * revoking the CPU's PTE whilst the mutex is held. The next user
2051 * pagefault then has to wait until we release the mutex.
Chris Wilson9c870d02016-10-24 13:42:15 +01002052 *
2053 * Note that RPM complicates this somewhat by adding an additional
2054 * requirement that operations to the GGTT be made holding the RPM
2055 * wakeref.
Chris Wilson349f2cc2016-04-13 17:35:12 +01002056 */
Chris Wilson275f0392016-10-24 13:42:14 +01002057 lockdep_assert_held(&i915->drm.struct_mutex);
Chris Wilson9c870d02016-10-24 13:42:15 +01002058 intel_runtime_pm_get(i915);
Chris Wilson349f2cc2016-04-13 17:35:12 +01002059
Chris Wilsona65adaf2017-10-09 09:43:57 +01002060 if (!obj->userfault_count)
Chris Wilson9c870d02016-10-24 13:42:15 +01002061 goto out;
Chris Wilson901782b2009-07-10 08:18:50 +01002062
Chris Wilsona65adaf2017-10-09 09:43:57 +01002063 __i915_gem_object_release_mmap(obj);
Chris Wilson349f2cc2016-04-13 17:35:12 +01002064
2065 /* Ensure that the CPU's PTE are revoked and there are not outstanding
2066 * memory transactions from userspace before we return. The TLB
2067 * flushing implied above by changing the PTE above *should* be
2068 * sufficient, an extra barrier here just provides us with a bit
2069 * of paranoid documentation about our requirement to serialise
2070 * memory writes before touching registers / GSM.
2071 */
2072 wmb();
Chris Wilson9c870d02016-10-24 13:42:15 +01002073
2074out:
2075 intel_runtime_pm_put(i915);
Chris Wilson901782b2009-07-10 08:18:50 +01002076}
2077
Chris Wilson7c108fd2016-10-24 13:42:18 +01002078void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
Chris Wilsoneedd10f2014-06-16 08:57:44 +01002079{
Chris Wilson3594a3e2016-10-24 13:42:16 +01002080 struct drm_i915_gem_object *obj, *on;
Chris Wilson7c108fd2016-10-24 13:42:18 +01002081 int i;
Chris Wilsoneedd10f2014-06-16 08:57:44 +01002082
Chris Wilson3594a3e2016-10-24 13:42:16 +01002083 /*
2084 * Only called during RPM suspend. All users of the userfault_list
2085 * must be holding an RPM wakeref to ensure that this can not
2086 * run concurrently with themselves (and use the struct_mutex for
2087 * protection between themselves).
2088 */
2089
2090 list_for_each_entry_safe(obj, on,
Chris Wilsona65adaf2017-10-09 09:43:57 +01002091 &dev_priv->mm.userfault_list, userfault_link)
2092 __i915_gem_object_release_mmap(obj);
Chris Wilson7c108fd2016-10-24 13:42:18 +01002093
2094 /* The fence will be lost when the device powers down. If any were
2095 * in use by hardware (i.e. they are pinned), we should not be powering
2096 * down! All other fences will be reacquired by the user upon waking.
2097 */
2098 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2099 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2100
Chris Wilsone0ec3ec2017-02-03 12:57:17 +00002101 /* Ideally we want to assert that the fence register is not
2102 * live at this point (i.e. that no piece of code will be
2103 * trying to write through fence + GTT, as that both violates
2104 * our tracking of activity and associated locking/barriers,
2105 * and is also illegal given that the hw is powered down).
2106 *
2107 * Previously we used reg->pin_count as a "liveness" indicator.
2108 * That is not sufficient, and we need a more fine-grained
2109 * tool if we want to have a sanity check here.
2110 */
Chris Wilson7c108fd2016-10-24 13:42:18 +01002111
2112 if (!reg->vma)
2113 continue;
2114
Chris Wilsona65adaf2017-10-09 09:43:57 +01002115 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
Chris Wilson7c108fd2016-10-24 13:42:18 +01002116 reg->dirty = true;
2117 }
Chris Wilsoneedd10f2014-06-16 08:57:44 +01002118}
2119
Chris Wilsond8cb5082012-08-11 15:41:03 +01002120static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2121{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002122 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002123 int err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002124
Chris Wilsonf3f61842016-08-05 10:14:14 +01002125 err = drm_gem_create_mmap_offset(&obj->base);
Chris Wilsonb42a13d2017-01-06 15:22:40 +00002126 if (likely(!err))
Chris Wilsonf3f61842016-08-05 10:14:14 +01002127 return 0;
Daniel Vetterda494d72012-12-20 15:11:16 +01002128
Chris Wilsonb42a13d2017-01-06 15:22:40 +00002129 /* Attempt to reap some mmap space from dead objects */
2130 do {
2131 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2132 if (err)
2133 break;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002134
Chris Wilsonb42a13d2017-01-06 15:22:40 +00002135 i915_gem_drain_freed_objects(dev_priv);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002136 err = drm_gem_create_mmap_offset(&obj->base);
Chris Wilsonb42a13d2017-01-06 15:22:40 +00002137 if (!err)
2138 break;
2139
2140 } while (flush_delayed_work(&dev_priv->gt.retire_work));
Daniel Vetterda494d72012-12-20 15:11:16 +01002141
Chris Wilsonf3f61842016-08-05 10:14:14 +01002142 return err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002143}
2144
2145static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2146{
Chris Wilsond8cb5082012-08-11 15:41:03 +01002147 drm_gem_free_mmap_offset(&obj->base);
2148}
2149
Dave Airlieda6b51d2014-12-24 13:11:17 +10002150int
Dave Airlieff72145b2011-02-07 12:16:14 +10002151i915_gem_mmap_gtt(struct drm_file *file,
2152 struct drm_device *dev,
Dave Airlieda6b51d2014-12-24 13:11:17 +10002153 uint32_t handle,
Dave Airlieff72145b2011-02-07 12:16:14 +10002154 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002155{
Chris Wilson05394f32010-11-08 19:18:58 +00002156 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002157 int ret;
2158
Chris Wilson03ac0642016-07-20 13:31:51 +01002159 obj = i915_gem_object_lookup(file, handle);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002160 if (!obj)
2161 return -ENOENT;
Chris Wilsonab182822009-09-22 18:46:17 +01002162
Chris Wilsond8cb5082012-08-11 15:41:03 +01002163 ret = i915_gem_object_create_mmap_offset(obj);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002164 if (ret == 0)
2165 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002166
Chris Wilsonf0cd5182016-10-28 13:58:43 +01002167 i915_gem_object_put(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002168 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002169}
2170
Dave Airlieff72145b2011-02-07 12:16:14 +10002171/**
2172 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2173 * @dev: DRM device
2174 * @data: GTT mapping ioctl data
2175 * @file: GEM object info
2176 *
2177 * Simply returns the fake offset to userspace so it can mmap it.
2178 * The mmap call will end up in drm_gem_mmap(), which will set things
2179 * up so we can get faults in the handler above.
2180 *
2181 * The fault handler will take care of binding the object into the GTT
2182 * (since it may have been evicted to make room for something), allocating
2183 * a fence register, and mapping the appropriate aperture address into
2184 * userspace.
2185 */
2186int
2187i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2188 struct drm_file *file)
2189{
2190 struct drm_i915_gem_mmap_gtt *args = data;
2191
Dave Airlieda6b51d2014-12-24 13:11:17 +10002192 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
Dave Airlieff72145b2011-02-07 12:16:14 +10002193}
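/*
 * Illustrative userspace sketch (not part of this file): the GTT mmap flow
 * recommended above: fetch the fake offset for the object, then mmap() the
 * DRM fd itself so that i915_gem_fault() populates the mapping on demand.
 * The example_* helper is hypothetical; the caller compares the result
 * against MAP_FAILED. Assumes <sys/mman.h>, <stdint.h>, <xf86drm.h> and
 * <drm/i915_drm.h>.
 */
#if 0	/* example only, never compiled as part of the driver */
static void *example_gtt_mmap(int drm_fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	/* arg.offset is a per-object token, not a real file offset. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, arg.offset);
}
#endif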
2194
Daniel Vetter225067e2012-08-20 10:23:20 +02002195/* Immediately discard the backing storage */
2196static void
2197i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01002198{
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002199 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02002200
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002201 if (obj->base.filp == NULL)
2202 return;
2203
Daniel Vetter225067e2012-08-20 10:23:20 +02002204 /* Our goal here is to return as much of the memory as
2205 * possible back to the system, since we are called from OOM.
2206 * To do this we must instruct the shmfs to drop all of its
2207 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01002208 */
Chris Wilson55372522014-03-25 13:23:06 +00002209 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002210 obj->mm.madv = __I915_MADV_PURGED;
Chris Wilson4e5462e2017-03-07 13:20:31 +00002211 obj->mm.pages = ERR_PTR(-EFAULT);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002212}
Chris Wilsone5281cc2010-10-28 13:45:36 +01002213
Chris Wilson55372522014-03-25 13:23:06 +00002214/* Try to discard unwanted pages */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002215void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
Daniel Vetter225067e2012-08-20 10:23:20 +02002216{
Chris Wilson55372522014-03-25 13:23:06 +00002217 struct address_space *mapping;
2218
Chris Wilson1233e2d2016-10-28 13:58:37 +01002219 lockdep_assert_held(&obj->mm.lock);
Chris Wilsonf1fa4f42017-10-13 21:26:13 +01002220 GEM_BUG_ON(i915_gem_object_has_pages(obj));
Chris Wilson1233e2d2016-10-28 13:58:37 +01002221
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002222 switch (obj->mm.madv) {
Chris Wilson55372522014-03-25 13:23:06 +00002223 case I915_MADV_DONTNEED:
2224 i915_gem_object_truncate(obj);
2225 case __I915_MADV_PURGED:
2226 return;
2227 }
2228
2229 if (obj->base.filp == NULL)
2230 return;
2231
Al Viro93c76a32015-12-04 23:45:44 -05002232 mapping = obj->base.filp->f_mapping;
Chris Wilson55372522014-03-25 13:23:06 +00002233 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002234}
2235
Chris Wilson5cdf5882010-09-27 15:51:07 +01002236static void
Chris Wilson03ac84f2016-10-28 13:58:36 +01002237i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2238 struct sg_table *pages)
Eric Anholt673a3942008-07-30 12:06:12 -07002239{
Dave Gordon85d12252016-05-20 11:54:06 +01002240 struct sgt_iter sgt_iter;
2241 struct page *page;
Daniel Vetter1286ff72012-05-10 15:25:09 +02002242
Chris Wilsone5facdf2016-12-23 14:57:57 +00002243 __i915_gem_object_release_shmem(obj, pages, true);
Eric Anholt856fa192009-03-19 14:10:50 -07002244
Chris Wilson03ac84f2016-10-28 13:58:36 +01002245 i915_gem_gtt_finish_pages(obj, pages);
Imre Deake2273302015-07-09 12:59:05 +03002246
Daniel Vetter6dacfd22011-09-12 21:30:02 +02002247 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002248 i915_gem_object_save_bit_17_swizzle(obj, pages);
Eric Anholt280b7132009-03-12 16:56:27 -07002249
Chris Wilson03ac84f2016-10-28 13:58:36 +01002250 for_each_sgt_page(page, sgt_iter, pages) {
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002251 if (obj->mm.dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01002252 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002253
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002254 if (obj->mm.madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01002255 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002256
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002257 put_page(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002258 }
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002259 obj->mm.dirty = false;
Eric Anholt673a3942008-07-30 12:06:12 -07002260
Chris Wilson03ac84f2016-10-28 13:58:36 +01002261 sg_free_table(pages);
2262 kfree(pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01002263}
2264
Chris Wilson96d77632016-10-28 13:58:33 +01002265static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2266{
2267 struct radix_tree_iter iter;
Ville Syrjäläc23aa712017-09-01 20:12:51 +03002268 void __rcu **slot;
Chris Wilson96d77632016-10-28 13:58:33 +01002269
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002270 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2271 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
Chris Wilson96d77632016-10-28 13:58:33 +01002272}
2273
Chris Wilson548625e2016-11-01 12:11:34 +00002274void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2275 enum i915_mm_subclass subclass)
Chris Wilson37e680a2012-06-07 15:38:42 +01002276{
Chris Wilsonf2123812017-10-16 12:40:37 +01002277 struct drm_i915_private *i915 = to_i915(obj->base.dev);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002278 struct sg_table *pages;
Chris Wilson37e680a2012-06-07 15:38:42 +01002279
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002280 if (i915_gem_object_has_pinned_pages(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002281 return;
Chris Wilsona5570172012-09-04 21:02:54 +01002282
Chris Wilson15717de2016-08-04 07:52:26 +01002283 GEM_BUG_ON(obj->bind_count);
Chris Wilsonf1fa4f42017-10-13 21:26:13 +01002284 if (!i915_gem_object_has_pages(obj))
Chris Wilson1233e2d2016-10-28 13:58:37 +01002285 return;
2286
2287 /* May be called by shrinker from within get_pages() (on another bo) */
Chris Wilson548625e2016-11-01 12:11:34 +00002288 mutex_lock_nested(&obj->mm.lock, subclass);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002289 if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2290 goto unlock;
Ben Widawsky3e123022013-07-31 17:00:04 -07002291
Chris Wilsona2165e32012-12-03 11:49:00 +00002292 /* ->put_pages might need to allocate memory for the bit17 swizzle
2293 * array, hence protect them from being reaped by removing them from gtt
2294 * lists early. */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002295 pages = fetch_and_zero(&obj->mm.pages);
2296 GEM_BUG_ON(!pages);
Chris Wilsona2165e32012-12-03 11:49:00 +00002297
Chris Wilsonf2123812017-10-16 12:40:37 +01002298 spin_lock(&i915->mm.obj_lock);
2299 list_del(&obj->mm.link);
2300 spin_unlock(&i915->mm.obj_lock);
2301
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002302 if (obj->mm.mapping) {
Chris Wilson4b30cb22016-08-18 17:16:42 +01002303 void *ptr;
2304
Chris Wilson0ce81782017-05-17 13:09:59 +01002305 ptr = page_mask_bits(obj->mm.mapping);
Chris Wilson4b30cb22016-08-18 17:16:42 +01002306 if (is_vmalloc_addr(ptr))
2307 vunmap(ptr);
Chris Wilsonfb8621d2016-04-08 12:11:14 +01002308 else
Chris Wilson4b30cb22016-08-18 17:16:42 +01002309 kunmap(kmap_to_page(ptr));
2310
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002311 obj->mm.mapping = NULL;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002312 }
2313
Chris Wilson96d77632016-10-28 13:58:33 +01002314 __i915_gem_object_reset_page_iter(obj);
2315
Chris Wilson4e5462e2017-03-07 13:20:31 +00002316 if (!IS_ERR(pages))
2317 obj->ops->put_pages(obj, pages);
2318
Matthew Aulda5c081662017-10-06 23:18:18 +01002319 obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
2320
Chris Wilson1233e2d2016-10-28 13:58:37 +01002321unlock:
2322 mutex_unlock(&obj->mm.lock);
Chris Wilson6c085a72012-08-20 11:40:46 +02002323}
2324
Chris Wilson935a2f72017-02-13 17:15:13 +00002325static bool i915_sg_trim(struct sg_table *orig_st)
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002326{
2327 struct sg_table new_st;
2328 struct scatterlist *sg, *new_sg;
2329 unsigned int i;
2330
2331 if (orig_st->nents == orig_st->orig_nents)
Chris Wilson935a2f72017-02-13 17:15:13 +00002332 return false;
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002333
Chris Wilson8bfc478f2016-12-23 14:57:58 +00002334 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
Chris Wilson935a2f72017-02-13 17:15:13 +00002335 return false;
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002336
2337 new_sg = new_st.sgl;
2338 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2339 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2340 /* called before being DMA mapped, no need to copy sg->dma_* */
2341 new_sg = sg_next(new_sg);
2342 }
Chris Wilsonc2dc6cc2016-12-19 12:43:46 +00002343 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002344
2345 sg_free_table(orig_st);
2346
2347 *orig_st = new_st;
Chris Wilson935a2f72017-02-13 17:15:13 +00002348 return true;
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002349}
2350
Matthew Auldb91b09e2017-10-06 23:18:17 +01002351static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002352{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002353 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsond766ef52016-12-19 12:43:45 +00002354 const unsigned long page_count = obj->base.size / PAGE_SIZE;
2355 unsigned long i;
Eric Anholt673a3942008-07-30 12:06:12 -07002356 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01002357 struct sg_table *st;
2358 struct scatterlist *sg;
Dave Gordon85d12252016-05-20 11:54:06 +01002359 struct sgt_iter sgt_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07002360 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02002361 unsigned long last_pfn = 0; /* suppress gcc warning */
Tvrtko Ursulin56024522017-08-03 10:14:17 +01002362 unsigned int max_segment = i915_sg_segment_size();
Matthew Auld84e89782017-10-09 12:00:24 +01002363 unsigned int sg_page_sizes;
Chris Wilson4846bf02017-06-09 12:03:46 +01002364 gfp_t noreclaim;
Imre Deake2273302015-07-09 12:59:05 +03002365 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002366
Chris Wilson6c085a72012-08-20 11:40:46 +02002367 /* Assert that the object is not currently in any GPU domain. As it
2368 * wasn't in the GTT, there shouldn't be any way it could have been in
2369 * a GPU cache
2370 */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002371 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2372 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Chris Wilson6c085a72012-08-20 11:40:46 +02002373
Chris Wilson9da3da62012-06-01 15:20:22 +01002374 st = kmalloc(sizeof(*st), GFP_KERNEL);
2375 if (st == NULL)
Matthew Auldb91b09e2017-10-06 23:18:17 +01002376 return -ENOMEM;
Eric Anholt673a3942008-07-30 12:06:12 -07002377
Chris Wilsond766ef52016-12-19 12:43:45 +00002378rebuild_st:
Chris Wilson9da3da62012-06-01 15:20:22 +01002379 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01002380 kfree(st);
Matthew Auldb91b09e2017-10-06 23:18:17 +01002381 return -ENOMEM;
Chris Wilson9da3da62012-06-01 15:20:22 +01002382 }
2383
2384 /* Get the list of pages out of our struct file. They'll be pinned
2385 * at this point until we release them.
2386 *
2387 * Fail silently without starting the shrinker
2388 */
Al Viro93c76a32015-12-04 23:45:44 -05002389 mapping = obj->base.filp->f_mapping;
Chris Wilson0f6ab552017-06-09 12:03:48 +01002390 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
Chris Wilson4846bf02017-06-09 12:03:46 +01002391 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2392
Imre Deak90797e62013-02-18 19:28:03 +02002393 sg = st->sgl;
2394 st->nents = 0;
Matthew Auld84e89782017-10-09 12:00:24 +01002395 sg_page_sizes = 0;
Imre Deak90797e62013-02-18 19:28:03 +02002396 for (i = 0; i < page_count; i++) {
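		/*
		 * Allocation escalation ladder: first try without reclaim;
		 * on failure reap our own bound/unbound/purgeable objects
		 * and retry with the mapping's full gfp mask, relying on
		 * __GFP_RETRY_MAYFAIL to report ENOMEM to userspace instead
		 * of invoking the OOM killer.
		 */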
Chris Wilson4846bf02017-06-09 12:03:46 +01002397 const unsigned int shrink[] = {
2398 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2399 0,
2400 }, *s = shrink;
2401 gfp_t gfp = noreclaim;
2402
2403 do {
Chris Wilson6c085a72012-08-20 11:40:46 +02002404 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
Chris Wilson4846bf02017-06-09 12:03:46 +01002405 if (likely(!IS_ERR(page)))
2406 break;
2407
2408 if (!*s) {
2409 ret = PTR_ERR(page);
2410 goto err_sg;
2411 }
2412
Chris Wilson912d5722017-09-06 16:19:30 -07002413 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
Chris Wilson4846bf02017-06-09 12:03:46 +01002414 cond_resched();
Chris Wilson24f8e002017-03-22 11:05:21 +00002415
Chris Wilson6c085a72012-08-20 11:40:46 +02002416 /* We've tried hard to allocate the memory by reaping
2417 * our own buffer, now let the real VM do its job and
2418 * go down in flames if truly OOM.
Chris Wilson24f8e002017-03-22 11:05:21 +00002419 *
2420 * However, since graphics tend to be disposable,
2421 * defer the oom here by reporting the ENOMEM back
2422 * to userspace.
Chris Wilson6c085a72012-08-20 11:40:46 +02002423 */
Chris Wilson4846bf02017-06-09 12:03:46 +01002424 if (!*s) {
2425 /* reclaim and warn, but no oom */
2426 gfp = mapping_gfp_mask(mapping);
Chris Wilsoneaf41802017-06-09 12:03:47 +01002427
2428 /* Our bo are always dirty and so we require
2429 * kswapd to reclaim our pages (direct reclaim
2430 * does not effectively begin pageout of our
2431 * buffers on its own). However, direct reclaim
2432 * only waits for kswapd when under allocation
2433 * congestion. So as a result __GFP_RECLAIM is
2434 * unreliable and fails to actually reclaim our
2435 * dirty pages -- unless you try over and over
2436 * again with !__GFP_NORETRY. However, we still
2437 * want to fail this allocation rather than
2438 * trigger the out-of-memory killer and for
Michal Hockodbb32952017-07-12 14:36:55 -07002439 * this we want __GFP_RETRY_MAYFAIL.
Chris Wilsoneaf41802017-06-09 12:03:47 +01002440 */
Michal Hockodbb32952017-07-12 14:36:55 -07002441 gfp |= __GFP_RETRY_MAYFAIL;
Imre Deake2273302015-07-09 12:59:05 +03002442 }
Chris Wilson4846bf02017-06-09 12:03:46 +01002443 } while (1);
2444
Chris Wilson871dfbd2016-10-11 09:20:21 +01002445 if (!i ||
2446 sg->length >= max_segment ||
2447 page_to_pfn(page) != last_pfn + 1) {
Matthew Aulda5c081662017-10-06 23:18:18 +01002448 if (i) {
Matthew Auld84e89782017-10-09 12:00:24 +01002449 sg_page_sizes |= sg->length;
Imre Deak90797e62013-02-18 19:28:03 +02002450 sg = sg_next(sg);
Matthew Aulda5c081662017-10-06 23:18:18 +01002451 }
Imre Deak90797e62013-02-18 19:28:03 +02002452 st->nents++;
2453 sg_set_page(sg, page, PAGE_SIZE, 0);
2454 } else {
2455 sg->length += PAGE_SIZE;
2456 }
2457 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03002458
2459 /* Check that the i965g/gm workaround works. */
2460 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07002461 }
Matthew Aulda5c081662017-10-06 23:18:18 +01002462 if (sg) { /* loop terminated early; short sg table */
Matthew Auld84e89782017-10-09 12:00:24 +01002463 sg_page_sizes |= sg->length;
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002464 sg_mark_end(sg);
Matthew Aulda5c081662017-10-06 23:18:18 +01002465 }
Chris Wilson74ce6b62012-10-19 15:51:06 +01002466
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002467 /* Trim unused sg entries to avoid wasting memory. */
2468 i915_sg_trim(st);
2469
Chris Wilson03ac84f2016-10-28 13:58:36 +01002470 ret = i915_gem_gtt_prepare_pages(obj, st);
Chris Wilsond766ef52016-12-19 12:43:45 +00002471 if (ret) {
2472 /* DMA remapping failed? One possible cause is that
2473		 * it could not reserve enough large entries; asking
2474		 * for PAGE_SIZE chunks instead may be helpful.
2475 */
2476 if (max_segment > PAGE_SIZE) {
2477 for_each_sgt_page(page, sgt_iter, st)
2478 put_page(page);
2479 sg_free_table(st);
2480
2481 max_segment = PAGE_SIZE;
2482 goto rebuild_st;
2483 } else {
2484 dev_warn(&dev_priv->drm.pdev->dev,
2485 "Failed to DMA remap %lu pages\n",
2486 page_count);
2487 goto err_pages;
2488 }
2489 }
Imre Deake2273302015-07-09 12:59:05 +03002490
Eric Anholt673a3942008-07-30 12:06:12 -07002491 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002492 i915_gem_object_do_bit_17_swizzle(obj, st);
Eric Anholt673a3942008-07-30 12:06:12 -07002493
Matthew Auld84e89782017-10-09 12:00:24 +01002494 __i915_gem_object_set_pages(obj, st, sg_page_sizes);
Matthew Auldb91b09e2017-10-06 23:18:17 +01002495
2496 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002497
Chris Wilsonb17993b2016-11-14 11:29:30 +00002498err_sg:
Imre Deak90797e62013-02-18 19:28:03 +02002499 sg_mark_end(sg);
Chris Wilsonb17993b2016-11-14 11:29:30 +00002500err_pages:
Dave Gordon85d12252016-05-20 11:54:06 +01002501 for_each_sgt_page(page, sgt_iter, st)
2502 put_page(page);
Chris Wilson9da3da62012-06-01 15:20:22 +01002503 sg_free_table(st);
2504 kfree(st);
Chris Wilson0820baf2014-03-25 13:23:03 +00002505
2506 /* shmemfs first checks if there is enough memory to allocate the page
2507 * and reports ENOSPC should there be insufficient, along with the usual
2508 * ENOMEM for a genuine allocation failure.
2509 *
2510 * We use ENOSPC in our driver to mean that we have run out of aperture
2511 * space and so want to translate the error from shmemfs back to our
2512 * usual understanding of ENOMEM.
2513 */
Imre Deake2273302015-07-09 12:59:05 +03002514 if (ret == -ENOSPC)
2515 ret = -ENOMEM;
2516
Matthew Auldb91b09e2017-10-06 23:18:17 +01002517 return ret;
Chris Wilson03ac84f2016-10-28 13:58:36 +01002518}
2519
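/*
 * Publish a freshly acquired page set on the object: record the sg_table,
 * reset the page lookup iterator, pin the pages up-front when the swizzle
 * quirk demands it, derive the GTT page sizes we may use from the physical
 * page sizes present, and place the object on the unbound list.
 */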
2520void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
Matthew Aulda5c081662017-10-06 23:18:18 +01002521 struct sg_table *pages,
Matthew Auld84e89782017-10-09 12:00:24 +01002522 unsigned int sg_page_sizes)
Chris Wilson03ac84f2016-10-28 13:58:36 +01002523{
Matthew Aulda5c081662017-10-06 23:18:18 +01002524 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2525 unsigned long supported = INTEL_INFO(i915)->page_sizes;
2526 int i;
2527
Chris Wilson1233e2d2016-10-28 13:58:37 +01002528 lockdep_assert_held(&obj->mm.lock);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002529
2530 obj->mm.get_page.sg_pos = pages->sgl;
2531 obj->mm.get_page.sg_idx = 0;
2532
2533 obj->mm.pages = pages;
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002534
2535 if (i915_gem_object_is_tiled(obj) &&
Chris Wilsonf2123812017-10-16 12:40:37 +01002536 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002537 GEM_BUG_ON(obj->mm.quirked);
2538 __i915_gem_object_pin_pages(obj);
2539 obj->mm.quirked = true;
2540 }
Matthew Aulda5c081662017-10-06 23:18:18 +01002541
Matthew Auld84e89782017-10-09 12:00:24 +01002542 GEM_BUG_ON(!sg_page_sizes);
2543 obj->mm.page_sizes.phys = sg_page_sizes;
Matthew Aulda5c081662017-10-06 23:18:18 +01002544
2545 /*
Matthew Auld84e89782017-10-09 12:00:24 +01002546 * Calculate the supported page-sizes which fit into the given
2547 * sg_page_sizes. This will give us the page-sizes which we may be able
2548 * to use opportunistically when later inserting into the GTT. For
2549 * example if phys=2G, then in theory we should be able to use 1G, 2M,
2550 * 64K or 4K pages, although in practice this will depend on a number of
2551 * other factors.
Matthew Aulda5c081662017-10-06 23:18:18 +01002552 */
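	/*
	 * For example: with phys = 2M | 4K and supported = 1G | 2M | 4K,
	 * every supported bit at or below the largest physical chunk is
	 * kept, giving sg = 2M | 4K.
	 */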
2553 obj->mm.page_sizes.sg = 0;
2554 for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
2555 if (obj->mm.page_sizes.phys & ~0u << i)
2556 obj->mm.page_sizes.sg |= BIT(i);
2557 }
Matthew Aulda5c081662017-10-06 23:18:18 +01002558 GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
Chris Wilsonf2123812017-10-16 12:40:37 +01002559
2560 spin_lock(&i915->mm.obj_lock);
2561 list_add(&obj->mm.link, &i915->mm.unbound_list);
2562 spin_unlock(&i915->mm.obj_lock);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002563}
2564
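/*
 * Lower-level helper: ask the backend for pages via obj->ops->get_pages().
 * Objects marked I915_MADV_DONTNEED (or already purged) are rejected with
 * -EFAULT. Callers are expected to hold obj->mm.lock.
 */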
2565static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2566{
Matthew Auldb91b09e2017-10-06 23:18:17 +01002567 int err;
Chris Wilson03ac84f2016-10-28 13:58:36 +01002568
2569 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2570 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2571 return -EFAULT;
2572 }
2573
Matthew Auldb91b09e2017-10-06 23:18:17 +01002574 err = obj->ops->get_pages(obj);
2575 GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
Chris Wilson03ac84f2016-10-28 13:58:36 +01002576
Matthew Auldb91b09e2017-10-06 23:18:17 +01002577 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07002578}
2579
Chris Wilson37e680a2012-06-07 15:38:42 +01002580/* Ensure that the associated pages are gathered from the backing storage
Chris Wilson1233e2d2016-10-28 13:58:37 +01002581 * and pinned into our object. i915_gem_object_pin_pages() may be called
Chris Wilson37e680a2012-06-07 15:38:42 +01002582 * multiple times before they are released by a single call to
Chris Wilson1233e2d2016-10-28 13:58:37 +01002583 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
Chris Wilson37e680a2012-06-07 15:38:42 +01002584 * either as a result of memory pressure (reaping pages under the shrinker)
2585 * or as the object is itself released.
2586 */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002587int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
Chris Wilson37e680a2012-06-07 15:38:42 +01002588{
Chris Wilson03ac84f2016-10-28 13:58:36 +01002589 int err;
Chris Wilson37e680a2012-06-07 15:38:42 +01002590
Chris Wilson1233e2d2016-10-28 13:58:37 +01002591 err = mutex_lock_interruptible(&obj->mm.lock);
2592 if (err)
2593 return err;
Chris Wilson4c7d62c2016-10-28 13:58:32 +01002594
Chris Wilsonf1fa4f42017-10-13 21:26:13 +01002595 if (unlikely(!i915_gem_object_has_pages(obj))) {
Chris Wilson88c880b2017-09-06 14:52:20 +01002596 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2597
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002598 err = ____i915_gem_object_get_pages(obj);
2599 if (err)
2600 goto unlock;
2601
2602 smp_mb__before_atomic();
Chris Wilson1233e2d2016-10-28 13:58:37 +01002603 }
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002604 atomic_inc(&obj->mm.pages_pin_count);
Chris Wilson43e28f02013-01-08 10:53:09 +00002605
Chris Wilson1233e2d2016-10-28 13:58:37 +01002606unlock:
2607 mutex_unlock(&obj->mm.lock);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002608 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07002609}
2610
Dave Gordondd6034c2016-05-20 11:54:04 +01002611/* The 'mapping' part of i915_gem_object_pin_map() below */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002612static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2613 enum i915_map_type type)
Dave Gordondd6034c2016-05-20 11:54:04 +01002614{
2615 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002616 struct sg_table *sgt = obj->mm.pages;
Dave Gordon85d12252016-05-20 11:54:06 +01002617 struct sgt_iter sgt_iter;
2618 struct page *page;
Dave Gordonb338fa42016-05-20 11:54:05 +01002619 struct page *stack_pages[32];
2620 struct page **pages = stack_pages;
Dave Gordondd6034c2016-05-20 11:54:04 +01002621 unsigned long i = 0;
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002622 pgprot_t pgprot;
Dave Gordondd6034c2016-05-20 11:54:04 +01002623 void *addr;
2624
2625 /* A single page can always be kmapped */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002626 if (n_pages == 1 && type == I915_MAP_WB)
Dave Gordondd6034c2016-05-20 11:54:04 +01002627 return kmap(sg_page(sgt->sgl));
2628
Dave Gordonb338fa42016-05-20 11:54:05 +01002629 if (n_pages > ARRAY_SIZE(stack_pages)) {
2630 /* Too big for stack -- allocate temporary array instead */
Michal Hocko0ee931c2017-09-13 16:28:29 -07002631 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
Dave Gordonb338fa42016-05-20 11:54:05 +01002632 if (!pages)
2633 return NULL;
2634 }
Dave Gordondd6034c2016-05-20 11:54:04 +01002635
Dave Gordon85d12252016-05-20 11:54:06 +01002636 for_each_sgt_page(page, sgt_iter, sgt)
2637 pages[i++] = page;
Dave Gordondd6034c2016-05-20 11:54:04 +01002638
2639 /* Check that we have the expected number of pages */
2640 GEM_BUG_ON(i != n_pages);
2641
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002642 switch (type) {
Chris Wilsona575c672017-08-28 11:46:31 +01002643 default:
2644 MISSING_CASE(type);
2645 /* fallthrough to use PAGE_KERNEL anyway */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002646 case I915_MAP_WB:
2647 pgprot = PAGE_KERNEL;
2648 break;
2649 case I915_MAP_WC:
2650 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2651 break;
2652 }
2653 addr = vmap(pages, n_pages, 0, pgprot);
Dave Gordondd6034c2016-05-20 11:54:04 +01002654
Dave Gordonb338fa42016-05-20 11:54:05 +01002655 if (pages != stack_pages)
Michal Hocko20981052017-05-17 14:23:12 +02002656 kvfree(pages);
Dave Gordondd6034c2016-05-20 11:54:04 +01002657
2658 return addr;
2659}
2660
2661/* get, pin, and map the pages of the object into kernel space */
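/*
 * A typical usage sketch (error handling trimmed); the mapping is cached
 * on the object, so repeated calls requesting the same type are cheap:
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... read or write through vaddr ...
 *	i915_gem_object_unpin_map(obj);
 */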
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002662void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2663 enum i915_map_type type)
Chris Wilson0a798eb2016-04-08 12:11:11 +01002664{
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002665 enum i915_map_type has_type;
2666 bool pinned;
2667 void *ptr;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002668 int ret;
2669
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002670 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
Chris Wilson0a798eb2016-04-08 12:11:11 +01002671
Chris Wilson1233e2d2016-10-28 13:58:37 +01002672 ret = mutex_lock_interruptible(&obj->mm.lock);
Chris Wilson0a798eb2016-04-08 12:11:11 +01002673 if (ret)
2674 return ERR_PTR(ret);
2675
Chris Wilsona575c672017-08-28 11:46:31 +01002676 pinned = !(type & I915_MAP_OVERRIDE);
2677 type &= ~I915_MAP_OVERRIDE;
2678
Chris Wilson1233e2d2016-10-28 13:58:37 +01002679 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
Chris Wilsonf1fa4f42017-10-13 21:26:13 +01002680 if (unlikely(!i915_gem_object_has_pages(obj))) {
Chris Wilson88c880b2017-09-06 14:52:20 +01002681 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2682
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002683 ret = ____i915_gem_object_get_pages(obj);
2684 if (ret)
2685 goto err_unlock;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002686
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002687 smp_mb__before_atomic();
2688 }
2689 atomic_inc(&obj->mm.pages_pin_count);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002690 pinned = false;
2691 }
Chris Wilsonf1fa4f42017-10-13 21:26:13 +01002692 GEM_BUG_ON(!i915_gem_object_has_pages(obj));
Chris Wilson0a798eb2016-04-08 12:11:11 +01002693
Chris Wilson0ce81782017-05-17 13:09:59 +01002694 ptr = page_unpack_bits(obj->mm.mapping, &has_type);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002695 if (ptr && has_type != type) {
2696 if (pinned) {
2697 ret = -EBUSY;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002698 goto err_unpin;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002699 }
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002700
2701 if (is_vmalloc_addr(ptr))
2702 vunmap(ptr);
2703 else
2704 kunmap(kmap_to_page(ptr));
2705
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002706 ptr = obj->mm.mapping = NULL;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002707 }
2708
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002709 if (!ptr) {
2710 ptr = i915_gem_object_map(obj, type);
2711 if (!ptr) {
2712 ret = -ENOMEM;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002713 goto err_unpin;
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002714 }
2715
Chris Wilson0ce81782017-05-17 13:09:59 +01002716 obj->mm.mapping = page_pack_bits(ptr, type);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002717 }
2718
Chris Wilson1233e2d2016-10-28 13:58:37 +01002719out_unlock:
2720 mutex_unlock(&obj->mm.lock);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002721 return ptr;
2722
Chris Wilson1233e2d2016-10-28 13:58:37 +01002723err_unpin:
2724 atomic_dec(&obj->mm.pages_pin_count);
2725err_unlock:
2726 ptr = ERR_PTR(ret);
2727 goto out_unlock;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002728}
2729
Chris Wilson7c55e2c2017-03-07 12:03:38 +00002730static int
2731i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2732 const struct drm_i915_gem_pwrite *arg)
2733{
2734 struct address_space *mapping = obj->base.filp->f_mapping;
2735 char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2736 u64 remain, offset;
2737 unsigned int pg;
2738
2739 /* Before we instantiate/pin the backing store for our use, we
2740 * can prepopulate the shmemfs filp efficiently using a write into
2741 * the pagecache. We avoid the penalty of instantiating all the
2742 * pages, important if the user is just writing to a few and never
2743 * uses the object on the GPU, and using a direct write into shmemfs
2744 * allows it to avoid the cost of retrieving a page (either swapin
2745 * or clearing-before-use) before it is overwritten.
2746 */
Chris Wilsonf1fa4f42017-10-13 21:26:13 +01002747 if (i915_gem_object_has_pages(obj))
Chris Wilson7c55e2c2017-03-07 12:03:38 +00002748 return -ENODEV;
2749
Chris Wilsona6d65e42017-10-16 21:27:32 +01002750 if (obj->mm.madv != I915_MADV_WILLNEED)
2751 return -EFAULT;
2752
Chris Wilson7c55e2c2017-03-07 12:03:38 +00002753 /* Before the pages are instantiated the object is treated as being
2754 * in the CPU domain. The pages will be clflushed as required before
2755 * use, and we can freely write into the pages directly. If userspace
2756	 * races pwrite with any other operation, corruption will ensue -
2757 * that is userspace's prerogative!
2758 */
2759
2760 remain = arg->size;
2761 offset = arg->offset;
2762 pg = offset_in_page(offset);
2763
2764 do {
2765 unsigned int len, unwritten;
2766 struct page *page;
2767 void *data, *vaddr;
2768 int err;
2769
2770 len = PAGE_SIZE - pg;
2771 if (len > remain)
2772 len = remain;
2773
2774 err = pagecache_write_begin(obj->base.filp, mapping,
2775 offset, len, 0,
2776 &page, &data);
2777 if (err < 0)
2778 return err;
2779
2780 vaddr = kmap(page);
2781 unwritten = copy_from_user(vaddr + pg, user_data, len);
2782 kunmap(page);
2783
2784 err = pagecache_write_end(obj->base.filp, mapping,
2785 offset, len, len - unwritten,
2786 page, data);
2787 if (err < 0)
2788 return err;
2789
2790 if (unwritten)
2791 return -EFAULT;
2792
2793 remain -= len;
2794 user_data += len;
2795 offset += len;
2796 pg = 0;
2797 } while (remain);
2798
2799 return 0;
2800}
2801
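/*
 * Context banning: each hang for which a context is found guilty adds
 * CONTEXT_SCORE_GUILTY to its ban score; once a bannable context reaches
 * CONTEXT_SCORE_BAN_THRESHOLD it is marked banned (so later submissions
 * with it can be rejected) and the owning client's ban count is bumped.
 */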
Chris Wilson77b25a92017-07-21 13:32:30 +01002802static bool ban_context(const struct i915_gem_context *ctx,
2803 unsigned int score)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002804{
Chris Wilson60958682016-12-31 11:20:11 +00002805 return (i915_gem_context_is_bannable(ctx) &&
Chris Wilson77b25a92017-07-21 13:32:30 +01002806 score >= CONTEXT_SCORE_BAN_THRESHOLD);
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002807}
2808
Mika Kuoppalae5e1fc42016-11-16 17:20:31 +02002809static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002810{
Chris Wilson77b25a92017-07-21 13:32:30 +01002811 unsigned int score;
2812 bool banned;
Mika Kuoppalab083a082016-11-18 15:10:47 +02002813
Chris Wilson77b25a92017-07-21 13:32:30 +01002814 atomic_inc(&ctx->guilty_count);
2815
2816 score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
2817 banned = ban_context(ctx, score);
Mika Kuoppalab083a082016-11-18 15:10:47 +02002818 DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
Chris Wilson77b25a92017-07-21 13:32:30 +01002819 ctx->name, score, yesno(banned));
2820 if (!banned)
Mika Kuoppalab083a082016-11-18 15:10:47 +02002821 return;
2822
Chris Wilson77b25a92017-07-21 13:32:30 +01002823 i915_gem_context_set_banned(ctx);
2824 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
2825 atomic_inc(&ctx->file_priv->context_bans);
2826		DRM_DEBUG_DRIVER("client %s has had %d contexts banned\n",
2827 ctx->name, atomic_read(&ctx->file_priv->context_bans));
2828 }
Mika Kuoppalae5e1fc42016-11-16 17:20:31 +02002829}
2830
2831static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
2832{
Chris Wilson77b25a92017-07-21 13:32:30 +01002833 atomic_inc(&ctx->active_count);
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002834}
2835
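/*
 * Find the request presumed to be executing on the engine when the hang
 * was declared: the oldest request on the engine's timeline that has not
 * yet completed. May return NULL if the engine caught up in the meantime.
 */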
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002836struct drm_i915_gem_request *
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002837i915_gem_find_active_request(struct intel_engine_cs *engine)
Chris Wilson9375e442010-09-19 12:21:28 +01002838{
Chris Wilson754c9fd2017-02-23 07:44:14 +00002839 struct drm_i915_gem_request *request, *active = NULL;
2840 unsigned long flags;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002841
Chris Wilsonf69a02c2016-07-01 17:23:16 +01002842 /* We are called by the error capture and reset at a random
2843 * point in time. In particular, note that neither is crucially
2844 * ordered with an interrupt. After a hang, the GPU is dead and we
2845 * assume that no more writes can happen (we waited long enough for
2846 * all writes that were in transaction to be flushed) - adding an
2847 * extra delay for a recent interrupt is pointless. Hence, we do
2848 * not need an engine->irq_seqno_barrier() before the seqno reads.
2849 */
Chris Wilson754c9fd2017-02-23 07:44:14 +00002850 spin_lock_irqsave(&engine->timeline->lock, flags);
Chris Wilson73cb9702016-10-28 13:58:46 +01002851 list_for_each_entry(request, &engine->timeline->requests, link) {
Chris Wilson754c9fd2017-02-23 07:44:14 +00002852 if (__i915_gem_request_completed(request,
2853 request->global_seqno))
Chris Wilson4db080f2013-12-04 11:37:09 +00002854 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002855
Mika Kuoppala36193ac2017-01-17 17:59:02 +02002856 GEM_BUG_ON(request->engine != engine);
Chris Wilsonc00122f32017-02-12 17:19:58 +00002857 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2858 &request->fence.flags));
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002859
Chris Wilson754c9fd2017-02-23 07:44:14 +00002860 active = request;
2861 break;
2862 }
2863 spin_unlock_irqrestore(&engine->timeline->lock, flags);
2864
2865 return active;
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002866}
2867
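/*
 * An engine is only considered stalled if hangcheck flagged it and its
 * seqno has not advanced since the hang was declared; any forward
 * progress pardons the engine.
 */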
Mika Kuoppalabf2f0432017-01-17 17:59:04 +02002868static bool engine_stalled(struct intel_engine_cs *engine)
2869{
2870 if (!engine->hangcheck.stalled)
2871 return false;
2872
2873 /* Check for possible seqno movement after hang declaration */
2874 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
2875 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
2876 return false;
2877 }
2878
2879 return true;
2880}
2881
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002882/*
2883 * Ensure irq handler finishes, and not run again.
2884 * Also return the active request so that we only search for it once.
2885 */
2886struct drm_i915_gem_request *
2887i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
2888{
2889 struct drm_i915_gem_request *request = NULL;
2890
Chris Wilson1749d902017-10-09 12:02:59 +01002891 /*
2892 * During the reset sequence, we must prevent the engine from
2893 * entering RC6. As the context state is undefined until we restart
2894 * the engine, if it does enter RC6 during the reset, the state
2895 * written to the powercontext is undefined and so we may lose
2896 * GPU state upon resume, i.e. fail to restart after a reset.
2897 */
2898 intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
2899
2900 /*
2901 * Prevent the signaler thread from updating the request
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002902 * state (by calling dma_fence_signal) as we are processing
2903 * the reset. The write from the GPU of the seqno is
2904 * asynchronous and the signaler thread may see a different
2905 * value to us and declare the request complete, even though
2906	 * the reset routine has picked that request as the active
2907 * (incomplete) request. This conflict is not handled
2908 * gracefully!
2909 */
2910 kthread_park(engine->breadcrumbs.signaler);
2911
Chris Wilson1749d902017-10-09 12:02:59 +01002912 /*
2913 * Prevent request submission to the hardware until we have
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002914 * completed the reset in i915_gem_reset_finish(). If a request
2915 * is completed by one engine, it may then queue a request
2916	 * to a second via its engine->execlists.irq_tasklet *just* as we are
2917	 * calling engine->init_hw() and also writing the ELSP.
2918	 * Turning off the engine->execlists.irq_tasklet until the reset is over
2919 * prevents the race.
2920 */
Mika Kuoppalab620e872017-09-22 15:43:03 +03002921 tasklet_kill(&engine->execlists.irq_tasklet);
2922 tasklet_disable(&engine->execlists.irq_tasklet);
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002923
2924 if (engine->irq_seqno_barrier)
2925 engine->irq_seqno_barrier(engine);
2926
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01002927 request = i915_gem_find_active_request(engine);
2928 if (request && request->fence.error == -EIO)
2929 request = ERR_PTR(-EIO); /* Previous reset failed! */
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002930
2931 return request;
2932}
2933
Chris Wilson0e178ae2017-01-17 17:59:06 +02002934int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
Chris Wilson4c965542017-01-17 17:59:01 +02002935{
2936 struct intel_engine_cs *engine;
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002937 struct drm_i915_gem_request *request;
Chris Wilson4c965542017-01-17 17:59:01 +02002938 enum intel_engine_id id;
Chris Wilson0e178ae2017-01-17 17:59:06 +02002939 int err = 0;
Chris Wilson4c965542017-01-17 17:59:01 +02002940
Chris Wilson0e178ae2017-01-17 17:59:06 +02002941 for_each_engine(engine, dev_priv, id) {
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002942 request = i915_gem_reset_prepare_engine(engine);
2943 if (IS_ERR(request)) {
2944 err = PTR_ERR(request);
2945 continue;
Chris Wilson0e178ae2017-01-17 17:59:06 +02002946 }
Michel Thierryc64992e2017-06-20 10:57:44 +01002947
2948 engine->hangcheck.active_request = request;
Chris Wilson0e178ae2017-01-17 17:59:06 +02002949 }
2950
Chris Wilson4c965542017-01-17 17:59:01 +02002951 i915_gem_revoke_fences(dev_priv);
Chris Wilson0e178ae2017-01-17 17:59:06 +02002952
2953 return err;
Chris Wilson4c965542017-01-17 17:59:01 +02002954}
2955
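/*
 * Cancel the payload of a request that will not be replayed after reset:
 * the ring contents from head up to the postfix are zeroed so that only
 * the breadcrumb executes, and the fence is marked -EIO.
 */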
Mika Kuoppala36193ac2017-01-17 17:59:02 +02002956static void skip_request(struct drm_i915_gem_request *request)
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002957{
Chris Wilson821ed7d2016-09-09 14:11:53 +01002958 void *vaddr = request->ring->vaddr;
2959 u32 head;
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002960
Chris Wilson821ed7d2016-09-09 14:11:53 +01002961 /* As this request likely depends on state from the lost
2962 * context, clear out all the user operations leaving the
2963 * breadcrumb at the end (so we get the fence notifications).
2964 */
2965 head = request->head;
2966 if (request->postfix < head) {
2967 memset(vaddr + head, 0, request->ring->size - head);
2968 head = 0;
2969 }
2970 memset(vaddr + head, 0, request->postfix - head);
Chris Wilsonc0d5f322017-01-10 17:22:43 +00002971
2972 dma_fence_set_error(&request->fence, -EIO);
Chris Wilson4db080f2013-12-04 11:37:09 +00002973}
2974
Mika Kuoppala36193ac2017-01-17 17:59:02 +02002975static void engine_skip_context(struct drm_i915_gem_request *request)
2976{
2977 struct intel_engine_cs *engine = request->engine;
2978 struct i915_gem_context *hung_ctx = request->ctx;
2979 struct intel_timeline *timeline;
2980 unsigned long flags;
2981
2982 timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
2983
2984 spin_lock_irqsave(&engine->timeline->lock, flags);
2985 spin_lock(&timeline->lock);
2986
2987 list_for_each_entry_continue(request, &engine->timeline->requests, link)
2988 if (request->ctx == hung_ctx)
2989 skip_request(request);
2990
2991 list_for_each_entry(request, &timeline->requests, link)
2992 skip_request(request);
2993
2994 spin_unlock(&timeline->lock);
2995 spin_unlock_irqrestore(&engine->timeline->lock, flags);
2996}
2997
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01002998/* Returns the request if it was guilty of the hang */
2999static struct drm_i915_gem_request *
3000i915_gem_reset_request(struct intel_engine_cs *engine,
3001 struct drm_i915_gem_request *request)
Mika Kuoppala61da5362017-01-17 17:59:05 +02003002{
Mika Kuoppala71895a02017-01-17 17:59:07 +02003003 /* The guilty request will get skipped on a hung engine.
3004 *
3005 * Users of client default contexts do not rely on logical
3006 * state preserved between batches so it is safe to execute
3007 * queued requests following the hang. Non default contexts
3008 * rely on preserved state, so skipping a batch loses the
3009 * evolution of the state and it needs to be considered corrupted.
3010 * Executing more queued batches on top of corrupted state is
3011 * risky. But we take the risk by trying to advance through
3012 * the queued requests in order to make the client behaviour
3013 * more predictable around resets, by not throwing away random
3014	 * more predictable around resets, by not throwing away a random
3015	 * number of batches it has prepared for execution. Sophisticated
3016 * (exported via sync_file info ioctl on explicit fences) to observe
3017 * when it loses the context state and should rebuild accordingly.
3018 *
3019 * The context ban, and ultimately the client ban, mechanism are safety
3020	 * The context ban, and ultimately the client ban, mechanisms are safety
3021 * subsequent hangs.
3022 */
3023
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01003024 if (engine_stalled(engine)) {
Mika Kuoppala61da5362017-01-17 17:59:05 +02003025 i915_gem_context_mark_guilty(request->ctx);
3026 skip_request(request);
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01003027
3028 /* If this context is now banned, skip all pending requests. */
3029 if (i915_gem_context_is_banned(request->ctx))
3030 engine_skip_context(request);
Mika Kuoppala61da5362017-01-17 17:59:05 +02003031 } else {
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01003032 /*
3033 * Since this is not the hung engine, it may have advanced
3034 * since the hang declaration. Double check by refinding
3035 * the active request at the time of the reset.
3036 */
3037 request = i915_gem_find_active_request(engine);
3038 if (request) {
3039 i915_gem_context_mark_innocent(request->ctx);
3040 dma_fence_set_error(&request->fence, -EAGAIN);
3041
3042 /* Rewind the engine to replay the incomplete rq */
3043 spin_lock_irq(&engine->timeline->lock);
3044 request = list_prev_entry(request, link);
3045 if (&request->link == &engine->timeline->requests)
3046 request = NULL;
3047 spin_unlock_irq(&engine->timeline->lock);
3048 }
Mika Kuoppala61da5362017-01-17 17:59:05 +02003049 }
3050
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01003051 return request;
Mika Kuoppala61da5362017-01-17 17:59:05 +02003052}
3053
Michel Thierrya1ef70e2017-06-20 10:57:47 +01003054void i915_gem_reset_engine(struct intel_engine_cs *engine,
3055 struct drm_i915_gem_request *request)
Chris Wilson4db080f2013-12-04 11:37:09 +00003056{
Chris Wilsoned454f22017-07-21 13:32:29 +01003057 engine->irq_posted = 0;
3058
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01003059 if (request)
3060 request = i915_gem_reset_request(engine, request);
3061
3062 if (request) {
Chris Wilsonc0dcb202017-02-07 15:24:37 +00003063 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
3064 engine->name, request->global_seqno);
Chris Wilsonc0dcb202017-02-07 15:24:37 +00003065 }
Chris Wilson821ed7d2016-09-09 14:11:53 +01003066
3067 /* Setup the CS to resume from the breadcrumb of the hung request */
3068 engine->reset_hw(engine, request);
Chris Wilson821ed7d2016-09-09 14:11:53 +01003069}
3070
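/*
 * Replay preparation after a GPU reset: for each engine, the active
 * request recorded in i915_gem_reset_prepare() decides whether its
 * context is marked guilty or innocent, the CS is programmed to resume
 * from the appropriate breadcrumb, and fence registers and powersave
 * state are restored.
 */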
Chris Wilsond8027092017-02-08 14:30:32 +00003071void i915_gem_reset(struct drm_i915_private *dev_priv)
Chris Wilson821ed7d2016-09-09 14:11:53 +01003072{
3073 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05303074 enum intel_engine_id id;
Chris Wilson821ed7d2016-09-09 14:11:53 +01003075
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003076 lockdep_assert_held(&dev_priv->drm.struct_mutex);
3077
Chris Wilson821ed7d2016-09-09 14:11:53 +01003078 i915_gem_retire_requests(dev_priv);
3079
Chris Wilson2ae55732017-02-12 17:20:02 +00003080 for_each_engine(engine, dev_priv, id) {
3081 struct i915_gem_context *ctx;
3082
Michel Thierryc64992e2017-06-20 10:57:44 +01003083 i915_gem_reset_engine(engine, engine->hangcheck.active_request);
Chris Wilson2ae55732017-02-12 17:20:02 +00003084 ctx = fetch_and_zero(&engine->last_retired_context);
3085 if (ctx)
3086 engine->context_unpin(engine, ctx);
3087 }
Chris Wilson821ed7d2016-09-09 14:11:53 +01003088
Tvrtko Ursulin4362f4f2016-11-16 08:55:33 +00003089 i915_gem_restore_fences(dev_priv);
Chris Wilsonf2a91d12016-09-21 14:51:06 +01003090
3091 if (dev_priv->gt.awake) {
3092 intel_sanitize_gt_powersave(dev_priv);
3093 intel_enable_gt_powersave(dev_priv);
3094 if (INTEL_GEN(dev_priv) >= 6)
3095 gen6_rps_busy(dev_priv);
3096 }
Chris Wilson821ed7d2016-09-09 14:11:53 +01003097}
3098
Michel Thierrya1ef70e2017-06-20 10:57:47 +01003099void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
3100{
Mika Kuoppalab620e872017-09-22 15:43:03 +03003101 tasklet_enable(&engine->execlists.irq_tasklet);
Michel Thierrya1ef70e2017-06-20 10:57:47 +01003102 kthread_unpark(engine->breadcrumbs.signaler);
Chris Wilson1749d902017-10-09 12:02:59 +01003103
3104 intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
Michel Thierrya1ef70e2017-06-20 10:57:47 +01003105}
3106
Chris Wilsond8027092017-02-08 14:30:32 +00003107void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
3108{
Chris Wilson1f7b8472017-02-08 14:30:33 +00003109 struct intel_engine_cs *engine;
3110 enum intel_engine_id id;
3111
Chris Wilsond8027092017-02-08 14:30:32 +00003112 lockdep_assert_held(&dev_priv->drm.struct_mutex);
Chris Wilson1f7b8472017-02-08 14:30:33 +00003113
Chris Wilsonfe3288b2017-02-12 17:20:01 +00003114 for_each_engine(engine, dev_priv, id) {
Michel Thierryc64992e2017-06-20 10:57:44 +01003115 engine->hangcheck.active_request = NULL;
Michel Thierrya1ef70e2017-06-20 10:57:47 +01003116 i915_gem_reset_finish_engine(engine);
Chris Wilsonfe3288b2017-02-12 17:20:01 +00003117 }
Chris Wilsond8027092017-02-08 14:30:32 +00003118}
3119
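/*
 * Wedging (declaring the GPU terminally broken) happens in two phases:
 * first every engine's submit_request is replaced with nop_submit_request
 * so nothing new reaches the hardware, then, once in-flight requests have
 * been cancelled, nop_complete_submit_request also advances the global
 * seqno so that every request completes (with -EIO) without touching the
 * hardware.
 */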
Chris Wilson821ed7d2016-09-09 14:11:53 +01003120static void nop_submit_request(struct drm_i915_gem_request *request)
3121{
Daniel Vetteraf7a8ff2017-10-11 11:10:19 +02003122 dma_fence_set_error(&request->fence, -EIO);
3123
3124 i915_gem_request_submit(request);
3125}
3126
3127static void nop_complete_submit_request(struct drm_i915_gem_request *request)
3128{
Chris Wilson8d550822017-10-06 12:56:17 +01003129 unsigned long flags;
3130
Chris Wilson3cd94422017-01-10 17:22:45 +00003131 dma_fence_set_error(&request->fence, -EIO);
Chris Wilson8d550822017-10-06 12:56:17 +01003132
3133 spin_lock_irqsave(&request->engine->timeline->lock, flags);
3134 __i915_gem_request_submit(request);
Chris Wilson3dcf93f2016-11-22 14:41:20 +00003135 intel_engine_init_global_seqno(request->engine, request->global_seqno);
Chris Wilson8d550822017-10-06 12:56:17 +01003136 spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
Chris Wilson821ed7d2016-09-09 14:11:53 +01003137}
3138
Daniel Vetteraf7a8ff2017-10-11 11:10:19 +02003139void i915_gem_set_wedged(struct drm_i915_private *i915)
Chris Wilson821ed7d2016-09-09 14:11:53 +01003140{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003141 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05303142 enum intel_engine_id id;
Eric Anholt673a3942008-07-30 12:06:12 -07003143
Daniel Vetteraf7a8ff2017-10-11 11:10:19 +02003144 /*
3145 * First, stop submission to hw, but do not yet complete requests by
3146 * rolling the global seqno forward (since this would complete requests
3147 * for which we haven't set the fence error to EIO yet).
3148 */
Chris Wilson20e49332016-11-22 14:41:21 +00003149 for_each_engine(engine, i915, id)
Daniel Vetteraf7a8ff2017-10-11 11:10:19 +02003150 engine->submit_request = nop_submit_request;
3151
3152 /*
3153 * Make sure no one is running the old callback before we proceed with
3154 * cancelling requests and resetting the completion tracking. Otherwise
3155 * we might submit a request to the hardware which never completes.
3156 */
3157 synchronize_rcu();
3158
3159 for_each_engine(engine, i915, id) {
3160 /* Mark all executing requests as skipped */
3161 engine->cancel_requests(engine);
3162
3163 /*
3164 * Only once we've force-cancelled all in-flight requests can we
3165 * start to complete all requests.
3166 */
3167 engine->submit_request = nop_complete_submit_request;
3168 }
3169
3170 /*
3171 * Make sure no request can slip through without getting completed by
3172 * either this call here to intel_engine_init_global_seqno, or the one
3173 * in nop_complete_submit_request.
3174 */
3175 synchronize_rcu();
3176
3177 for_each_engine(engine, i915, id) {
3178 unsigned long flags;
3179
3180 /* Mark all pending requests as complete so that any concurrent
3181 * (lockless) lookup doesn't try and wait upon the request as we
3182 * reset it.
3183 */
3184 spin_lock_irqsave(&engine->timeline->lock, flags);
3185 intel_engine_init_global_seqno(engine,
3186 intel_engine_last_submit(engine));
3187 spin_unlock_irqrestore(&engine->timeline->lock, flags);
3188 }
Chris Wilson20e49332016-11-22 14:41:21 +00003189
Chris Wilson3d7adbb2017-07-21 13:32:27 +01003190 set_bit(I915_WEDGED, &i915->gpu_error.flags);
3191 wake_up_all(&i915->gpu_error.reset_queue);
Eric Anholt673a3942008-07-30 12:06:12 -07003192}
3193
Chris Wilson2e8f9d32017-03-16 17:13:04 +00003194bool i915_gem_unset_wedged(struct drm_i915_private *i915)
3195{
3196 struct i915_gem_timeline *tl;
3197 int i;
3198
3199 lockdep_assert_held(&i915->drm.struct_mutex);
3200 if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
3201 return true;
3202
3203 /* Before unwedging, make sure that all pending operations
3204 * are flushed and errored out - we may have requests waiting upon
3205 * third party fences. We marked all inflight requests as EIO, and
3206 * every execbuf since returned EIO, for consistency we want all
3207 * the currently pending requests to also be marked as EIO, which
3208 * is done inside our nop_submit_request - and so we must wait.
3209 *
3210 * No more can be submitted until we reset the wedged bit.
3211 */
3212 list_for_each_entry(tl, &i915->gt.timelines, link) {
3213 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3214 struct drm_i915_gem_request *rq;
3215
3216 rq = i915_gem_active_peek(&tl->engine[i].last_request,
3217 &i915->drm.struct_mutex);
3218 if (!rq)
3219 continue;
3220
3221 /* We can't use our normal waiter as we want to
3222 * avoid recursively trying to handle the current
3223 * reset. The basic dma_fence_default_wait() installs
3224 * a callback for dma_fence_signal(), which is
3225 * triggered by our nop handler (indirectly, the
3226 * callback enables the signaler thread which is
3227 * woken by the nop_submit_request() advancing the seqno
3228 * and when the seqno passes the fence, the signaler
3229 * then signals the fence waking us up).
3230 */
3231 if (dma_fence_default_wait(&rq->fence, true,
3232 MAX_SCHEDULE_TIMEOUT) < 0)
3233 return false;
3234 }
3235 }
3236
3237 /* Undo nop_submit_request. We prevent all new i915 requests from
3238 * being queued (by disallowing execbuf whilst wedged) so having
3239 * waited for all active requests above, we know the system is idle
3240 * and do not have to worry about a thread being inside
3241 * engine->submit_request() as we swap over. So unlike installing
3242 * the nop_submit_request on reset, we can do this from normal
3243 * context and do not require stop_machine().
3244 */
3245 intel_engines_reset_default_submission(i915);
Chris Wilson36703e72017-06-22 11:56:25 +01003246 i915_gem_contexts_lost(i915);
Chris Wilson2e8f9d32017-03-16 17:13:04 +00003247
3248 smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
3249 clear_bit(I915_WEDGED, &i915->gpu_error.flags);
3250
3251 return true;
3252}
3253
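/*
 * Periodic retire worker: opportunistically retire completed requests
 * (skipping the pass if struct_mutex is contended) and re-arm both the
 * hangcheck and this worker roughly once a second for as long as the GT
 * remains awake.
 */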
Daniel Vetter75ef9da2010-08-21 00:25:16 +02003254static void
Eric Anholt673a3942008-07-30 12:06:12 -07003255i915_gem_retire_work_handler(struct work_struct *work)
3256{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003257 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01003258 container_of(work, typeof(*dev_priv), gt.retire_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01003259 struct drm_device *dev = &dev_priv->drm;
Eric Anholt673a3942008-07-30 12:06:12 -07003260
Chris Wilson891b48c2010-09-29 12:26:37 +01003261 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003262 if (mutex_trylock(&dev->struct_mutex)) {
Chris Wilson67d97da2016-07-04 08:08:31 +01003263 i915_gem_retire_requests(dev_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003264 mutex_unlock(&dev->struct_mutex);
3265 }
Chris Wilson67d97da2016-07-04 08:08:31 +01003266
3267 /* Keep the retire handler running until we are finally idle.
3268 * We do not need to do this test under locking as in the worst-case
3269 * we queue the retire worker once too often.
3270 */
Chris Wilsonc9615612016-07-09 10:12:06 +01003271 if (READ_ONCE(dev_priv->gt.awake)) {
3272 i915_queue_hangcheck(dev_priv);
Chris Wilson67d97da2016-07-04 08:08:31 +01003273 queue_delayed_work(dev_priv->wq,
3274 &dev_priv->gt.retire_work,
Chris Wilsonbcb45082012-10-05 17:02:57 +01003275 round_jiffies_up_relative(HZ));
Chris Wilsonc9615612016-07-09 10:12:06 +01003276 }
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003277}
Chris Wilson891b48c2010-09-29 12:26:37 +01003278
Chris Wilson5427f202017-10-23 22:32:34 +01003279static inline bool
3280new_requests_since_last_retire(const struct drm_i915_private *i915)
3281{
3282 return (READ_ONCE(i915->gt.active_requests) ||
3283 work_pending(&i915->gt.idle_work.work));
3284}
3285
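/*
 * Idle worker: once no new requests have arrived, give the engines up to
 * ~200ms to drain, then park the GT - flush any residual interrupt, mark
 * engines and timelines idle, drop to idle RPS frequencies and release
 * the runtime-pm wakeref taken when the GT was woken.
 */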
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003286static void
3287i915_gem_idle_work_handler(struct work_struct *work)
3288{
3289 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01003290 container_of(work, typeof(*dev_priv), gt.idle_work.work);
Chris Wilson67d97da2016-07-04 08:08:31 +01003291 bool rearm_hangcheck;
Chris Wilson5427f202017-10-23 22:32:34 +01003292 ktime_t end;
Chris Wilson67d97da2016-07-04 08:08:31 +01003293
3294 if (!READ_ONCE(dev_priv->gt.awake))
3295 return;
3296
Imre Deak0cb56702016-11-07 11:20:04 +02003297 /*
3298	 * Wait for the last execlists context to complete, but bail out in case a
3299 * new request is submitted.
3300 */
Chris Wilson5427f202017-10-23 22:32:34 +01003301 end = ktime_add_ms(ktime_get(), 200);
3302 do {
3303 if (new_requests_since_last_retire(dev_priv))
3304 return;
3305
3306 if (intel_engines_are_idle(dev_priv))
3307 break;
3308
3309 usleep_range(100, 500);
3310 } while (ktime_before(ktime_get(), end));
Chris Wilson67d97da2016-07-04 08:08:31 +01003311
3312 rearm_hangcheck =
3313 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
3314
Chris Wilson5427f202017-10-23 22:32:34 +01003315 if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
Chris Wilson67d97da2016-07-04 08:08:31 +01003316 /* Currently busy, come back later */
3317 mod_delayed_work(dev_priv->wq,
3318 &dev_priv->gt.idle_work,
3319 msecs_to_jiffies(50));
3320 goto out_rearm;
3321 }
3322
Imre Deak93c97dc2016-11-07 11:20:03 +02003323 /*
3324 * New request retired after this work handler started, extend active
3325 * period until next instance of the work.
3326 */
Chris Wilson5427f202017-10-23 22:32:34 +01003327 if (new_requests_since_last_retire(dev_priv))
Imre Deak93c97dc2016-11-07 11:20:03 +02003328 goto out_unlock;
3329
Chris Wilson5427f202017-10-23 22:32:34 +01003330 /*
Chris Wilsonff320d62017-10-23 22:32:35 +01003331 * Be paranoid and flush a concurrent interrupt to make sure
3332 * we don't reactivate any irq tasklets after parking.
3333 *
3334 * FIXME: Note that even though we have waited for execlists to be idle,
3335 * there may still be an in-flight interrupt even though the CSB
3336 * is now empty. synchronize_irq() makes sure that a residual interrupt
3337 * is completed before we continue, but it doesn't prevent the HW from
3338 * raising a spurious interrupt later. To complete the shield we should
3339 * coordinate disabling the CS irq with flushing the interrupts.
3340 */
3341 synchronize_irq(dev_priv->drm.irq);
3342
3343 /*
Chris Wilson5427f202017-10-23 22:32:34 +01003344	 * We are committed now to parking the engines; make sure there
3345 * will be no more interrupts arriving later.
3346 */
3347 if (!intel_engines_are_idle(dev_priv))
Imre Deak0cb56702016-11-07 11:20:04 +02003348 DRM_ERROR("Timeout waiting for engines to idle\n");
3349
Chris Wilson6c067572017-05-17 13:10:03 +01003350 intel_engines_mark_idle(dev_priv);
Chris Wilson47979482017-05-03 10:39:21 +01003351 i915_gem_timelines_mark_idle(dev_priv);
Zou Nan haid1b851f2010-05-21 09:08:57 +08003352
Chris Wilson67d97da2016-07-04 08:08:31 +01003353 GEM_BUG_ON(!dev_priv->gt.awake);
3354 dev_priv->gt.awake = false;
3355 rearm_hangcheck = false;
Daniel Vetter30ecad72015-12-09 09:29:36 +01003356
Chris Wilson67d97da2016-07-04 08:08:31 +01003357 if (INTEL_GEN(dev_priv) >= 6)
3358 gen6_rps_idle(dev_priv);
3359 intel_runtime_pm_put(dev_priv);
3360out_unlock:
Chris Wilson5427f202017-10-23 22:32:34 +01003361 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilson35c94182015-04-07 16:20:37 +01003362
Chris Wilson67d97da2016-07-04 08:08:31 +01003363out_rearm:
3364 if (rearm_hangcheck) {
3365 GEM_BUG_ON(!dev_priv->gt.awake);
3366 i915_queue_hangcheck(dev_priv);
Chris Wilson35c94182015-04-07 16:20:37 +01003367 }
Eric Anholt673a3942008-07-30 12:06:12 -07003368}
3369
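/*
 * Called when a GEM handle is closed: tear down every per-context lookup
 * entry (lut) this file had for the object, close the (non-GGTT) vma once
 * its open count drops to zero, and drop the reference each lut held on
 * the object (deferred until idle if the object is still active).
 */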
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003370void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
3371{
Chris Wilsond1b48c12017-08-16 09:52:08 +01003372 struct drm_i915_private *i915 = to_i915(gem->dev);
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003373 struct drm_i915_gem_object *obj = to_intel_bo(gem);
3374 struct drm_i915_file_private *fpriv = file->driver_priv;
Chris Wilsond1b48c12017-08-16 09:52:08 +01003375 struct i915_lut_handle *lut, *ln;
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003376
Chris Wilsond1b48c12017-08-16 09:52:08 +01003377 mutex_lock(&i915->drm.struct_mutex);
3378
3379 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
3380 struct i915_gem_context *ctx = lut->ctx;
3381 struct i915_vma *vma;
3382
Chris Wilson432295d2017-08-22 12:05:15 +01003383 GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
Chris Wilsond1b48c12017-08-16 09:52:08 +01003384 if (ctx->file_priv != fpriv)
3385 continue;
3386
3387 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
Chris Wilson3ffff012017-08-22 12:05:17 +01003388 GEM_BUG_ON(vma->obj != obj);
3389
3390 /* We allow the process to have multiple handles to the same
3391 * vma, in the same fd namespace, by virtue of flink/open.
3392 */
3393 GEM_BUG_ON(!vma->open_count);
3394 if (!--vma->open_count && !i915_vma_is_ggtt(vma))
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003395 i915_vma_close(vma);
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01003396
Chris Wilsond1b48c12017-08-16 09:52:08 +01003397 list_del(&lut->obj_link);
3398 list_del(&lut->ctx_link);
Chris Wilson4ff4b442017-06-16 15:05:16 +01003399
Chris Wilsond1b48c12017-08-16 09:52:08 +01003400 kmem_cache_free(i915->luts, lut);
3401 __i915_gem_object_release_unless_active(obj);
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01003402 }
Chris Wilsond1b48c12017-08-16 09:52:08 +01003403
3404 mutex_unlock(&i915->drm.struct_mutex);
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003405}
3406
Chris Wilsone95433c2016-10-28 13:58:27 +01003407static unsigned long to_wait_timeout(s64 timeout_ns)
3408{
3409 if (timeout_ns < 0)
3410 return MAX_SCHEDULE_TIMEOUT;
3411
3412 if (timeout_ns == 0)
3413 return 0;
3414
3415 return nsecs_to_jiffies_timeout(timeout_ns);
3416}
3417
Ben Widawsky5816d642012-04-11 11:18:19 -07003418/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003419 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003420 * @dev: drm device pointer
3421 * @data: ioctl data blob
3422 * @file: drm file pointer
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003423 *
3424 * Returns 0 if successful, else an error is returned with the remaining time in
3425 * the timeout parameter.
3426 * -ETIME: object is still busy after timeout
3427 * -ERESTARTSYS: signal interrupted the wait
3428 * -ENOENT: object doesn't exist
3429 * Also possible, but rare:
Chris Wilsonb8050142017-08-11 11:57:31 +01003430 * -EAGAIN: incomplete, restart syscall
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003431 * -ENOMEM: out of memory
3432 * -ENODEV: Internal IRQ fail
3433 * -E?: The add request failed
3434 *
3435 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3436 * non-zero timeout parameter the wait ioctl will wait for the given number of
3437 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3438 * without holding struct_mutex the object may become re-busied before this
3439 * function completes. A similar but shorter race condition exists in the busy
3440 * ioctl.
3441 */
3442int
3443i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3444{
3445 struct drm_i915_gem_wait *args = data;
3446 struct drm_i915_gem_object *obj;
Chris Wilsone95433c2016-10-28 13:58:27 +01003447 ktime_t start;
3448 long ret;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003449
Daniel Vetter11b5d512014-09-29 15:31:26 +02003450 if (args->flags != 0)
3451 return -EINVAL;
3452
Chris Wilson03ac0642016-07-20 13:31:51 +01003453 obj = i915_gem_object_lookup(file, args->bo_handle);
Chris Wilson033d5492016-08-05 10:14:17 +01003454 if (!obj)
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003455 return -ENOENT;
Chris Wilson033d5492016-08-05 10:14:17 +01003456
Chris Wilsone95433c2016-10-28 13:58:27 +01003457 start = ktime_get();
3458
3459 ret = i915_gem_object_wait(obj,
3460 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
3461 to_wait_timeout(args->timeout_ns),
3462 to_rps_client(file));
3463
3464 if (args->timeout_ns > 0) {
3465 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3466 if (args->timeout_ns < 0)
3467 args->timeout_ns = 0;
Chris Wilsonc1d20612017-02-16 12:54:41 +00003468
3469 /*
3470 * Apparently ktime isn't accurate enough and occasionally has a
3471 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3472 * things up to make the test happy. We allow up to 1 jiffy.
3473 *
3474 * This is a regression from the timespec->ktime conversion.
3475 */
3476 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3477 args->timeout_ns = 0;
Chris Wilsonb8050142017-08-11 11:57:31 +01003478
3479 /* Asked to wait beyond the jiffie/scheduler precision? */
3480 if (ret == -ETIME && args->timeout_ns)
3481 ret = -EAGAIN;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003482 }
3483
Chris Wilsonf0cd5182016-10-28 13:58:43 +01003484 i915_gem_object_put(obj);
John Harrisonff865882014-11-24 18:49:28 +00003485 return ret;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003486}
3487
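/*
 * Helpers for i915_gem_wait_for_idle(): wait for the last request on each
 * engine of a timeline, then verify the engines really have settled,
 * declaring the GPU wedged if they refuse to idle.
 */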
Chris Wilson73cb9702016-10-28 13:58:46 +01003488static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003489{
Chris Wilson73cb9702016-10-28 13:58:46 +01003490 int ret, i;
3491
3492 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3493 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
3494 if (ret)
3495 return ret;
3496 }
3497
3498 return 0;
3499}
3500
Chris Wilson25112b62017-03-30 15:50:39 +01003501static int wait_for_engines(struct drm_i915_private *i915)
3502{
Chris Wilsoncad99462017-08-26 12:09:33 +01003503 if (wait_for(intel_engines_are_idle(i915), 50)) {
3504 DRM_ERROR("Failed to idle engines, declaring wedged!\n");
3505 i915_gem_set_wedged(i915);
3506 return -EIO;
Chris Wilson25112b62017-03-30 15:50:39 +01003507 }
3508
3509 return 0;
3510}
3511
Chris Wilson73cb9702016-10-28 13:58:46 +01003512int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3513{
Dave Gordonb4ac5af2016-03-24 11:20:38 +00003514 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003515
Chris Wilson863e9fd2017-05-30 13:13:32 +01003516 /* If the device is asleep, we have no requests outstanding */
3517 if (!READ_ONCE(i915->gt.awake))
3518 return 0;
3519
Chris Wilson9caa34a2016-11-11 14:58:08 +00003520 if (flags & I915_WAIT_LOCKED) {
3521 struct i915_gem_timeline *tl;
3522
3523 lockdep_assert_held(&i915->drm.struct_mutex);
3524
3525 list_for_each_entry(tl, &i915->gt.timelines, link) {
3526 ret = wait_for_timeline(tl, flags);
3527 if (ret)
3528 return ret;
3529 }
Chris Wilson72022a72017-03-30 15:50:38 +01003530
3531 i915_gem_retire_requests(i915);
3532 GEM_BUG_ON(i915->gt.active_requests);
Chris Wilson25112b62017-03-30 15:50:39 +01003533
3534 ret = wait_for_engines(i915);
Chris Wilson9caa34a2016-11-11 14:58:08 +00003535 } else {
3536 ret = wait_for_timeline(&i915->gt.global_timeline, flags);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003537 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003538
Chris Wilson25112b62017-03-30 15:50:39 +01003539 return ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003540}
3541
Chris Wilson5a97bcc2017-02-22 11:40:46 +00003542static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3543{
Chris Wilsone27ab732017-06-15 13:38:49 +01003544 /*
3545 * We manually flush the CPU domain so that we can override and
3546	 * force the flush for the display, and perform it asynchronously.
3547 */
3548 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3549 if (obj->cache_dirty)
3550 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
Chris Wilson5a97bcc2017-02-22 11:40:46 +00003551 obj->base.write_domain = 0;
3552}
3553
3554void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
3555{
Chris Wilsonbd3d2252017-10-13 21:26:14 +01003556 if (!READ_ONCE(obj->pin_global))
Chris Wilson5a97bcc2017-02-22 11:40:46 +00003557 return;
3558
3559 mutex_lock(&obj->base.dev->struct_mutex);
3560 __i915_gem_object_flush_for_display(obj);
3561 mutex_unlock(&obj->base.dev->struct_mutex);
3562}
3563
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003564/**
Chris Wilsone22d8e32017-04-12 12:01:11 +01003565 * Moves a single object to the WC read, and possibly write domain.
3566 * @obj: object to act on
3567 * @write: ask for write access or read only
3568 *
3569 * This function returns when the move is complete, including waiting on
3570 * flushes to occur.
3571 */
3572int
3573i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3574{
3575 int ret;
3576
3577 lockdep_assert_held(&obj->base.dev->struct_mutex);
3578
3579 ret = i915_gem_object_wait(obj,
3580 I915_WAIT_INTERRUPTIBLE |
3581 I915_WAIT_LOCKED |
3582 (write ? I915_WAIT_ALL : 0),
3583 MAX_SCHEDULE_TIMEOUT,
3584 NULL);
3585 if (ret)
3586 return ret;
3587
3588 if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
3589 return 0;
3590
3591 /* Flush and acquire obj->pages so that we are coherent through
3592 * direct access in memory with previous cached writes through
3593 * shmemfs and that our cache domain tracking remains valid.
3594 * For example, if the obj->filp was moved to swap without us
3595 * being notified and releasing the pages, we would mistakenly
3596 * continue to assume that the obj remained out of the CPU cached
3597 * domain.
3598 */
3599 ret = i915_gem_object_pin_pages(obj);
3600 if (ret)
3601 return ret;
3602
3603 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
3604
3605 /* Serialise direct access to this object with the barriers for
3606 * coherent writes from the GPU, by effectively invalidating the
3607 * WC domain upon first access.
3608 */
3609 if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
3610 mb();
3611
3612 /* It should now be out of any other write domains, and we can update
3613 * the domain values for our changes.
3614 */
3615 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
3616 obj->base.read_domains |= I915_GEM_DOMAIN_WC;
3617 if (write) {
3618 obj->base.read_domains = I915_GEM_DOMAIN_WC;
3619 obj->base.write_domain = I915_GEM_DOMAIN_WC;
3620 obj->mm.dirty = true;
3621 }
3622
3623 i915_gem_object_unpin_pages(obj);
3624 return 0;
3625}
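/*
 * Illustrative usage sketch (not part of the original file): callers are
 * expected to hold struct_mutex around the domain change, e.g.
 *
 *	mutex_lock(&obj->base.dev->struct_mutex);
 *	err = i915_gem_object_set_to_wc_domain(obj, true);
 *	mutex_unlock(&obj->base.dev->struct_mutex);
 *
 * With write=true the object ends up in the WC domain for both read and
 * write, and obj->mm.dirty is set so the pages are written back when they
 * are eventually released.
 */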
3626
3627/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003628 * Moves a single object to the GTT read, and possibly write domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003629 * @obj: object to act on
3630 * @write: ask for write access or read only
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003631 *
3632 * This function returns when the move is complete, including waiting on
3633 * flushes to occur.
3634 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003635int
Chris Wilson20217462010-11-23 15:26:33 +00003636i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003637{
Eric Anholte47c68e2008-11-14 13:35:19 -08003638 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003639
Chris Wilsone95433c2016-10-28 13:58:27 +01003640 lockdep_assert_held(&obj->base.dev->struct_mutex);
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003641
Chris Wilsone95433c2016-10-28 13:58:27 +01003642 ret = i915_gem_object_wait(obj,
3643 I915_WAIT_INTERRUPTIBLE |
3644 I915_WAIT_LOCKED |
3645 (write ? I915_WAIT_ALL : 0),
3646 MAX_SCHEDULE_TIMEOUT,
3647 NULL);
Chris Wilson88241782011-01-07 17:09:48 +00003648 if (ret)
3649 return ret;
3650
Chris Wilsonc13d87e2016-07-20 09:21:15 +01003651 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3652 return 0;
3653
Chris Wilson43566de2015-01-02 16:29:29 +05303654 /* Flush and acquire obj->pages so that we are coherent through
3655 * direct access in memory with previous cached writes through
3656 * shmemfs and that our cache domain tracking remains valid.
3657 * For example, if the obj->filp was moved to swap without us
3658 * being notified and releasing the pages, we would mistakenly
3659 * continue to assume that the obj remained out of the CPU cached
3660 * domain.
3661 */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003662 ret = i915_gem_object_pin_pages(obj);
Chris Wilson43566de2015-01-02 16:29:29 +05303663 if (ret)
3664 return ret;
3665
Chris Wilsonef749212017-04-12 12:01:10 +01003666 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003667
Chris Wilsond0a57782012-10-09 19:24:37 +01003668 /* Serialise direct access to this object with the barriers for
3669 * coherent writes from the GPU, by effectively invalidating the
3670 * GTT domain upon first access.
3671 */
3672 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3673 mb();
3674
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003675 /* It should now be out of any other write domains, and we can update
3676 * the domain values for our changes.
3677 */
Chris Wilson40e62d52016-10-28 13:58:41 +01003678 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilson05394f32010-11-08 19:18:58 +00003679 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003680 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003681 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3682 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003683 obj->mm.dirty = true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003684 }
3685
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003686 i915_gem_object_unpin_pages(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003687 return 0;
3688}
3689
Chris Wilsonef55f922015-10-09 14:11:27 +01003690/**
3691 * Changes the cache-level of an object across all VMA.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003692 * @obj: object to act on
3693 * @cache_level: new cache level to set for the object
Chris Wilsonef55f922015-10-09 14:11:27 +01003694 *
3695 * After this function returns, the object will be in the new cache-level
3696 * across all GTT and the contents of the backing storage will be coherent,
3697 * with respect to the new cache-level. In order to keep the backing storage
3698 * coherent for all users, we only allow a single cache level to be set
3699 * globally on the object and prevent it from being changed whilst the
3700 * hardware is reading from the object. That is if the object is currently
3701 * on the scanout it will be set to uncached (or equivalent display
3702 * cache coherency) and all non-MOCS GPU access will also be uncached so
3703 * that all direct access to the scanout remains coherent.
3704 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003705int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3706 enum i915_cache_level cache_level)
3707{
Chris Wilsonaa653a62016-08-04 07:52:27 +01003708 struct i915_vma *vma;
Chris Wilsona6a7cc42016-11-18 21:17:46 +00003709 int ret;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003710
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003711 lockdep_assert_held(&obj->base.dev->struct_mutex);
3712
Chris Wilsone4ffd172011-04-04 09:44:39 +01003713 if (obj->cache_level == cache_level)
Chris Wilsona6a7cc42016-11-18 21:17:46 +00003714 return 0;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003715
Chris Wilsonef55f922015-10-09 14:11:27 +01003716 /* Inspect the list of currently bound VMA and unbind any that would
3717 * be invalid given the new cache-level. This is principally to
3718 * catch the issue of the CS prefetch crossing page boundaries and
3719 * reading an invalid PTE on older architectures.
3720 */
Chris Wilsonaa653a62016-08-04 07:52:27 +01003721restart:
3722 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003723 if (!drm_mm_node_allocated(&vma->node))
3724 continue;
3725
Chris Wilson20dfbde2016-08-04 16:32:30 +01003726 if (i915_vma_is_pinned(vma)) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003727 DRM_DEBUG("can not change the cache level of pinned objects\n");
3728 return -EBUSY;
3729 }
3730
Chris Wilsonaa653a62016-08-04 07:52:27 +01003731 if (i915_gem_valid_gtt_space(vma, cache_level))
3732 continue;
3733
3734 ret = i915_vma_unbind(vma);
3735 if (ret)
3736 return ret;
3737
3738 /* As unbinding may affect other elements in the
3739 * obj->vma_list (due to side-effects from retiring
3740 * an active vma), play safe and restart the iterator.
3741 */
3742 goto restart;
Chris Wilson42d6ab42012-07-26 11:49:32 +01003743 }
3744
Chris Wilsonef55f922015-10-09 14:11:27 +01003745 /* We can reuse the existing drm_mm nodes but need to change the
3746 * cache-level on the PTE. We could simply unbind them all and
3747 * rebind with the correct cache-level on next use. However since
3748	 * we already have a valid slot, dma mapping, pages etc, we may as well
3749 * rewrite the PTE in the belief that doing so tramples upon less
3750 * state and so involves less work.
3751 */
Chris Wilson15717de2016-08-04 07:52:26 +01003752 if (obj->bind_count) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003753 /* Before we change the PTE, the GPU must not be accessing it.
3754 * If we wait upon the object, we know that all the bound
3755 * VMA are no longer active.
3756 */
Chris Wilsone95433c2016-10-28 13:58:27 +01003757 ret = i915_gem_object_wait(obj,
3758 I915_WAIT_INTERRUPTIBLE |
3759 I915_WAIT_LOCKED |
3760 I915_WAIT_ALL,
3761 MAX_SCHEDULE_TIMEOUT,
3762 NULL);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003763 if (ret)
3764 return ret;
3765
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00003766 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3767 cache_level != I915_CACHE_NONE) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003768 /* Access to snoopable pages through the GTT is
3769 * incoherent and on some machines causes a hard
3770	 * lockup. Relinquish the CPU mmapping to force
3771 * userspace to refault in the pages and we can
3772 * then double check if the GTT mapping is still
3773 * valid for that pointer access.
3774 */
3775 i915_gem_release_mmap(obj);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003776
Chris Wilsonef55f922015-10-09 14:11:27 +01003777 /* As we no longer need a fence for GTT access,
3778 * we can relinquish it now (and so prevent having
3779 * to steal a fence from someone else on the next
3780 * fence request). Note GPU activity would have
3781 * dropped the fence as all snoopable access is
3782 * supposed to be linear.
3783 */
Chris Wilson49ef5292016-08-18 17:17:00 +01003784 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3785 ret = i915_vma_put_fence(vma);
3786 if (ret)
3787 return ret;
3788 }
Chris Wilsonef55f922015-10-09 14:11:27 +01003789 } else {
3790 /* We either have incoherent backing store and
3791 * so no GTT access or the architecture is fully
3792 * coherent. In such cases, existing GTT mmaps
3793 * ignore the cache bit in the PTE and we can
3794 * rewrite it without confusing the GPU or having
3795 * to force userspace to fault back in its mmaps.
3796 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003797 }
3798
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003799 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003800 if (!drm_mm_node_allocated(&vma->node))
3801 continue;
3802
3803 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3804 if (ret)
3805 return ret;
3806 }
Chris Wilsone4ffd172011-04-04 09:44:39 +01003807 }
3808
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003809 list_for_each_entry(vma, &obj->vma_list, obj_link)
Chris Wilson2c225692013-08-09 12:26:45 +01003810 vma->node.color = cache_level;
Chris Wilsonb8f55be2017-08-11 12:11:16 +01003811 i915_gem_object_set_cache_coherency(obj, cache_level);
Chris Wilsone27ab732017-06-15 13:38:49 +01003812 obj->cache_dirty = true; /* Always invalidate stale cachelines */
Chris Wilson2c225692013-08-09 12:26:45 +01003813
Chris Wilsone4ffd172011-04-04 09:44:39 +01003814 return 0;
3815}
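/*
 * Worked example (a sketch based on the caller below, not additional
 * behaviour): pinning a buffer for scanout goes through
 * i915_gem_object_pin_to_display_plane(), which selects the cache level as
 *
 *	i915_gem_object_set_cache_level(obj,
 *					HAS_WT(to_i915(obj->base.dev)) ?
 *					I915_CACHE_WT : I915_CACHE_NONE);
 *
 * i.e. write-through where the hardware supports it, otherwise uncached,
 * matching the scanout coherency rule described in the kernel-doc above.
 */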
3816
Ben Widawsky199adf42012-09-21 17:01:20 -07003817int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3818 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003819{
Ben Widawsky199adf42012-09-21 17:01:20 -07003820 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003821 struct drm_i915_gem_object *obj;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003822 int err = 0;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003823
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003824 rcu_read_lock();
3825 obj = i915_gem_object_lookup_rcu(file, args->handle);
3826 if (!obj) {
3827 err = -ENOENT;
3828 goto out;
3829 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003830
Chris Wilson651d7942013-08-08 14:41:10 +01003831 switch (obj->cache_level) {
3832 case I915_CACHE_LLC:
3833 case I915_CACHE_L3_LLC:
3834 args->caching = I915_CACHING_CACHED;
3835 break;
3836
Chris Wilson4257d3b2013-08-08 14:41:11 +01003837 case I915_CACHE_WT:
3838 args->caching = I915_CACHING_DISPLAY;
3839 break;
3840
Chris Wilson651d7942013-08-08 14:41:10 +01003841 default:
3842 args->caching = I915_CACHING_NONE;
3843 break;
3844 }
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003845out:
3846 rcu_read_unlock();
3847 return err;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003848}
3849
Ben Widawsky199adf42012-09-21 17:01:20 -07003850int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3851 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003852{
Chris Wilson9c870d02016-10-24 13:42:15 +01003853 struct drm_i915_private *i915 = to_i915(dev);
Ben Widawsky199adf42012-09-21 17:01:20 -07003854 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003855 struct drm_i915_gem_object *obj;
3856 enum i915_cache_level level;
Chris Wilsond65415d2017-01-19 08:22:10 +00003857 int ret = 0;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003858
Ben Widawsky199adf42012-09-21 17:01:20 -07003859 switch (args->caching) {
3860 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003861 level = I915_CACHE_NONE;
3862 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003863 case I915_CACHING_CACHED:
Imre Deake5756c12015-08-14 18:43:30 +03003864 /*
3865 * Due to a HW issue on BXT A stepping, GPU stores via a
3866 * snooped mapping may leave stale data in a corresponding CPU
3867 * cacheline, whereas normally such cachelines would get
3868 * invalidated.
3869 */
Chris Wilson9c870d02016-10-24 13:42:15 +01003870 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
Imre Deake5756c12015-08-14 18:43:30 +03003871 return -ENODEV;
3872
Chris Wilsone6994ae2012-07-10 10:27:08 +01003873 level = I915_CACHE_LLC;
3874 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003875 case I915_CACHING_DISPLAY:
Chris Wilson9c870d02016-10-24 13:42:15 +01003876 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003877 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003878 default:
3879 return -EINVAL;
3880 }
3881
Chris Wilsond65415d2017-01-19 08:22:10 +00003882 obj = i915_gem_object_lookup(file, args->handle);
3883 if (!obj)
3884 return -ENOENT;
3885
3886 if (obj->cache_level == level)
3887 goto out;
3888
3889 ret = i915_gem_object_wait(obj,
3890 I915_WAIT_INTERRUPTIBLE,
3891 MAX_SCHEDULE_TIMEOUT,
3892 to_rps_client(file));
3893 if (ret)
3894 goto out;
3895
Ben Widawsky3bc29132012-09-26 16:15:20 -07003896 ret = i915_mutex_lock_interruptible(dev);
3897 if (ret)
Chris Wilsond65415d2017-01-19 08:22:10 +00003898 goto out;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003899
3900 ret = i915_gem_object_set_cache_level(obj, level);
Chris Wilsone6994ae2012-07-10 10:27:08 +01003901 mutex_unlock(&dev->struct_mutex);
Chris Wilsond65415d2017-01-19 08:22:10 +00003902
3903out:
3904 i915_gem_object_put(obj);
Chris Wilsone6994ae2012-07-10 10:27:08 +01003905 return ret;
3906}
3907
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003908/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003909 * Prepare buffer for display plane (scanout, cursors, etc).
3910 * Can be called from an uninterruptible phase (modesetting) and allows
3911 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003912 */
Chris Wilson058d88c2016-08-15 10:49:06 +01003913struct i915_vma *
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003914i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3915 u32 alignment,
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003916 const struct i915_ggtt_view *view)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003917{
Chris Wilson058d88c2016-08-15 10:49:06 +01003918 struct i915_vma *vma;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003919 int ret;
3920
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003921 lockdep_assert_held(&obj->base.dev->struct_mutex);
3922
Chris Wilsonbd3d2252017-10-13 21:26:14 +01003923 /* Mark the global pin early so that we account for the
Chris Wilsoncc98b412013-08-09 12:25:09 +01003924 * display coherency whilst setting up the cache domains.
3925 */
Chris Wilsonbd3d2252017-10-13 21:26:14 +01003926 obj->pin_global++;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003927
Eric Anholta7ef0642011-03-29 16:59:54 -07003928 /* The display engine is not coherent with the LLC cache on gen6. As
3929 * a result, we make sure that the pinning that is about to occur is
3930 * done with uncached PTEs. This is lowest common denominator for all
3931 * chipsets.
3932 *
3933 * However for gen6+, we could do better by using the GFDT bit instead
3934 * of uncaching, which would allow us to flush all the LLC-cached data
3935 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3936 */
Chris Wilson651d7942013-08-08 14:41:10 +01003937 ret = i915_gem_object_set_cache_level(obj,
Tvrtko Ursulin86527442016-10-13 11:03:00 +01003938 HAS_WT(to_i915(obj->base.dev)) ?
3939 I915_CACHE_WT : I915_CACHE_NONE);
Chris Wilson058d88c2016-08-15 10:49:06 +01003940 if (ret) {
3941 vma = ERR_PTR(ret);
Chris Wilsonbd3d2252017-10-13 21:26:14 +01003942 goto err_unpin_global;
Chris Wilson058d88c2016-08-15 10:49:06 +01003943 }
Eric Anholta7ef0642011-03-29 16:59:54 -07003944
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003945 /* As the user may map the buffer once pinned in the display plane
3946 * (e.g. libkms for the bootup splash), we have to ensure that we
Chris Wilson2efb8132016-08-18 17:17:06 +01003947 * always use map_and_fenceable for all scanout buffers. However,
3948 * it may simply be too big to fit into mappable, in which case
3949 * put it anyway and hope that userspace can cope (but always first
3950 * try to preserve the existing ABI).
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003951 */
Chris Wilson2efb8132016-08-18 17:17:06 +01003952 vma = ERR_PTR(-ENOSPC);
Chris Wilson47a8e3f2017-01-14 00:28:27 +00003953 if (!view || view->type == I915_GGTT_VIEW_NORMAL)
Chris Wilson2efb8132016-08-18 17:17:06 +01003954 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3955 PIN_MAPPABLE | PIN_NONBLOCK);
Chris Wilson767a2222016-11-07 11:01:28 +00003956 if (IS_ERR(vma)) {
3957 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3958 unsigned int flags;
3959
3960 /* Valleyview is definitely limited to scanning out the first
3961	 * 512MiB. Let's presume this behaviour was inherited from the
3962 * g4x display engine and that all earlier gen are similarly
3963 * limited. Testing suggests that it is a little more
3964 * complicated than this. For example, Cherryview appears quite
3965 * happy to scanout from anywhere within its global aperture.
3966 */
3967 flags = 0;
3968 if (HAS_GMCH_DISPLAY(i915))
3969 flags = PIN_MAPPABLE;
3970 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3971 }
Chris Wilson058d88c2016-08-15 10:49:06 +01003972 if (IS_ERR(vma))
Chris Wilsonbd3d2252017-10-13 21:26:14 +01003973 goto err_unpin_global;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003974
Chris Wilsond8923dc2016-08-18 17:17:07 +01003975 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3976
Chris Wilsona6a7cc42016-11-18 21:17:46 +00003977 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
Chris Wilson5a97bcc2017-02-22 11:40:46 +00003978 __i915_gem_object_flush_for_display(obj);
Chris Wilsond59b21e2017-02-22 11:40:49 +00003979 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003980
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003981 /* It should now be out of any other write domains, and we can update
3982 * the domain values for our changes.
3983 */
Chris Wilson05394f32010-11-08 19:18:58 +00003984 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003985
Chris Wilson058d88c2016-08-15 10:49:06 +01003986 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003987
Chris Wilsonbd3d2252017-10-13 21:26:14 +01003988err_unpin_global:
3989 obj->pin_global--;
Chris Wilson058d88c2016-08-15 10:49:06 +01003990 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003991}
3992
3993void
Chris Wilson058d88c2016-08-15 10:49:06 +01003994i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003995{
Chris Wilson49d73912016-11-29 09:50:08 +00003996 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003997
Chris Wilsonbd3d2252017-10-13 21:26:14 +01003998 if (WARN_ON(vma->obj->pin_global == 0))
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003999 return;
4000
Chris Wilsonbd3d2252017-10-13 21:26:14 +01004001 if (--vma->obj->pin_global == 0)
Chris Wilsonf51455d2017-01-10 14:47:34 +00004002 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
Tvrtko Ursuline6617332015-03-23 11:10:33 +00004003
Chris Wilson383d5822016-08-18 17:17:08 +01004004 /* Bump the LRU to try and avoid premature eviction whilst flipping */
Chris Wilsonbefedbb2017-01-19 19:26:55 +00004005 i915_gem_object_bump_inactive_ggtt(vma->obj);
Chris Wilson383d5822016-08-18 17:17:08 +01004006
Chris Wilson058d88c2016-08-15 10:49:06 +01004007 i915_vma_unpin(vma);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08004008}
4009
Eric Anholte47c68e2008-11-14 13:35:19 -08004010/**
4011 * Moves a single object to the CPU read, and possibly write domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01004012 * @obj: object to act on
4013 * @write: requesting write or read-only access
Eric Anholte47c68e2008-11-14 13:35:19 -08004014 *
4015 * This function returns when the move is complete, including waiting on
4016 * flushes to occur.
4017 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02004018int
Chris Wilson919926a2010-11-12 13:42:53 +00004019i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08004020{
Eric Anholte47c68e2008-11-14 13:35:19 -08004021 int ret;
4022
Chris Wilsone95433c2016-10-28 13:58:27 +01004023 lockdep_assert_held(&obj->base.dev->struct_mutex);
Chris Wilson4c7d62c2016-10-28 13:58:32 +01004024
Chris Wilsone95433c2016-10-28 13:58:27 +01004025 ret = i915_gem_object_wait(obj,
4026 I915_WAIT_INTERRUPTIBLE |
4027 I915_WAIT_LOCKED |
4028 (write ? I915_WAIT_ALL : 0),
4029 MAX_SCHEDULE_TIMEOUT,
4030 NULL);
Chris Wilson88241782011-01-07 17:09:48 +00004031 if (ret)
4032 return ret;
4033
Chris Wilsonef749212017-04-12 12:01:10 +01004034 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
Eric Anholte47c68e2008-11-14 13:35:19 -08004035
Eric Anholte47c68e2008-11-14 13:35:19 -08004036 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00004037 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson57822dc2017-02-22 11:40:48 +00004038 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
Chris Wilson05394f32010-11-08 19:18:58 +00004039 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08004040 }
4041
4042 /* It should now be out of any other write domains, and we can update
4043 * the domain values for our changes.
4044 */
Chris Wilsone27ab732017-06-15 13:38:49 +01004045 GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Eric Anholte47c68e2008-11-14 13:35:19 -08004046
4047 /* If we're writing through the CPU, then the GPU read domains will
4048 * need to be invalidated at next use.
4049 */
Chris Wilsone27ab732017-06-15 13:38:49 +01004050 if (write)
4051 __start_cpu_write(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08004052
4053 return 0;
4054}
4055
Eric Anholt673a3942008-07-30 12:06:12 -07004056/* Throttle our rendering by waiting until the ring has completed our requests
4057 * emitted over 20 msec ago.
4058 *
Eric Anholtb9624422009-06-03 07:27:35 +00004059 * Note that if we were to use the current jiffies each time around the loop,
4060 * we wouldn't escape the function with any frames outstanding if the time to
4061 * render a frame was over 20ms.
4062 *
Eric Anholt673a3942008-07-30 12:06:12 -07004063 * This should get us reasonable parallelism between CPU and GPU but also
4064 * relatively low latency when blocking on a particular request to finish.
4065 */
4066static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004067i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004068{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004069 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004070 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilsond0bc54f2015-05-21 21:01:48 +01004071 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
John Harrison54fb2412014-11-24 18:49:27 +00004072 struct drm_i915_gem_request *request, *target = NULL;
Chris Wilsone95433c2016-10-28 13:58:27 +01004073 long ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004074
Chris Wilsonf4457ae2016-04-13 17:35:08 +01004075 /* ABI: return -EIO if already wedged */
4076 if (i915_terminally_wedged(&dev_priv->gpu_error))
4077 return -EIO;
Chris Wilsone110e8d2011-01-26 15:39:14 +00004078
Chris Wilson1c255952010-09-26 11:03:27 +01004079 spin_lock(&file_priv->mm.lock);
Chris Wilsonc8659ef2017-03-02 12:25:25 +00004080 list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
Eric Anholtb9624422009-06-03 07:27:35 +00004081 if (time_after_eq(request->emitted_jiffies, recent_enough))
4082 break;
4083
Chris Wilsonc8659ef2017-03-02 12:25:25 +00004084 if (target) {
4085 list_del(&target->client_link);
4086 target->file_priv = NULL;
4087 }
John Harrisonfcfa423c2015-05-29 17:44:12 +01004088
John Harrison54fb2412014-11-24 18:49:27 +00004089 target = request;
Eric Anholtb9624422009-06-03 07:27:35 +00004090 }
John Harrisonff865882014-11-24 18:49:28 +00004091 if (target)
Chris Wilsone8a261e2016-07-20 13:31:49 +01004092 i915_gem_request_get(target);
Chris Wilson1c255952010-09-26 11:03:27 +01004093 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004094
John Harrison54fb2412014-11-24 18:49:27 +00004095 if (target == NULL)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004096 return 0;
4097
Chris Wilsone95433c2016-10-28 13:58:27 +01004098 ret = i915_wait_request(target,
4099 I915_WAIT_INTERRUPTIBLE,
4100 MAX_SCHEDULE_TIMEOUT);
Chris Wilsone8a261e2016-07-20 13:31:49 +01004101 i915_gem_request_put(target);
John Harrisonff865882014-11-24 18:49:28 +00004102
Chris Wilsone95433c2016-10-28 13:58:27 +01004103 return ret < 0 ? ret : 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004104}
4105
Chris Wilson058d88c2016-08-15 10:49:06 +01004106struct i915_vma *
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004107i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4108 const struct i915_ggtt_view *view,
Chris Wilson91b2db62016-08-04 16:32:23 +01004109 u64 size,
Chris Wilson2ffffd02016-08-04 16:32:22 +01004110 u64 alignment,
4111 u64 flags)
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004112{
Chris Wilsonad16d2e2016-10-13 09:55:04 +01004113 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
4114 struct i915_address_space *vm = &dev_priv->ggtt.base;
Chris Wilson59bfa122016-08-04 16:32:31 +01004115 struct i915_vma *vma;
4116 int ret;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03004117
Chris Wilson4c7d62c2016-10-28 13:58:32 +01004118 lockdep_assert_held(&obj->base.dev->struct_mutex);
4119
Chris Wilson43ae70d2017-10-09 09:44:01 +01004120 if (!view && flags & PIN_MAPPABLE) {
4121 /* If the required space is larger than the available
4122	 * aperture, we will not be able to find a slot for the
4123 * object and unbinding the object now will be in
4124 * vain. Worse, doing so may cause us to ping-pong
4125 * the object in and out of the Global GTT and
4126 * waste a lot of cycles under the mutex.
4127 */
4128 if (obj->base.size > dev_priv->ggtt.mappable_end)
4129 return ERR_PTR(-E2BIG);
4130
4131 /* If NONBLOCK is set the caller is optimistically
4132 * trying to cache the full object within the mappable
4133 * aperture, and *must* have a fallback in place for
4134 * situations where we cannot bind the object. We
4135 * can be a little more lax here and use the fallback
4136 * more often to avoid costly migrations of ourselves
4137 * and other objects within the aperture.
4138 *
4139 * Half-the-aperture is used as a simple heuristic.
4140	 * More interesting would be to do a search for a free
4141 * block prior to making the commitment to unbind.
4142 * That caters for the self-harm case, and with a
4143 * little more heuristics (e.g. NOFAULT, NOEVICT)
4144 * we could try to minimise harm to others.
4145 */
4146 if (flags & PIN_NONBLOCK &&
4147 obj->base.size > dev_priv->ggtt.mappable_end / 2)
4148 return ERR_PTR(-ENOSPC);
4149 }
4150
Chris Wilson718659a2017-01-16 15:21:28 +00004151 vma = i915_vma_instance(obj, vm, view);
Chris Wilsone0216b72017-01-19 19:26:57 +00004152 if (unlikely(IS_ERR(vma)))
Chris Wilson058d88c2016-08-15 10:49:06 +01004153 return vma;
Chris Wilson59bfa122016-08-04 16:32:31 +01004154
4155 if (i915_vma_misplaced(vma, size, alignment, flags)) {
Chris Wilson43ae70d2017-10-09 09:44:01 +01004156 if (flags & PIN_NONBLOCK) {
4157 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
4158 return ERR_PTR(-ENOSPC);
Chris Wilson59bfa122016-08-04 16:32:31 +01004159
Chris Wilson43ae70d2017-10-09 09:44:01 +01004160 if (flags & PIN_MAPPABLE &&
Chris Wilson944397f2017-01-09 16:16:11 +00004161 vma->fence_size > dev_priv->ggtt.mappable_end / 2)
Chris Wilsonad16d2e2016-10-13 09:55:04 +01004162 return ERR_PTR(-ENOSPC);
4163 }
4164
Chris Wilson59bfa122016-08-04 16:32:31 +01004165 WARN(i915_vma_is_pinned(vma),
4166 "bo is already pinned in ggtt with incorrect alignment:"
Chris Wilson05a20d02016-08-18 17:16:55 +01004167 " offset=%08x, req.alignment=%llx,"
4168 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
4169 i915_ggtt_offset(vma), alignment,
Chris Wilson59bfa122016-08-04 16:32:31 +01004170 !!(flags & PIN_MAPPABLE),
Chris Wilson05a20d02016-08-18 17:16:55 +01004171 i915_vma_is_map_and_fenceable(vma));
Chris Wilson59bfa122016-08-04 16:32:31 +01004172 ret = i915_vma_unbind(vma);
4173 if (ret)
Chris Wilson058d88c2016-08-15 10:49:06 +01004174 return ERR_PTR(ret);
Chris Wilson59bfa122016-08-04 16:32:31 +01004175 }
4176
Chris Wilson058d88c2016-08-15 10:49:06 +01004177 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
4178 if (ret)
4179 return ERR_PTR(ret);
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004180
Chris Wilson058d88c2016-08-15 10:49:06 +01004181 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07004182}
4183
Chris Wilsonedf6b762016-08-09 09:23:33 +01004184static __always_inline unsigned int __busy_read_flag(unsigned int id)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004185{
4186 /* Note that we could alias engines in the execbuf API, but
4187	 * that would be very unwise as it prevents userspace from exercising
4188 * fine control over engine selection. Ahem.
4189 *
4190 * This should be something like EXEC_MAX_ENGINE instead of
4191 * I915_NUM_ENGINES.
4192 */
4193 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
4194 return 0x10000 << id;
4195}
4196
4197static __always_inline unsigned int __busy_write_id(unsigned int id)
4198{
Chris Wilson70cb4722016-08-09 18:08:25 +01004199 /* The uABI guarantees an active writer is also amongst the read
4200 * engines. This would be true if we accessed the activity tracking
4201 * under the lock, but as we perform the lookup of the object and
4202 * its activity locklessly we can not guarantee that the last_write
4203 * being active implies that we have set the same engine flag from
4204 * last_read - hence we always set both read and write busy for
4205 * last_write.
4206 */
4207 return id | __busy_read_flag(id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004208}
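/*
 * Worked example (illustrative only): for an engine whose uabi_id is 2,
 * __busy_read_flag(2) == 0x10000 << 2 == 0x40000 and
 * __busy_write_id(2) == 2 | 0x40000 == 0x40002. The busy word therefore
 * carries the id of the last writer in the low 16 bits and a per-engine
 * read mask in the high 16 bits.
 */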
4209
Chris Wilsonedf6b762016-08-09 09:23:33 +01004210static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01004211__busy_set_if_active(const struct dma_fence *fence,
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004212 unsigned int (*flag)(unsigned int id))
4213{
Chris Wilsond07f0e52016-10-28 13:58:44 +01004214 struct drm_i915_gem_request *rq;
Chris Wilson12555012016-08-16 09:50:40 +01004215
Chris Wilsond07f0e52016-10-28 13:58:44 +01004216 /* We have to check the current hw status of the fence as the uABI
4217 * guarantees forward progress. We could rely on the idle worker
4218 * to eventually flush us, but to minimise latency just ask the
4219 * hardware.
4220 *
4221 * Note we only report on the status of native fences.
4222 */
4223 if (!dma_fence_is_i915(fence))
Chris Wilson12555012016-08-16 09:50:40 +01004224 return 0;
4225
Chris Wilsond07f0e52016-10-28 13:58:44 +01004226 /* opencode to_request() in order to avoid const warnings */
4227 rq = container_of(fence, struct drm_i915_gem_request, fence);
4228 if (i915_gem_request_completed(rq))
4229 return 0;
4230
Chris Wilson1d39f282017-04-11 13:43:06 +01004231 return flag(rq->engine->uabi_id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004232}
4233
Chris Wilsonedf6b762016-08-09 09:23:33 +01004234static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01004235busy_check_reader(const struct dma_fence *fence)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004236{
Chris Wilsond07f0e52016-10-28 13:58:44 +01004237 return __busy_set_if_active(fence, __busy_read_flag);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004238}
4239
Chris Wilsonedf6b762016-08-09 09:23:33 +01004240static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01004241busy_check_writer(const struct dma_fence *fence)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004242{
Chris Wilsond07f0e52016-10-28 13:58:44 +01004243 if (!fence)
4244 return 0;
4245
4246 return __busy_set_if_active(fence, __busy_write_id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004247}
4248
Eric Anholt673a3942008-07-30 12:06:12 -07004249int
Eric Anholt673a3942008-07-30 12:06:12 -07004250i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00004251 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004252{
4253 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004254 struct drm_i915_gem_object *obj;
Chris Wilsond07f0e52016-10-28 13:58:44 +01004255 struct reservation_object_list *list;
4256 unsigned int seq;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004257 int err;
Eric Anholt673a3942008-07-30 12:06:12 -07004258
Chris Wilsond07f0e52016-10-28 13:58:44 +01004259 err = -ENOENT;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004260 rcu_read_lock();
4261 obj = i915_gem_object_lookup_rcu(file, args->handle);
Chris Wilsond07f0e52016-10-28 13:58:44 +01004262 if (!obj)
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004263 goto out;
Chris Wilsond07f0e52016-10-28 13:58:44 +01004264
4265 /* A discrepancy here is that we do not report the status of
4266 * non-i915 fences, i.e. even though we may report the object as idle,
4267 * a call to set-domain may still stall waiting for foreign rendering.
4268 * This also means that wait-ioctl may report an object as busy,
4269 * where busy-ioctl considers it idle.
4270 *
4271 * We trade the ability to warn of foreign fences to report on which
4272 * i915 engines are active for the object.
4273 *
4274 * Alternatively, we can trade that extra information on read/write
4275 * activity with
4276 * args->busy =
4277 * !reservation_object_test_signaled_rcu(obj->resv, true);
4278 * to report the overall busyness. This is what the wait-ioctl does.
4279 *
4280 */
4281retry:
4282 seq = raw_read_seqcount(&obj->resv->seq);
4283
4284 /* Translate the exclusive fence to the READ *and* WRITE engine */
4285 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
4286
4287 /* Translate shared fences to READ set of engines */
4288 list = rcu_dereference(obj->resv->fence);
4289 if (list) {
4290 unsigned int shared_count = list->shared_count, i;
4291
4292 for (i = 0; i < shared_count; ++i) {
4293 struct dma_fence *fence =
4294 rcu_dereference(list->shared[i]);
4295
4296 args->busy |= busy_check_reader(fence);
4297 }
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004298 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08004299
Chris Wilsond07f0e52016-10-28 13:58:44 +01004300 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
4301 goto retry;
Chris Wilson426960b2016-01-15 16:51:46 +00004302
Chris Wilsond07f0e52016-10-28 13:58:44 +01004303 err = 0;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004304out:
4305 rcu_read_unlock();
4306 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07004307}
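/*
 * Sketch of how userspace might decode the result (illustrative, not part
 * of the original file); the masks follow the encoding used by
 * __busy_read_flag()/__busy_write_id() above:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		unsigned int write_engine = busy.busy & 0xffff;
 *		unsigned int read_engines = busy.busy >> 16;
 *	}
 *
 * A non-zero read_engines mask means the object is still busy on at least
 * one engine; write_engine identifies the last writer, if any.
 */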
4308
4309int
4310i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4311 struct drm_file *file_priv)
4312{
Akshay Joshi0206e352011-08-16 15:34:10 -04004313 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004314}
4315
Chris Wilson3ef94da2009-09-14 16:50:29 +01004316int
4317i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4318 struct drm_file *file_priv)
4319{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004320 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson3ef94da2009-09-14 16:50:29 +01004321 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004322 struct drm_i915_gem_object *obj;
Chris Wilson1233e2d2016-10-28 13:58:37 +01004323 int err;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004324
4325 switch (args->madv) {
4326 case I915_MADV_DONTNEED:
4327 case I915_MADV_WILLNEED:
4328 break;
4329 default:
4330 return -EINVAL;
4331 }
4332
Chris Wilson03ac0642016-07-20 13:31:51 +01004333 obj = i915_gem_object_lookup(file_priv, args->handle);
Chris Wilson1233e2d2016-10-28 13:58:37 +01004334 if (!obj)
4335 return -ENOENT;
4336
4337 err = mutex_lock_interruptible(&obj->mm.lock);
4338 if (err)
4339 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004340
Chris Wilsonf1fa4f42017-10-13 21:26:13 +01004341 if (i915_gem_object_has_pages(obj) &&
Chris Wilson3e510a82016-08-05 10:14:23 +01004342 i915_gem_object_is_tiled(obj) &&
Daniel Vetter656bfa32014-11-20 09:26:30 +01004343 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
Chris Wilsonbc0629a2016-11-01 10:03:17 +00004344 if (obj->mm.madv == I915_MADV_WILLNEED) {
4345 GEM_BUG_ON(!obj->mm.quirked);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004346 __i915_gem_object_unpin_pages(obj);
Chris Wilsonbc0629a2016-11-01 10:03:17 +00004347 obj->mm.quirked = false;
4348 }
4349 if (args->madv == I915_MADV_WILLNEED) {
Chris Wilson2c3a3f42016-11-04 10:30:01 +00004350 GEM_BUG_ON(obj->mm.quirked);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004351 __i915_gem_object_pin_pages(obj);
Chris Wilsonbc0629a2016-11-01 10:03:17 +00004352 obj->mm.quirked = true;
4353 }
Daniel Vetter656bfa32014-11-20 09:26:30 +01004354 }
4355
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004356 if (obj->mm.madv != __I915_MADV_PURGED)
4357 obj->mm.madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004358
Chris Wilson6c085a72012-08-20 11:40:46 +02004359 /* if the object is no longer attached, discard its backing storage */
Chris Wilsonf1fa4f42017-10-13 21:26:13 +01004360 if (obj->mm.madv == I915_MADV_DONTNEED &&
4361 !i915_gem_object_has_pages(obj))
Chris Wilson2d7ef392009-09-20 23:13:10 +01004362 i915_gem_object_truncate(obj);
4363
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004364 args->retained = obj->mm.madv != __I915_MADV_PURGED;
Chris Wilson1233e2d2016-10-28 13:58:37 +01004365 mutex_unlock(&obj->mm.lock);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004366
Chris Wilson1233e2d2016-10-28 13:58:37 +01004367out:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01004368 i915_gem_object_put(obj);
Chris Wilson1233e2d2016-10-28 13:58:37 +01004369 return err;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004370}
4371
Chris Wilson5b8c8ae2016-11-16 19:07:04 +00004372static void
4373frontbuffer_retire(struct i915_gem_active *active,
4374 struct drm_i915_gem_request *request)
4375{
4376 struct drm_i915_gem_object *obj =
4377 container_of(active, typeof(*obj), frontbuffer_write);
4378
Chris Wilsond59b21e2017-02-22 11:40:49 +00004379 intel_fb_obj_flush(obj, ORIGIN_CS);
Chris Wilson5b8c8ae2016-11-16 19:07:04 +00004380}
4381
Chris Wilson37e680a2012-06-07 15:38:42 +01004382void i915_gem_object_init(struct drm_i915_gem_object *obj,
4383 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004384{
Chris Wilson1233e2d2016-10-28 13:58:37 +01004385 mutex_init(&obj->mm.lock);
4386
Ben Widawsky2f633152013-07-17 12:19:03 -07004387 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilsond1b48c12017-08-16 09:52:08 +01004388 INIT_LIST_HEAD(&obj->lut_list);
Chris Wilson8d9d5742015-04-07 16:20:38 +01004389 INIT_LIST_HEAD(&obj->batch_pool_link);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004390
Chris Wilson37e680a2012-06-07 15:38:42 +01004391 obj->ops = ops;
4392
Chris Wilsond07f0e52016-10-28 13:58:44 +01004393 reservation_object_init(&obj->__builtin_resv);
4394 obj->resv = &obj->__builtin_resv;
4395
Chris Wilson50349242016-08-18 17:17:04 +01004396 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
Chris Wilson5b8c8ae2016-11-16 19:07:04 +00004397 init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004398
4399 obj->mm.madv = I915_MADV_WILLNEED;
4400 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
4401 mutex_init(&obj->mm.get_page.lock);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004402
Dave Gordonf19ec8c2016-07-04 11:34:37 +01004403 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004404}
4405
Chris Wilson37e680a2012-06-07 15:38:42 +01004406static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
Tvrtko Ursulin3599a912016-11-01 14:44:10 +00004407 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
4408 I915_GEM_OBJECT_IS_SHRINKABLE,
Chris Wilson7c55e2c2017-03-07 12:03:38 +00004409
Chris Wilson37e680a2012-06-07 15:38:42 +01004410 .get_pages = i915_gem_object_get_pages_gtt,
4411 .put_pages = i915_gem_object_put_pages_gtt,
Chris Wilson7c55e2c2017-03-07 12:03:38 +00004412
4413 .pwrite = i915_gem_object_pwrite_gtt,
Chris Wilson37e680a2012-06-07 15:38:42 +01004414};
4415
Matthew Auld465c4032017-10-06 23:18:14 +01004416static int i915_gem_object_create_shmem(struct drm_device *dev,
4417 struct drm_gem_object *obj,
4418 size_t size)
4419{
4420 struct drm_i915_private *i915 = to_i915(dev);
4421 unsigned long flags = VM_NORESERVE;
4422 struct file *filp;
4423
4424 drm_gem_private_object_init(dev, obj, size);
4425
4426 if (i915->mm.gemfs)
4427 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
4428 flags);
4429 else
4430 filp = shmem_file_setup("i915", size, flags);
4431
4432 if (IS_ERR(filp))
4433 return PTR_ERR(filp);
4434
4435 obj->filp = filp;
4436
4437 return 0;
4438}
4439
Chris Wilsonb4bcbe22016-10-18 13:02:49 +01004440struct drm_i915_gem_object *
Tvrtko Ursulin12d79d72016-12-01 14:16:37 +00004441i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004442{
Daniel Vetterc397b902010-04-09 19:05:07 +00004443 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004444 struct address_space *mapping;
Chris Wilsonb8f55be2017-08-11 12:11:16 +01004445 unsigned int cache_level;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004446 gfp_t mask;
Chris Wilsonfe3db792016-04-25 13:32:13 +01004447 int ret;
Daniel Vetterc397b902010-04-09 19:05:07 +00004448
Chris Wilsonb4bcbe22016-10-18 13:02:49 +01004449 /* There is a prevalence of the assumption that we fit the object's
4450 * page count inside a 32bit _signed_ variable. Let's document this and
4451 * catch if we ever need to fix it. In the meantime, if you do spot
4452 * such a local variable, please consider fixing!
4453 */
Tvrtko Ursulin7a3ee5d2017-03-30 17:31:30 +01004454 if (size >> PAGE_SHIFT > INT_MAX)
Chris Wilsonb4bcbe22016-10-18 13:02:49 +01004455 return ERR_PTR(-E2BIG);
4456
4457 if (overflows_type(size, obj->base.size))
4458 return ERR_PTR(-E2BIG);
4459
Tvrtko Ursulin187685c2016-12-01 14:16:36 +00004460 obj = i915_gem_object_alloc(dev_priv);
Daniel Vetterc397b902010-04-09 19:05:07 +00004461 if (obj == NULL)
Chris Wilsonfe3db792016-04-25 13:32:13 +01004462 return ERR_PTR(-ENOMEM);
Daniel Vetterc397b902010-04-09 19:05:07 +00004463
Matthew Auld465c4032017-10-06 23:18:14 +01004464 ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
Chris Wilsonfe3db792016-04-25 13:32:13 +01004465 if (ret)
4466 goto fail;
Daniel Vetterc397b902010-04-09 19:05:07 +00004467
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004468 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
Jani Nikulac0f86832016-12-07 12:13:04 +02004469 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004470 /* 965gm cannot relocate objects above 4GiB. */
4471 mask &= ~__GFP_HIGHMEM;
4472 mask |= __GFP_DMA32;
4473 }
4474
Al Viro93c76a32015-12-04 23:45:44 -05004475 mapping = obj->base.filp->f_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004476 mapping_set_gfp_mask(mapping, mask);
Chris Wilson4846bf02017-06-09 12:03:46 +01004477 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
Hugh Dickins5949eac2011-06-27 16:18:18 -07004478
Chris Wilson37e680a2012-06-07 15:38:42 +01004479 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004480
Daniel Vetterc397b902010-04-09 19:05:07 +00004481 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4482 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4483
Chris Wilsonb8f55be2017-08-11 12:11:16 +01004484 if (HAS_LLC(dev_priv))
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004485 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004486 * cache) for about a 10% performance improvement
4487 * compared to uncached. Graphics requests other than
4488 * display scanout are coherent with the CPU in
4489 * accessing this cache. This means in this mode we
4490 * don't need to clflush on the CPU side, and on the
4491 * GPU side we only need to flush internal caches to
4492 * get data visible to the CPU.
4493 *
4494 * However, we maintain the display planes as UC, and so
4495 * need to rebind when first used as such.
4496 */
Chris Wilsonb8f55be2017-08-11 12:11:16 +01004497 cache_level = I915_CACHE_LLC;
4498 else
4499 cache_level = I915_CACHE_NONE;
Eric Anholta1871112011-03-29 16:59:55 -07004500
Chris Wilsonb8f55be2017-08-11 12:11:16 +01004501 i915_gem_object_set_cache_coherency(obj, cache_level);
Chris Wilsone27ab732017-06-15 13:38:49 +01004502
Daniel Vetterd861e332013-07-24 23:25:03 +02004503 trace_i915_gem_object_create(obj);
4504
Chris Wilson05394f32010-11-08 19:18:58 +00004505 return obj;
Chris Wilsonfe3db792016-04-25 13:32:13 +01004506
4507fail:
4508 i915_gem_object_free(obj);
Chris Wilsonfe3db792016-04-25 13:32:13 +01004509 return ERR_PTR(ret);
Daniel Vetterac52bc52010-04-09 19:05:06 +00004510}
4511
Chris Wilson340fbd82014-05-22 09:16:52 +01004512static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4513{
4514 /* If we are the last user of the backing storage (be it shmemfs
4515 * pages or stolen etc), we know that the pages are going to be
4516 * immediately released. In this case, we can then skip copying
4517 * back the contents from the GPU.
4518 */
4519
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004520 if (obj->mm.madv != I915_MADV_WILLNEED)
Chris Wilson340fbd82014-05-22 09:16:52 +01004521 return false;
4522
4523 if (obj->base.filp == NULL)
4524 return true;
4525
4526 /* At first glance, this looks racy, but then again so would be
4527 * userspace racing mmap against close. However, the first external
4528 * reference to the filp can only be obtained through the
4529 * i915_gem_mmap_ioctl() which safeguards us against the user
4530 * acquiring such a reference whilst we are in the middle of
4531 * freeing the object.
4532 */
4533 return atomic_long_read(&obj->base.filp->f_count) == 1;
4534}
4535
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004536static void __i915_gem_free_objects(struct drm_i915_private *i915,
4537 struct llist_node *freed)
Chris Wilsonbe726152010-07-23 23:18:50 +01004538{
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004539 struct drm_i915_gem_object *obj, *on;
Chris Wilsonbe726152010-07-23 23:18:50 +01004540
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004541 intel_runtime_pm_get(i915);
Chris Wilsoncc731f52017-10-13 21:26:21 +01004542 llist_for_each_entry_safe(obj, on, freed, freed) {
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004543 struct i915_vma *vma, *vn;
Paulo Zanonif65c9162013-11-27 18:20:34 -02004544
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004545 trace_i915_gem_object_destroy(obj);
4546
Chris Wilsoncc731f52017-10-13 21:26:21 +01004547 mutex_lock(&i915->drm.struct_mutex);
4548
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004549 GEM_BUG_ON(i915_gem_object_is_active(obj));
4550 list_for_each_entry_safe(vma, vn,
4551 &obj->vma_list, obj_link) {
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004552 GEM_BUG_ON(i915_vma_is_active(vma));
4553 vma->flags &= ~I915_VMA_PIN_MASK;
4554 i915_vma_close(vma);
4555 }
Chris Wilsondb6c2b42016-11-01 11:54:00 +00004556 GEM_BUG_ON(!list_empty(&obj->vma_list));
4557 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004558
Chris Wilsonf2123812017-10-16 12:40:37 +01004559 /* This serializes freeing with the shrinker. Since the free
4560 * is delayed, first by RCU then by the workqueue, we want the
4561 * shrinker to be able to free pages of unreferenced objects,
4562 * or else we may oom whilst there are plenty of deferred
4563 * freed objects.
4564 */
4565 if (i915_gem_object_has_pages(obj)) {
4566 spin_lock(&i915->mm.obj_lock);
4567 list_del_init(&obj->mm.link);
4568 spin_unlock(&i915->mm.obj_lock);
4569 }
4570
Chris Wilsoncc731f52017-10-13 21:26:21 +01004571 mutex_unlock(&i915->drm.struct_mutex);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004572
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004573 GEM_BUG_ON(obj->bind_count);
Chris Wilsona65adaf2017-10-09 09:43:57 +01004574 GEM_BUG_ON(obj->userfault_count);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004575 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
Chris Wilson67b48042017-08-22 12:05:16 +01004576 GEM_BUG_ON(!list_empty(&obj->lut_list));
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004577
4578 if (obj->ops->release)
4579 obj->ops->release(obj);
4580
4581 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4582 atomic_set(&obj->mm.pages_pin_count, 0);
Chris Wilson548625e2016-11-01 12:11:34 +00004583 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
Chris Wilsonf1fa4f42017-10-13 21:26:13 +01004584 GEM_BUG_ON(i915_gem_object_has_pages(obj));
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004585
4586 if (obj->base.import_attach)
4587 drm_prime_gem_destroy(&obj->base, NULL);
4588
Chris Wilsond07f0e52016-10-28 13:58:44 +01004589 reservation_object_fini(&obj->__builtin_resv);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004590 drm_gem_object_release(&obj->base);
4591 i915_gem_info_remove_obj(i915, obj->base.size);
4592
4593 kfree(obj->bit_17);
4594 i915_gem_object_free(obj);
Chris Wilsoncc731f52017-10-13 21:26:21 +01004595
4596 if (on)
4597 cond_resched();
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004598 }
Chris Wilsoncc731f52017-10-13 21:26:21 +01004599 intel_runtime_pm_put(i915);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004600}
4601
4602static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4603{
4604 struct llist_node *freed;
4605
Chris Wilson87701b42017-10-13 21:26:20 +01004606 /* Free the oldest, most stale object to keep the free_list short */
4607 freed = NULL;
4608 if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
4609 /* Only one consumer of llist_del_first() allowed */
4610 spin_lock(&i915->mm.free_lock);
4611 freed = llist_del_first(&i915->mm.free_list);
4612 spin_unlock(&i915->mm.free_lock);
4613 }
4614 if (unlikely(freed)) {
4615 freed->next = NULL;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004616 __i915_gem_free_objects(i915, freed);
Chris Wilson87701b42017-10-13 21:26:20 +01004617 }
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004618}
4619
4620static void __i915_gem_free_work(struct work_struct *work)
4621{
4622 struct drm_i915_private *i915 =
4623 container_of(work, struct drm_i915_private, mm.free_work);
4624 struct llist_node *freed;
Chris Wilson26e12f82011-03-20 11:20:19 +00004625
Chris Wilsonb1f788c2016-08-04 07:52:45 +01004626 /* All file-owned VMA should have been released by this point through
4627 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4628 * However, the object may also be bound into the global GTT (e.g.
4629 * older GPUs without per-process support, or for direct access through
4630 * the GTT either for the user or for scanout). Those VMA still need to
4631	 * be unbound now.
4632 */
Chris Wilson1488fc02012-04-24 15:47:31 +01004633
Chris Wilson5ad08be2017-04-07 11:25:51 +01004634 while ((freed = llist_del_all(&i915->mm.free_list))) {
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004635 __i915_gem_free_objects(i915, freed);
Chris Wilson5ad08be2017-04-07 11:25:51 +01004636 if (need_resched())
4637 break;
4638 }
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004639}
4640
4641static void __i915_gem_free_object_rcu(struct rcu_head *head)
4642{
4643 struct drm_i915_gem_object *obj =
4644 container_of(head, typeof(*obj), rcu);
4645 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4646
4647 /* We can't simply use call_rcu() from i915_gem_free_object()
4648 * as we need to block whilst unbinding, and the call_rcu
4649 * task may be called from softirq context. So we take a
4650 * detour through a worker.
4651 */
4652 if (llist_add(&obj->freed, &i915->mm.free_list))
4653 schedule_work(&i915->mm.free_work);
4654}
4655
4656void i915_gem_free_object(struct drm_gem_object *gem_obj)
4657{
4658 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4659
Chris Wilsonbc0629a2016-11-01 10:03:17 +00004660 if (obj->mm.quirked)
4661 __i915_gem_object_unpin_pages(obj);
4662
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004663 if (discard_backing_storage(obj))
4664 obj->mm.madv = I915_MADV_DONTNEED;
Daniel Vettera071fa02014-06-18 23:28:09 +02004665
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004666 /* Before we free the object, make sure any pure RCU-only
4667 * read-side critical sections are complete, e.g.
4668 * i915_gem_busy_ioctl(). For the corresponding synchronized
4669 * lookup see i915_gem_object_lookup_rcu().
4670 */
4671 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
Chris Wilsonbe726152010-07-23 23:18:50 +01004672}
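/*
 * Summary sketch of the deferred free path above (added here for clarity,
 * not present in the original file):
 *
 *	i915_gem_free_object()
 *	  -> call_rcu(&obj->rcu, __i915_gem_free_object_rcu)
 *	       -> llist_add(&obj->freed, &i915->mm.free_list)
 *	       -> schedule_work(&i915->mm.free_work)
 *	            -> __i915_gem_free_work()
 *	                 -> __i915_gem_free_objects()
 *
 * so the final unbinding and release always run from process context under
 * struct_mutex, never from the RCU callback (softirq) context.
 */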
4673
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01004674void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4675{
4676 lockdep_assert_held(&obj->base.dev->struct_mutex);
4677
Chris Wilsond1b48c12017-08-16 09:52:08 +01004678 if (!i915_gem_object_has_active_reference(obj) &&
4679 i915_gem_object_is_active(obj))
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01004680 i915_gem_object_set_active_reference(obj);
4681 else
4682 i915_gem_object_put(obj);
4683}
4684
Chris Wilson3033aca2016-10-28 13:58:47 +01004685static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
4686{
4687 struct intel_engine_cs *engine;
4688 enum intel_engine_id id;
4689
4690 for_each_engine(engine, dev_priv, id)
Chris Wilsonf131e352016-12-29 14:40:37 +00004691 GEM_BUG_ON(engine->last_retired_context &&
4692 !i915_gem_context_is_kernel(engine->last_retired_context));
Chris Wilson3033aca2016-10-28 13:58:47 +01004693}
4694
Chris Wilson24145512017-01-24 11:01:35 +00004695void i915_gem_sanitize(struct drm_i915_private *i915)
4696{
Chris Wilsonf36325f2017-08-26 12:09:34 +01004697 if (i915_terminally_wedged(&i915->gpu_error)) {
4698 mutex_lock(&i915->drm.struct_mutex);
4699 i915_gem_unset_wedged(i915);
4700 mutex_unlock(&i915->drm.struct_mutex);
4701 }
4702
Chris Wilson24145512017-01-24 11:01:35 +00004703 /*
4704 * If we inherit context state from the BIOS or earlier occupants
4705 * of the GPU, the GPU may be in an inconsistent state when we
4706 * try to take over. The only way to remove the earlier state
4707 * is by resetting. However, resetting on earlier gen is tricky as
4708 * it may impact the display and we are uncertain about the stability
Joonas Lahtinenea117b82017-04-28 10:53:38 +03004709 * of the reset, so this could be applied to even earlier gen.
Chris Wilson24145512017-01-24 11:01:35 +00004710 */
Joonas Lahtinenea117b82017-04-28 10:53:38 +03004711 if (INTEL_GEN(i915) >= 5) {
Chris Wilson24145512017-01-24 11:01:35 +00004712 int reset = intel_gpu_reset(i915, ALL_ENGINES);
4713 WARN_ON(reset && reset != -ENODEV);
4714 }
4715}
4716
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004717int i915_gem_suspend(struct drm_i915_private *dev_priv)
Eric Anholt673a3942008-07-30 12:06:12 -07004718{
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004719 struct drm_device *dev = &dev_priv->drm;
Chris Wilsondcff85c2016-08-05 10:14:11 +01004720 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004721
Chris Wilsonc998e8a2017-03-02 08:30:29 +00004722 intel_runtime_pm_get(dev_priv);
Chris Wilson54b4f682016-07-21 21:16:19 +01004723 intel_suspend_gt_powersave(dev_priv);
4724
Chris Wilson45c5f202013-10-16 11:50:01 +01004725 mutex_lock(&dev->struct_mutex);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004726
4727 /* We have to flush all the executing contexts to main memory so
4728	 * that they can be saved in the hibernation image. To ensure the last
4729 * context image is coherent, we have to switch away from it. That
4730 * leaves the dev_priv->kernel_context still active when
4731 * we actually suspend, and its image in memory may not match the GPU
4732 * state. Fortunately, the kernel_context is disposable and we do
4733 * not rely on its state.
4734 */
4735 ret = i915_gem_switch_to_kernel_context(dev_priv);
4736 if (ret)
Chris Wilsonc998e8a2017-03-02 08:30:29 +00004737 goto err_unlock;
Chris Wilson5ab57c72016-07-15 14:56:20 +01004738
Chris Wilson22dd3bb2016-09-09 14:11:50 +01004739 ret = i915_gem_wait_for_idle(dev_priv,
4740 I915_WAIT_INTERRUPTIBLE |
4741 I915_WAIT_LOCKED);
Chris Wilsoncad99462017-08-26 12:09:33 +01004742 if (ret && ret != -EIO)
Chris Wilsonc998e8a2017-03-02 08:30:29 +00004743 goto err_unlock;
Chris Wilsonf7403342013-09-13 23:57:04 +01004744
Chris Wilson3033aca2016-10-28 13:58:47 +01004745 assert_kernel_context_is_current(dev_priv);
Chris Wilson829a0af2017-06-20 12:05:45 +01004746 i915_gem_contexts_lost(dev_priv);
Chris Wilson45c5f202013-10-16 11:50:01 +01004747 mutex_unlock(&dev->struct_mutex);
4748
Sagar Arun Kamble63987bf2017-04-05 15:51:50 +05304749 intel_guc_suspend(dev_priv);
4750
Chris Wilson737b1502015-01-26 18:03:03 +02004751 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
Chris Wilson67d97da2016-07-04 08:08:31 +01004752 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
Chris Wilsonbdeb9782016-12-23 14:57:56 +00004753
4754	/* As the idle_work will rearm itself if it detects a race, play safe and
4755 * repeat the flush until it is definitely idle.
4756 */
Chris Wilson7c262402017-10-06 11:40:38 +01004757 drain_delayed_work(&dev_priv->gt.idle_work);
Chris Wilsonbdeb9782016-12-23 14:57:56 +00004758
Chris Wilsonbdcf1202014-11-25 11:56:33 +00004759	/* Assert that we successfully flushed all the work and
4760 * reset the GPU back to its idle, low power state.
4761 */
Chris Wilson67d97da2016-07-04 08:08:31 +01004762 WARN_ON(dev_priv->gt.awake);
Chris Wilsonfc692bd2017-08-26 12:09:35 +01004763 if (WARN_ON(!intel_engines_are_idle(dev_priv)))
4764 i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
Chris Wilsonbdcf1202014-11-25 11:56:33 +00004765
Imre Deak1c777c52016-10-12 17:46:37 +03004766 /*
4767	 * Neither the BIOS, ourselves nor any other kernel
4768 * expects the system to be in execlists mode on startup,
4769 * so we need to reset the GPU back to legacy mode. And the only
4770 * known way to disable logical contexts is through a GPU reset.
4771 *
4772 * So in order to leave the system in a known default configuration,
4773 * always reset the GPU upon unload and suspend. Afterwards we then
4774 * clean up the GEM state tracking, flushing off the requests and
4775 * leaving the system in a known idle state.
4776 *
4777	 * Note that it is of the utmost importance that the GPU is idle and
4778 * all stray writes are flushed *before* we dismantle the backing
4779 * storage for the pinned objects.
4780 *
4781 * However, since we are uncertain that resetting the GPU on older
4782 * machines is a good idea, we don't - just in case it leaves the
4783 * machine in an unusable condition.
4784 */
Chris Wilson24145512017-01-24 11:01:35 +00004785 i915_gem_sanitize(dev_priv);
Chris Wilsoncad99462017-08-26 12:09:33 +01004786
4787 intel_runtime_pm_put(dev_priv);
4788 return 0;
Imre Deak1c777c52016-10-12 17:46:37 +03004789
Chris Wilsonc998e8a2017-03-02 08:30:29 +00004790err_unlock:
Chris Wilson45c5f202013-10-16 11:50:01 +01004791 mutex_unlock(&dev->struct_mutex);
Chris Wilsonc998e8a2017-03-02 08:30:29 +00004792 intel_runtime_pm_put(dev_priv);
Chris Wilson45c5f202013-10-16 11:50:01 +01004793 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004794}
4795
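/*
 * Undo i915_gem_suspend() on resume: restore the GTT mappings and fence
 * registers and restart the submission backend. The kernel context was not
 * flushed before suspend, so its image is reset and started afresh.
 */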
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004796void i915_gem_resume(struct drm_i915_private *dev_priv)
Chris Wilson5ab57c72016-07-15 14:56:20 +01004797{
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004798 struct drm_device *dev = &dev_priv->drm;
Chris Wilson5ab57c72016-07-15 14:56:20 +01004799
Imre Deak31ab49a2016-11-07 11:20:05 +02004800 WARN_ON(dev_priv->gt.awake);
4801
Chris Wilson5ab57c72016-07-15 14:56:20 +01004802 mutex_lock(&dev->struct_mutex);
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00004803 i915_gem_restore_gtt_mappings(dev_priv);
Sagar Arun Kamble269e6ea2017-09-29 10:28:36 +05304804 i915_gem_restore_fences(dev_priv);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004805
4806 /* As we didn't flush the kernel context before suspend, we cannot
4807 * guarantee that the context image is complete. So let's just reset
4808 * it and start again.
4809 */
Chris Wilson821ed7d2016-09-09 14:11:53 +01004810 dev_priv->gt.resume(dev_priv);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004811
4812 mutex_unlock(&dev->struct_mutex);
4813}
4814
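/*
 * Enable bit 6 swizzling of tiled surfaces in the arbiter/tiling control
 * registers on gen5+, but only when the detected memory configuration
 * actually requires swizzling.
 */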
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004815void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004816{
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004817 if (INTEL_GEN(dev_priv) < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004818 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4819 return;
4820
4821 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4822 DISP_TILE_SURFACE_SWIZZLING);
4823
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004824 if (IS_GEN5(dev_priv))
Daniel Vetter11782b02012-01-31 16:47:55 +01004825 return;
4826
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004827 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004828 if (IS_GEN6(dev_priv))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004829 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004830 else if (IS_GEN7(dev_priv))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004831 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004832 else if (IS_GEN8(dev_priv))
Ben Widawsky31a53362013-11-02 21:07:04 -07004833 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004834 else
4835 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004836}
Daniel Vettere21af882012-02-09 20:53:27 +01004837
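/* Zero a ring's control registers so the hardware sees it as idle. */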
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004838static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004839{
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004840 I915_WRITE(RING_CTL(base), 0);
4841 I915_WRITE(RING_HEAD(base), 0);
4842 I915_WRITE(RING_TAIL(base), 0);
4843 I915_WRITE(RING_START(base), 0);
4844}
4845
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004846static void init_unused_rings(struct drm_i915_private *dev_priv)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004847{
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004848 if (IS_I830(dev_priv)) {
4849 init_unused_ring(dev_priv, PRB1_BASE);
4850 init_unused_ring(dev_priv, SRB0_BASE);
4851 init_unused_ring(dev_priv, SRB1_BASE);
4852 init_unused_ring(dev_priv, SRB2_BASE);
4853 init_unused_ring(dev_priv, SRB3_BASE);
4854 } else if (IS_GEN2(dev_priv)) {
4855 init_unused_ring(dev_priv, SRB0_BASE);
4856 init_unused_ring(dev_priv, SRB1_BASE);
4857 } else if (IS_GEN3(dev_priv)) {
4858 init_unused_ring(dev_priv, PRB1_BASE);
4859 init_unused_ring(dev_priv, PRB2_BASE);
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004860 }
4861}
4862
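/*
 * (Re)initialise the hardware state of every engine; used from
 * i915_gem_init_hw() after the global setup has been programmed.
 */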
Chris Wilson20a8a742017-02-08 14:30:31 +00004863static int __i915_gem_restart_engines(void *data)
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004864{
Chris Wilson20a8a742017-02-08 14:30:31 +00004865 struct drm_i915_private *i915 = data;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004866 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05304867 enum intel_engine_id id;
Chris Wilson20a8a742017-02-08 14:30:31 +00004868 int err;
4869
4870 for_each_engine(engine, i915, id) {
4871 err = engine->init_hw(engine);
4872 if (err)
4873 return err;
4874 }
4875
4876 return 0;
4877}
4878
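/*
 * Program the global hardware state: swizzling, the unused legacy rings and
 * PPGTT, then restart every engine, program the MOCS tables and load the
 * GuC/HuC firmware. Requires the kernel context to already exist.
 */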
4879int i915_gem_init_hw(struct drm_i915_private *dev_priv)
4880{
Chris Wilsond200cda2016-04-28 09:56:44 +01004881 int ret;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004882
Chris Wilsonde867c22016-10-25 13:16:02 +01004883 dev_priv->gt.last_init_time = ktime_get();
4884
Chris Wilson5e4f5182015-02-13 14:35:59 +00004885 /* Double layer security blanket, see i915_gem_init() */
4886 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4887
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00004888 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004889 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004890
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01004891 if (IS_HASWELL(dev_priv))
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004892 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004893 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004894
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004895 if (HAS_PCH_NOP(dev_priv)) {
Tvrtko Ursulinfd6b8f42016-10-14 10:13:06 +01004896 if (IS_IVYBRIDGE(dev_priv)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004897 u32 temp = I915_READ(GEN7_MSG_CTL);
4898 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4899 I915_WRITE(GEN7_MSG_CTL, temp);
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004900 } else if (INTEL_GEN(dev_priv) >= 7) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004901 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4902 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4903 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4904 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004905 }
4906
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004907 i915_gem_init_swizzling(dev_priv);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004908
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01004909 /*
4910 * At least 830 can leave some of the unused rings
4911	 * "active" (i.e. head != tail) after resume which
4912	 * will prevent c3 entry. Make sure all unused rings
4913 * are totally idle.
4914 */
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004915 init_unused_rings(dev_priv);
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01004916
Dave Gordoned54c1a2016-01-19 19:02:54 +00004917 BUG_ON(!dev_priv->kernel_context);
Chris Wilson6f74b362017-10-15 15:37:25 +01004918 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
4919 ret = -EIO;
4920 goto out;
4921 }
John Harrison90638cc2015-05-29 17:43:37 +01004922
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004923 ret = i915_ppgtt_init_hw(dev_priv);
John Harrison4ad2fd82015-06-18 13:11:20 +01004924 if (ret) {
4925 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4926 goto out;
4927 }
4928
4929 /* Need to do basic initialisation of all rings first: */
Chris Wilson20a8a742017-02-08 14:30:31 +00004930 ret = __i915_gem_restart_engines(dev_priv);
4931 if (ret)
4932 goto out;
Mika Kuoppala99433932013-01-22 14:12:17 +02004933
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004934 intel_mocs_init_l3cc_table(dev_priv);
Peter Antoine0ccdacf2016-04-13 15:03:25 +01004935
Oscar Mateob8991402017-03-28 09:53:47 -07004936 /* We can't enable contexts until all firmware is loaded */
4937 ret = intel_uc_init_hw(dev_priv);
4938 if (ret)
4939 goto out;
Alex Dai33a732f2015-08-12 15:43:36 +01004940
Chris Wilson5e4f5182015-02-13 14:35:59 +00004941out:
4942 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004943 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004944}
4945
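/*
 * Decide whether hardware semaphores should be used for inter-engine
 * serialisation: an explicit non-negative override wins, otherwise they are
 * disabled with execlists, on gen<6, and on SNB when IO remapping is active.
 */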
Chris Wilson39df9192016-07-20 13:31:57 +01004946bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4947{
4948 if (INTEL_INFO(dev_priv)->gen < 6)
4949 return false;
4950
4951 /* TODO: make semaphores and Execlists play nicely together */
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00004952 if (i915_modparams.enable_execlists)
Chris Wilson39df9192016-07-20 13:31:57 +01004953 return false;
4954
4955 if (value >= 0)
4956 return value;
4957
Chris Wilson39df9192016-07-20 13:31:57 +01004958 /* Enable semaphores on SNB when IO remapping is off */
Chris Wilson80debff2017-05-25 13:16:12 +01004959 if (IS_GEN6(dev_priv) && intel_vtd_active())
Chris Wilson39df9192016-07-20 13:31:57 +01004960 return false;
Chris Wilson39df9192016-07-20 13:31:57 +01004961
4962 return true;
4963}
4964
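/*
 * One-time GEM initialisation at driver load: select the submission backend
 * (legacy ring buffers or execlists), set up the GGTT, contexts and engines,
 * and finally bring up the hardware via i915_gem_init_hw().
 */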
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004965int i915_gem_init(struct drm_i915_private *dev_priv)
Chris Wilson1070a422012-04-24 15:47:41 +01004966{
Chris Wilson1070a422012-04-24 15:47:41 +01004967 int ret;
4968
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004969 mutex_lock(&dev_priv->drm.struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004970
Matthew Auldda9fe3f32017-10-06 23:18:31 +01004971 /*
4972	 * We need to fall back to 4K pages since gvt gtt handling doesn't
4973	 * support huge page entries - we will need to check whether the
4974	 * hypervisor mm can support huge guest pages, or else emulate them in gvt.
4975 */
4976 if (intel_vgpu_active(dev_priv))
4977 mkwrite_device_info(dev_priv)->page_sizes =
4978 I915_GTT_PAGE_SIZE_4K;
4979
Chris Wilson94312822017-05-03 10:39:18 +01004980 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
Chris Wilson57822dc2017-02-22 11:40:48 +00004981
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00004982 if (!i915_modparams.enable_execlists) {
Chris Wilson821ed7d2016-09-09 14:11:53 +01004983 dev_priv->gt.resume = intel_legacy_submission_resume;
Chris Wilson7e37f882016-08-02 22:50:21 +01004984 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
Oscar Mateo454afeb2014-07-24 17:04:22 +01004985 } else {
Chris Wilson821ed7d2016-09-09 14:11:53 +01004986 dev_priv->gt.resume = intel_lr_context_resume;
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004987 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
Oscar Mateoa83014d2014-07-24 17:04:21 +01004988 }
4989
Chris Wilson5e4f5182015-02-13 14:35:59 +00004990 /* This is just a security blanket to placate dragons.
4991 * On some systems, we very sporadically observe that the first TLBs
4992 * used by the CS may be stale, despite us poking the TLB reset. If
4993 * we hold the forcewake during initialisation these problems
4994 * just magically go away.
4995 */
4996 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4997
Chris Wilson8a2421b2017-06-16 15:05:22 +01004998 ret = i915_gem_init_userptr(dev_priv);
4999 if (ret)
5000 goto out_unlock;
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01005001
5002 ret = i915_gem_init_ggtt(dev_priv);
5003 if (ret)
5004 goto out_unlock;
Jesse Barnesd62b4892013-03-08 10:45:53 -08005005
Chris Wilson829a0af2017-06-20 12:05:45 +01005006 ret = i915_gem_contexts_init(dev_priv);
Jani Nikula7bcc3772014-12-05 14:17:42 +02005007 if (ret)
5008 goto out_unlock;
Ben Widawsky2fa48d82013-12-06 14:11:04 -08005009
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00005010 ret = intel_engines_init(dev_priv);
Daniel Vetter35a57ff2014-11-20 00:33:07 +01005011 if (ret)
Jani Nikula7bcc3772014-12-05 14:17:42 +02005012 goto out_unlock;
Daniel Vetter53ca26c2012-04-26 23:28:03 +02005013
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00005014 ret = i915_gem_init_hw(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01005015 if (ret == -EIO) {
Chris Wilson7e21d642016-07-27 09:07:29 +01005016 /* Allow engine initialisation to fail by marking the GPU as
Chris Wilson60990322014-04-09 09:19:42 +01005017		 * wedged. But we only want to do this where the GPU is angry;
5018		 * for any other failure, such as an allocation failure, bail.
5019 */
Chris Wilson6f74b362017-10-15 15:37:25 +01005020 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
5021 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
5022 i915_gem_set_wedged(dev_priv);
5023 }
Chris Wilson60990322014-04-09 09:19:42 +01005024 ret = 0;
Chris Wilson1070a422012-04-24 15:47:41 +01005025 }
Jani Nikula7bcc3772014-12-05 14:17:42 +02005026
5027out_unlock:
Chris Wilson5e4f5182015-02-13 14:35:59 +00005028 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00005029 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilson1070a422012-04-24 15:47:41 +01005030
Chris Wilson60990322014-04-09 09:19:42 +01005031 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01005032}
5033
Chris Wilson24145512017-01-24 11:01:35 +00005034void i915_gem_init_mmio(struct drm_i915_private *i915)
5035{
5036 i915_gem_sanitize(i915);
5037}
5038
Zou Nan hai8187a2b2010-05-21 09:08:55 +08005039void
Tvrtko Ursulincb15d9f2016-12-01 14:16:39 +00005040i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08005041{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00005042 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05305043 enum intel_engine_id id;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08005044
Akash Goel3b3f1652016-10-13 22:44:48 +05305045 for_each_engine(engine, dev_priv, id)
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00005046 dev_priv->gt.cleanup_engine(engine);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08005047}
5048
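/*
 * Work out how many fence registers this platform provides (8, 16 or 32,
 * or whatever the hypervisor reports when running as a vGPU) and initialise
 * every register to an unused state.
 */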
Eric Anholt673a3942008-07-30 12:06:12 -07005049void
Imre Deak40ae4e12016-03-16 14:54:03 +02005050i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5051{
Chris Wilson49ef5292016-08-18 17:17:00 +01005052 int i;
Imre Deak40ae4e12016-03-16 14:54:03 +02005053
5054 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5055 !IS_CHERRYVIEW(dev_priv))
5056 dev_priv->num_fence_regs = 32;
Jani Nikula73f67aa2016-12-07 22:48:09 +02005057 else if (INTEL_INFO(dev_priv)->gen >= 4 ||
5058 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5059 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
Imre Deak40ae4e12016-03-16 14:54:03 +02005060 dev_priv->num_fence_regs = 16;
5061 else
5062 dev_priv->num_fence_regs = 8;
5063
Chris Wilsonc0336662016-05-06 15:40:21 +01005064 if (intel_vgpu_active(dev_priv))
Imre Deak40ae4e12016-03-16 14:54:03 +02005065 dev_priv->num_fence_regs =
5066 I915_READ(vgtif_reg(avail_rs.fence_num));
5067
5068 /* Initialize fence registers to zero */
Chris Wilson49ef5292016-08-18 17:17:00 +01005069 for (i = 0; i < dev_priv->num_fence_regs; i++) {
5070 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
5071
5072 fence->i915 = dev_priv;
5073 fence->id = i;
5074 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
5075 }
Tvrtko Ursulin4362f4f2016-11-16 08:55:33 +00005076 i915_gem_restore_fences(dev_priv);
Imre Deak40ae4e12016-03-16 14:54:03 +02005077
Tvrtko Ursulin4362f4f2016-11-16 08:55:33 +00005078 i915_gem_detect_bit_6_swizzle(dev_priv);
Imre Deak40ae4e12016-03-16 14:54:03 +02005079}
5080
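/*
 * Allocate the slab caches and set up the lists, locks, work items and the
 * global timeline used by GEM. i915_gem_load_cleanup() is the matching
 * teardown.
 */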
Chris Wilson73cb9702016-10-28 13:58:46 +01005081int
Tvrtko Ursulincb15d9f2016-12-01 14:16:39 +00005082i915_gem_load_init(struct drm_i915_private *dev_priv)
Eric Anholt673a3942008-07-30 12:06:12 -07005083{
Tvrtko Ursulina9335682016-11-02 15:14:59 +00005084 int err = -ENOMEM;
Chris Wilson42dcedd2012-11-15 11:32:30 +00005085
Tvrtko Ursulina9335682016-11-02 15:14:59 +00005086 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
5087 if (!dev_priv->objects)
Chris Wilson73cb9702016-10-28 13:58:46 +01005088 goto err_out;
Chris Wilson73cb9702016-10-28 13:58:46 +01005089
Tvrtko Ursulina9335682016-11-02 15:14:59 +00005090 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
5091 if (!dev_priv->vmas)
Chris Wilson73cb9702016-10-28 13:58:46 +01005092 goto err_objects;
Chris Wilson73cb9702016-10-28 13:58:46 +01005093
Chris Wilsond1b48c12017-08-16 09:52:08 +01005094 dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
5095 if (!dev_priv->luts)
5096 goto err_vmas;
5097
Tvrtko Ursulina9335682016-11-02 15:14:59 +00005098 dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
5099 SLAB_HWCACHE_ALIGN |
5100 SLAB_RECLAIM_ACCOUNT |
Paul E. McKenney5f0d5a32017-01-18 02:53:44 -08005101 SLAB_TYPESAFE_BY_RCU);
Tvrtko Ursulina9335682016-11-02 15:14:59 +00005102 if (!dev_priv->requests)
Chris Wilsond1b48c12017-08-16 09:52:08 +01005103 goto err_luts;
Chris Wilson73cb9702016-10-28 13:58:46 +01005104
Chris Wilson52e54202016-11-14 20:41:02 +00005105 dev_priv->dependencies = KMEM_CACHE(i915_dependency,
5106 SLAB_HWCACHE_ALIGN |
5107 SLAB_RECLAIM_ACCOUNT);
5108 if (!dev_priv->dependencies)
5109 goto err_requests;
5110
Chris Wilsonc5cf9a92017-05-17 13:10:04 +01005111 dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
5112 if (!dev_priv->priorities)
5113 goto err_dependencies;
5114
Chris Wilson73cb9702016-10-28 13:58:46 +01005115 mutex_lock(&dev_priv->drm.struct_mutex);
5116 INIT_LIST_HEAD(&dev_priv->gt.timelines);
Chris Wilsonbb894852016-11-14 20:40:57 +00005117 err = i915_gem_timeline_init__global(dev_priv);
Chris Wilson73cb9702016-10-28 13:58:46 +01005118 mutex_unlock(&dev_priv->drm.struct_mutex);
5119 if (err)
Chris Wilsonc5cf9a92017-05-17 13:10:04 +01005120 goto err_priorities;
Eric Anholt673a3942008-07-30 12:06:12 -07005121
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01005122 INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
Chris Wilsonf2123812017-10-16 12:40:37 +01005123
5124 spin_lock_init(&dev_priv->mm.obj_lock);
Chris Wilson87701b42017-10-13 21:26:20 +01005125 spin_lock_init(&dev_priv->mm.free_lock);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01005126 init_llist_head(&dev_priv->mm.free_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02005127 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
5128 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07005129 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson275f0392016-10-24 13:42:14 +01005130 INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
Chris Wilsonf2123812017-10-16 12:40:37 +01005131
Chris Wilson67d97da2016-07-04 08:08:31 +01005132 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
Eric Anholt673a3942008-07-30 12:06:12 -07005133 i915_gem_retire_work_handler);
Chris Wilson67d97da2016-07-04 08:08:31 +01005134 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005135 i915_gem_idle_work_handler);
Chris Wilson1f15b762016-07-01 17:23:14 +01005136 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01005137 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01005138
Joonas Lahtinen6f633402016-09-01 14:58:21 +03005139 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
5140
Chris Wilsonb5add952016-08-04 16:32:36 +01005141 spin_lock_init(&dev_priv->fb_tracking.lock);
Chris Wilson73cb9702016-10-28 13:58:46 +01005142
Matthew Auld465c4032017-10-06 23:18:14 +01005143 err = i915_gemfs_init(dev_priv);
5144 if (err)
5145		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);
5146
Chris Wilson73cb9702016-10-28 13:58:46 +01005147 return 0;
5148
Chris Wilsonc5cf9a92017-05-17 13:10:04 +01005149err_priorities:
5150 kmem_cache_destroy(dev_priv->priorities);
Chris Wilson52e54202016-11-14 20:41:02 +00005151err_dependencies:
5152 kmem_cache_destroy(dev_priv->dependencies);
Chris Wilson73cb9702016-10-28 13:58:46 +01005153err_requests:
5154 kmem_cache_destroy(dev_priv->requests);
Chris Wilsond1b48c12017-08-16 09:52:08 +01005155err_luts:
5156 kmem_cache_destroy(dev_priv->luts);
Chris Wilson73cb9702016-10-28 13:58:46 +01005157err_vmas:
5158 kmem_cache_destroy(dev_priv->vmas);
5159err_objects:
5160 kmem_cache_destroy(dev_priv->objects);
5161err_out:
5162 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07005163}
Dave Airlie71acb5e2008-12-30 20:31:46 +10005164
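/*
 * Undo i915_gem_load_init(): drain the deferred object frees, tear down the
 * global timeline and destroy the slab caches.
 */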
Tvrtko Ursulincb15d9f2016-12-01 14:16:39 +00005165void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
Imre Deakd64aa092016-01-19 15:26:29 +02005166{
Chris Wilsonc4d4c1c2017-02-10 16:35:23 +00005167 i915_gem_drain_freed_objects(dev_priv);
Chris Wilson7d5d59e2016-11-01 08:48:41 +00005168 WARN_ON(!llist_empty(&dev_priv->mm.free_list));
Chris Wilsonc4d4c1c2017-02-10 16:35:23 +00005169 WARN_ON(dev_priv->mm.object_count);
Chris Wilson7d5d59e2016-11-01 08:48:41 +00005170
Matthew Auldea84aa72016-11-17 21:04:11 +00005171 mutex_lock(&dev_priv->drm.struct_mutex);
5172 i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
5173 WARN_ON(!list_empty(&dev_priv->gt.timelines));
5174 mutex_unlock(&dev_priv->drm.struct_mutex);
5175
Chris Wilsonc5cf9a92017-05-17 13:10:04 +01005176 kmem_cache_destroy(dev_priv->priorities);
Chris Wilson52e54202016-11-14 20:41:02 +00005177 kmem_cache_destroy(dev_priv->dependencies);
Imre Deakd64aa092016-01-19 15:26:29 +02005178 kmem_cache_destroy(dev_priv->requests);
Chris Wilsond1b48c12017-08-16 09:52:08 +01005179 kmem_cache_destroy(dev_priv->luts);
Imre Deakd64aa092016-01-19 15:26:29 +02005180 kmem_cache_destroy(dev_priv->vmas);
5181 kmem_cache_destroy(dev_priv->objects);
Chris Wilson0eafec62016-08-04 16:32:41 +01005182
5183 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
5184 rcu_barrier();
Matthew Auld465c4032017-10-06 23:18:14 +01005185
5186 i915_gemfs_fini(dev_priv);
Imre Deakd64aa092016-01-19 15:26:29 +02005187}
5188
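/*
 * Hibernation freeze: discard every purgeable object up front so that the
 * hibernation image stays as small as possible (see also
 * i915_gem_freeze_late()).
 */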
Chris Wilson6a800ea2016-09-21 14:51:07 +01005189int i915_gem_freeze(struct drm_i915_private *dev_priv)
5190{
Chris Wilsond0aa3012017-04-07 11:25:49 +01005191	/* Discard all purgeable objects; userspace can recover those as
5192 * required after resuming.
5193 */
Chris Wilson6a800ea2016-09-21 14:51:07 +01005194 i915_gem_shrink_all(dev_priv);
Chris Wilson6a800ea2016-09-21 14:51:07 +01005195
Chris Wilson6a800ea2016-09-21 14:51:07 +01005196 return 0;
5197}
5198
Chris Wilson461fb992016-05-14 07:26:33 +01005199int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
5200{
5201 struct drm_i915_gem_object *obj;
Chris Wilson7aab2d52016-09-09 20:02:18 +01005202 struct list_head *phases[] = {
5203 &dev_priv->mm.unbound_list,
5204 &dev_priv->mm.bound_list,
5205 NULL
5206 }, **p;
Chris Wilson461fb992016-05-14 07:26:33 +01005207
5208 /* Called just before we write the hibernation image.
5209 *
5210 * We need to update the domain tracking to reflect that the CPU
5211 * will be accessing all the pages to create and restore from the
5212 * hibernation, and so upon restoration those pages will be in the
5213 * CPU domain.
5214 *
5215 * To make sure the hibernation image contains the latest state,
5216 * we update that state just before writing out the image.
Chris Wilson7aab2d52016-09-09 20:02:18 +01005217 *
5218 * To try and reduce the hibernation image, we manually shrink
Chris Wilsond0aa3012017-04-07 11:25:49 +01005219 * the objects as well, see i915_gem_freeze()
Chris Wilson461fb992016-05-14 07:26:33 +01005220 */
5221
Chris Wilson912d5722017-09-06 16:19:30 -07005222 i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
Chris Wilson17b93c42017-04-07 11:25:50 +01005223 i915_gem_drain_freed_objects(dev_priv);
Chris Wilson461fb992016-05-14 07:26:33 +01005224
Chris Wilsonf2123812017-10-16 12:40:37 +01005225 spin_lock(&dev_priv->mm.obj_lock);
Chris Wilson7aab2d52016-09-09 20:02:18 +01005226 for (p = phases; *p; p++) {
Chris Wilsonf2123812017-10-16 12:40:37 +01005227 list_for_each_entry(obj, *p, mm.link)
Chris Wilsone27ab732017-06-15 13:38:49 +01005228 __start_cpu_write(obj);
Chris Wilson461fb992016-05-14 07:26:33 +01005229 }
Chris Wilsonf2123812017-10-16 12:40:37 +01005230 spin_unlock(&dev_priv->mm.obj_lock);
Chris Wilson461fb992016-05-14 07:26:33 +01005231
5232 return 0;
5233}
5234
Chris Wilsonf787a5f2010-09-24 16:02:42 +01005235void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00005236{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01005237 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilson15f7bbc2016-07-26 12:01:52 +01005238 struct drm_i915_gem_request *request;
Eric Anholtb9624422009-06-03 07:27:35 +00005239
5240 /* Clean up our request list when the client is going away, so that
5241 * later retire_requests won't dereference our soon-to-be-gone
5242 * file_priv.
5243 */
Chris Wilson1c255952010-09-26 11:03:27 +01005244 spin_lock(&file_priv->mm.lock);
Chris Wilsonc8659ef2017-03-02 12:25:25 +00005245 list_for_each_entry(request, &file_priv->mm.request_list, client_link)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01005246 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01005247 spin_unlock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005248}
5249
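/*
 * Per-client setup when a new drm file is opened: allocate the file_priv,
 * initialise its request tracking and set up its context state via
 * i915_gem_context_open().
 */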
Chris Wilson829a0af2017-06-20 12:05:45 +01005250int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005251{
5252 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08005253 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005254
Chris Wilsonc4c29d72016-11-09 10:45:07 +00005255 DRM_DEBUG("\n");
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005256
5257 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5258 if (!file_priv)
5259 return -ENOMEM;
5260
5261 file->driver_priv = file_priv;
Chris Wilson829a0af2017-06-20 12:05:45 +01005262 file_priv->dev_priv = i915;
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02005263 file_priv->file = file;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005264
5265 spin_lock_init(&file_priv->mm.lock);
5266 INIT_LIST_HEAD(&file_priv->mm.request_list);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005267
Chris Wilsonc80ff162016-07-27 09:07:27 +01005268 file_priv->bsd_engine = -1;
Tvrtko Ursulinde1add32016-01-15 15:12:50 +00005269
Chris Wilson829a0af2017-06-20 12:05:45 +01005270 ret = i915_gem_context_open(i915, file);
Ben Widawskye422b882013-12-06 14:10:58 -08005271 if (ret)
5272 kfree(file_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005273
Ben Widawskye422b882013-12-06 14:10:58 -08005274 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005275}
5276
Daniel Vetterb680c372014-09-19 18:27:27 +02005277/**
5278 * i915_gem_track_fb - update frontbuffer tracking
Geliang Tangd9072a32015-09-15 05:58:44 -07005279 * @old: current GEM buffer for the frontbuffer slots
5280 * @new: new GEM buffer for the frontbuffer slots
5281 * @frontbuffer_bits: bitmask of frontbuffer slots
Daniel Vetterb680c372014-09-19 18:27:27 +02005282 *
5283 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5284 * from @old and setting them in @new. Both @old and @new can be NULL.
5285 */
Daniel Vettera071fa02014-06-18 23:28:09 +02005286void i915_gem_track_fb(struct drm_i915_gem_object *old,
5287 struct drm_i915_gem_object *new,
5288 unsigned frontbuffer_bits)
5289{
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01005290	/* Control of individual bits within the mask is guarded by
5291 * the owning plane->mutex, i.e. we can never see concurrent
5292 * manipulation of individual bits. But since the bitfield as a whole
5293 * is updated using RMW, we need to use atomics in order to update
5294 * the bits.
5295 */
5296 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5297 sizeof(atomic_t) * BITS_PER_BYTE);
5298
Daniel Vettera071fa02014-06-18 23:28:09 +02005299 if (old) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01005300 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
5301 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02005302 }
5303
5304 if (new) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01005305 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
5306 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02005307 }
5308}
5309
Dave Gordonea702992015-07-09 19:29:02 +01005310/* Allocate a new GEM object and fill it with the supplied data */
5311struct drm_i915_gem_object *
Tvrtko Ursulin12d79d72016-12-01 14:16:37 +00005312i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
Dave Gordonea702992015-07-09 19:29:02 +01005313 const void *data, size_t size)
5314{
5315 struct drm_i915_gem_object *obj;
Chris Wilsonbe062fa2017-03-17 19:46:48 +00005316 struct file *file;
5317 size_t offset;
5318 int err;
Dave Gordonea702992015-07-09 19:29:02 +01005319
Tvrtko Ursulin12d79d72016-12-01 14:16:37 +00005320 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
Chris Wilsonfe3db792016-04-25 13:32:13 +01005321 if (IS_ERR(obj))
Dave Gordonea702992015-07-09 19:29:02 +01005322 return obj;
5323
Chris Wilsonce8ff092017-03-17 19:46:47 +00005324 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
Dave Gordonea702992015-07-09 19:29:02 +01005325
Chris Wilsonbe062fa2017-03-17 19:46:48 +00005326 file = obj->base.filp;
5327 offset = 0;
5328 do {
5329 unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
5330 struct page *page;
5331 void *pgdata, *vaddr;
Dave Gordonea702992015-07-09 19:29:02 +01005332
Chris Wilsonbe062fa2017-03-17 19:46:48 +00005333 err = pagecache_write_begin(file, file->f_mapping,
5334 offset, len, 0,
5335 &page, &pgdata);
5336 if (err < 0)
5337 goto fail;
Dave Gordonea702992015-07-09 19:29:02 +01005338
Chris Wilsonbe062fa2017-03-17 19:46:48 +00005339 vaddr = kmap(page);
5340 memcpy(vaddr, data, len);
5341 kunmap(page);
5342
5343 err = pagecache_write_end(file, file->f_mapping,
5344 offset, len, len,
5345 page, pgdata);
5346 if (err < 0)
5347 goto fail;
5348
5349 size -= len;
5350 data += len;
5351 offset += len;
5352 } while (size);
Dave Gordonea702992015-07-09 19:29:02 +01005353
5354 return obj;
5355
5356fail:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01005357 i915_gem_object_put(obj);
Chris Wilsonbe062fa2017-03-17 19:46:48 +00005358 return ERR_PTR(err);
Dave Gordonea702992015-07-09 19:29:02 +01005359}
Chris Wilson96d77632016-10-28 13:58:33 +01005360
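/*
 * Find the scatterlist entry that covers page @n of the object's backing
 * store, returning the page's offset within that entry via @offset. The
 * caller must already hold a pin on the object's pages.
 */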
5361struct scatterlist *
5362i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
5363 unsigned int n,
5364 unsigned int *offset)
5365{
Chris Wilsona4f5ea62016-10-28 13:58:35 +01005366 struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
Chris Wilson96d77632016-10-28 13:58:33 +01005367 struct scatterlist *sg;
5368 unsigned int idx, count;
5369
5370 might_sleep();
5371 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01005372 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
Chris Wilson96d77632016-10-28 13:58:33 +01005373
5374 /* As we iterate forward through the sg, we record each entry in a
5375 * radixtree for quick repeated (backwards) lookups. If we have seen
5376 * this index previously, we will have an entry for it.
5377 *
5378 * Initial lookup is O(N), but this is amortized to O(1) for
5379 * sequential page access (where each new request is consecutive
5380 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
5381 * i.e. O(1) with a large constant!
5382 */
5383 if (n < READ_ONCE(iter->sg_idx))
5384 goto lookup;
5385
5386 mutex_lock(&iter->lock);
5387
5388	/* We prefer to reuse the last sg so that repeated lookups of this
5389 * (or the subsequent) sg are fast - comparing against the last
5390 * sg is faster than going through the radixtree.
5391 */
5392
5393 sg = iter->sg_pos;
5394 idx = iter->sg_idx;
5395 count = __sg_page_count(sg);
5396
5397 while (idx + count <= n) {
5398 unsigned long exception, i;
5399 int ret;
5400
5401 /* If we cannot allocate and insert this entry, or the
5402 * individual pages from this range, cancel updating the
5403 * sg_idx so that on this lookup we are forced to linearly
5404 * scan onwards, but on future lookups we will try the
5405 * insertion again (in which case we need to be careful of
5406 * the error return reporting that we have already inserted
5407 * this index).
5408 */
5409 ret = radix_tree_insert(&iter->radix, idx, sg);
5410 if (ret && ret != -EEXIST)
5411 goto scan;
5412
5413 exception =
5414 RADIX_TREE_EXCEPTIONAL_ENTRY |
5415 idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
5416 for (i = 1; i < count; i++) {
5417 ret = radix_tree_insert(&iter->radix, idx + i,
5418 (void *)exception);
5419 if (ret && ret != -EEXIST)
5420 goto scan;
5421 }
5422
5423 idx += count;
5424 sg = ____sg_next(sg);
5425 count = __sg_page_count(sg);
5426 }
5427
5428scan:
5429 iter->sg_pos = sg;
5430 iter->sg_idx = idx;
5431
5432 mutex_unlock(&iter->lock);
5433
5434 if (unlikely(n < idx)) /* insertion completed by another thread */
5435 goto lookup;
5436
5437 /* In case we failed to insert the entry into the radixtree, we need
5438 * to look beyond the current sg.
5439 */
5440 while (idx + count <= n) {
5441 idx += count;
5442 sg = ____sg_next(sg);
5443 count = __sg_page_count(sg);
5444 }
5445
5446 *offset = n - idx;
5447 return sg;
5448
5449lookup:
5450 rcu_read_lock();
5451
5452 sg = radix_tree_lookup(&iter->radix, n);
5453 GEM_BUG_ON(!sg);
5454
5455	/* If this index is in the middle of a multi-page sg entry,
5456 * the radixtree will contain an exceptional entry that points
5457 * to the start of that range. We will return the pointer to
5458 * the base page and the offset of this page within the
5459 * sg entry's range.
5460 */
5461 *offset = 0;
5462 if (unlikely(radix_tree_exception(sg))) {
5463 unsigned long base =
5464 (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
5465
5466 sg = radix_tree_lookup(&iter->radix, base);
5467 GEM_BUG_ON(!sg);
5468
5469 *offset = n - base;
5470 }
5471
5472 rcu_read_unlock();
5473
5474 return sg;
5475}
5476
5477struct page *
5478i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
5479{
5480 struct scatterlist *sg;
5481 unsigned int offset;
5482
5483 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
5484
5485 sg = i915_gem_object_get_sg(obj, n, &offset);
5486 return nth_page(sg_page(sg), offset);
5487}
5488
5489/* Like i915_gem_object_get_page(), but mark the returned page dirty */
5490struct page *
5491i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
5492 unsigned int n)
5493{
5494 struct page *page;
5495
5496 page = i915_gem_object_get_page(obj, n);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01005497 if (!obj->mm.dirty)
Chris Wilson96d77632016-10-28 13:58:33 +01005498 set_page_dirty(page);
5499
5500 return page;
5501}
5502
5503dma_addr_t
5504i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
5505 unsigned long n)
5506{
5507 struct scatterlist *sg;
5508 unsigned int offset;
5509
5510 sg = i915_gem_object_get_sg(obj, n, &offset);
5511 return sg_dma_address(sg) + (offset << PAGE_SHIFT);
5512}
Chris Wilson935a2f72017-02-13 17:15:13 +00005513
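/*
 * Switch an object from the default shmemfs backing store to the physically
 * contiguous "phys" backend. The object is first unbound; it must be marked
 * WILLNEED and must not have an active mapping. Its pages are then
 * reacquired through the phys ops and perma-pinned until release.
 */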
Chris Wilson8eeb7902017-07-26 19:16:01 +01005514int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
5515{
5516 struct sg_table *pages;
5517 int err;
5518
5519 if (align > obj->base.size)
5520 return -EINVAL;
5521
5522 if (obj->ops == &i915_gem_phys_ops)
5523 return 0;
5524
5525 if (obj->ops != &i915_gem_object_ops)
5526 return -EINVAL;
5527
5528 err = i915_gem_object_unbind(obj);
5529 if (err)
5530 return err;
5531
5532 mutex_lock(&obj->mm.lock);
5533
5534 if (obj->mm.madv != I915_MADV_WILLNEED) {
5535 err = -EFAULT;
5536 goto err_unlock;
5537 }
5538
5539 if (obj->mm.quirked) {
5540 err = -EFAULT;
5541 goto err_unlock;
5542 }
5543
5544 if (obj->mm.mapping) {
5545 err = -EBUSY;
5546 goto err_unlock;
5547 }
5548
Chris Wilsonf2123812017-10-16 12:40:37 +01005549 pages = fetch_and_zero(&obj->mm.pages);
5550 if (pages) {
5551 struct drm_i915_private *i915 = to_i915(obj->base.dev);
5552
5553 __i915_gem_object_reset_page_iter(obj);
5554
5555 spin_lock(&i915->mm.obj_lock);
5556 list_del(&obj->mm.link);
5557 spin_unlock(&i915->mm.obj_lock);
5558 }
5559
Chris Wilson8eeb7902017-07-26 19:16:01 +01005560 obj->ops = &i915_gem_phys_ops;
5561
Chris Wilson8fb6a5d2017-07-26 19:16:02 +01005562 err = ____i915_gem_object_get_pages(obj);
Chris Wilson8eeb7902017-07-26 19:16:01 +01005563 if (err)
5564 goto err_xfer;
5565
5566 /* Perma-pin (until release) the physical set of pages */
5567 __i915_gem_object_pin_pages(obj);
5568
5569 if (!IS_ERR_OR_NULL(pages))
5570 i915_gem_object_ops.put_pages(obj, pages);
5571 mutex_unlock(&obj->mm.lock);
5572 return 0;
5573
5574err_xfer:
5575 obj->ops = &i915_gem_object_ops;
5576 obj->mm.pages = pages;
5577err_unlock:
5578 mutex_unlock(&obj->mm.lock);
5579 return err;
5580}
5581
Chris Wilson935a2f72017-02-13 17:15:13 +00005582#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5583#include "selftests/scatterlist.c"
Chris Wilson66d9cb52017-02-13 17:15:17 +00005584#include "selftests/mock_gem_device.c"
Chris Wilson44653982017-02-13 17:15:20 +00005585#include "selftests/huge_gem_object.c"
Matthew Auld40498662017-10-06 23:18:29 +01005586#include "selftests/huge_pages.c"
Chris Wilson8335fd62017-02-13 17:15:28 +00005587#include "selftests/i915_gem_object.c"
Chris Wilson17059452017-02-13 17:15:32 +00005588#include "selftests/i915_gem_coherency.c"
Chris Wilson935a2f72017-02-13 17:15:13 +00005589#endif