/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

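/*
 * Decide whether a CPU write to this object must be followed by a clflush:
 * an object already marked cache_dirty can defer the flush, an object that
 * is not coherent for CPU writes always needs one, and a coherent object
 * only needs it while pinned for display (scanout bypasses the CPU cache).
 */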
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_display;
}

static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	might_sleep();

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_backoff(error),
					       I915_RESET_TIMEOUT);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	pinned = ggtt->base.reserved;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	drm_dma_handle_t *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static const struct drm_i915_gem_object_ops i915_gem_object_ops;

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps)
{
	struct drm_i915_gem_request *rq;

	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq, rps);
		else
			rps = NULL;
	}

	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

	return timeout;
}

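/*
 * Wait on the fences tracked by a reservation object: with I915_WAIT_ALL we
 * wait on every shared fence as well as the exclusive one, otherwise only on
 * the exclusive fence. If everything completed without the reservation being
 * changed in the meantime, the signaled fences are opportunistically pruned.
 */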
static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps)
{
	unsigned int seq = __read_seqcount_begin(&resv->seq);
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps);
			if (timeout < 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);

		prune_fences = count && timeout >= 0;
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (excl && timeout >= 0) {
		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
		prune_fences = timeout >= 0;
	}

	dma_fence_put(excl);

	/* Opportunistically prune the fences iff we know they have *all* been
	 * signaled and that the reservation object has not been changed (i.e.
	 * no new fences have been added).
	 */
	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
		if (reservation_object_trylock(resv)) {
			if (!__read_seqcount_retry(&resv->seq, seq))
				reservation_object_add_excl_fence(resv, NULL);
			reservation_object_unlock(resv);
		}
	}

	return timeout;
}

static void __fence_set_priority(struct dma_fence *fence, int prio)
{
	struct drm_i915_gem_request *rq;
	struct intel_engine_cs *engine;

	if (!dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;
	if (!engine->schedule)
		return;

	engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], prio);
	} else {
		__fence_set_priority(fence, prio);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      int prio)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], prio);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, prio);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps)
{
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps);
	return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       args->size, &args->handle);
}

static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       args->size, &args->handle);
}

static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	if (!(obj->base.write_domain & flush_domains))
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it "immediately" go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
	 */
	wmb();

	switch (obj->base.write_domain) {
	case I915_GEM_DOMAIN_GTT:
		if (!HAS_LLC(dev_priv)) {
			intel_runtime_pm_get(dev_priv);
			spin_lock_irq(&dev_priv->uncore.lock);
			POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
			spin_unlock_irq(&dev_priv->uncore.lock);
			intel_runtime_pm_put(dev_priv);
		}

		intel_fb_obj_flush(obj,
				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
	}

	obj->base.write_domain = 0;
}

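/*
 * Copy helpers for objects that use bit-17 swizzling: the copy proceeds one
 * 64-byte cacheline at a time and XORs the GPU offset with 64, i.e. swaps
 * each pair of adjacent cachelines, to match the swizzled layout seen by
 * the GPU.
 */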
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!obj->cache_dirty &&
	    !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = CLFLUSH_BEFORE;

out:
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (!obj->cache_dirty) {
		*needs_clflush |= CLFLUSH_AFTER;

		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
		if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
			*needs_clflush |= CLFLUSH_BEFORE;
	}

out:
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int offset, int length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
	else
		ret = __copy_to_user(user_data, vaddr + offset, length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

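/*
 * Fast shmem pread path: try a kmap_atomic() copy first and fall back to the
 * sleeping kmap() based slow path when the page needs bit-17 swizzling or the
 * atomic copy cannot complete.
 */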
static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

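/*
 * Read through the GGTT io mapping: try an atomic WC mapping first and, if
 * the copy faults on the user buffer, retry with a full (sleeping) mapping.
 */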
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

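/*
 * pread via the GGTT: used as a fallback when the shmem path cannot be used.
 * The object is pinned into the mappable aperture (or bound one page at a
 * time into a scratch node) and copied out through the io mapping above.
 */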
1040static int
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001041i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
1042 const struct drm_i915_gem_pread *args)
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301043{
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001044 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1045 struct i915_ggtt *ggtt = &i915->ggtt;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301046 struct drm_mm_node node;
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001047 struct i915_vma *vma;
1048 void __user *user_data;
1049 u64 remain, offset;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301050 int ret;
1051
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001052 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1053 if (ret)
1054 return ret;
1055
1056 intel_runtime_pm_get(i915);
1057 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1058 PIN_MAPPABLE | PIN_NONBLOCK);
Chris Wilson18034582016-08-18 17:16:45 +01001059 if (!IS_ERR(vma)) {
1060 node.start = i915_ggtt_offset(vma);
1061 node.allocated = false;
Chris Wilson49ef5292016-08-18 17:17:00 +01001062 ret = i915_vma_put_fence(vma);
Chris Wilson18034582016-08-18 17:16:45 +01001063 if (ret) {
1064 i915_vma_unpin(vma);
1065 vma = ERR_PTR(ret);
1066 }
1067 }
Chris Wilson058d88c2016-08-15 10:49:06 +01001068 if (IS_ERR(vma)) {
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001069 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301070 if (ret)
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001071 goto out_unlock;
1072 GEM_BUG_ON(!node.allocated);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301073 }
1074
1075 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1076 if (ret)
1077 goto out_unpin;
1078
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001079 mutex_unlock(&i915->drm.struct_mutex);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301080
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001081 user_data = u64_to_user_ptr(args->data_ptr);
1082 remain = args->size;
1083 offset = args->offset;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301084
1085 while (remain > 0) {
1086 /* Operation in this page
1087 *
1088 * page_base = page offset within aperture
1089 * page_offset = offset within page
1090 * page_length = bytes to copy for this page
1091 */
1092 u32 page_base = node.start;
1093 unsigned page_offset = offset_in_page(offset);
1094 unsigned page_length = PAGE_SIZE - page_offset;
1095 page_length = remain < page_length ? remain : page_length;
1096 if (node.allocated) {
1097 wmb();
1098 ggtt->base.insert_page(&ggtt->base,
1099 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001100 node.start, I915_CACHE_NONE, 0);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301101 wmb();
1102 } else {
1103 page_base += offset & PAGE_MASK;
1104 }
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001105
1106 if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
1107 user_data, page_length)) {
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301108 ret = -EFAULT;
1109 break;
1110 }
1111
1112 remain -= page_length;
1113 user_data += page_length;
1114 offset += page_length;
1115 }
1116
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001117 mutex_lock(&i915->drm.struct_mutex);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301118out_unpin:
1119 if (node.allocated) {
1120 wmb();
1121 ggtt->base.clear_range(&ggtt->base,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02001122 node.start, node.size);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301123 remove_mappable_node(&node);
1124 } else {
Chris Wilson058d88c2016-08-15 10:49:06 +01001125 i915_vma_unpin(vma);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301126 }
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001127out_unlock:
1128 intel_runtime_pm_put(i915);
1129 mutex_unlock(&i915->drm.struct_mutex);
Chris Wilsonf60d7f02012-09-04 21:02:56 +01001130
Eric Anholteb014592009-03-10 11:44:52 -07001131 return ret;
1132}
1133
Eric Anholt673a3942008-07-30 12:06:12 -07001134/**
1135 * Reads data from the object referenced by handle.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001136 * @dev: drm device pointer
1137 * @data: ioctl data blob
1138 * @file: drm file pointer
Eric Anholt673a3942008-07-30 12:06:12 -07001139 *
1140 * On error, the contents of *data are undefined.
1141 */
1142int
1143i915_gem_pread_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001144 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001145{
1146 struct drm_i915_gem_pread *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001147 struct drm_i915_gem_object *obj;
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001148 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001149
Chris Wilson51311d02010-11-17 09:10:42 +00001150 if (args->size == 0)
1151 return 0;
1152
1153 if (!access_ok(VERIFY_WRITE,
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001154 u64_to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +00001155 args->size))
1156 return -EFAULT;
1157
Chris Wilson03ac0642016-07-20 13:31:51 +01001158 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001159 if (!obj)
1160 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001161
Chris Wilson7dcd2492010-09-26 20:21:44 +01001162 /* Bounds check source. */
Matthew Auld966d5bf2016-12-13 20:32:22 +00001163 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
Chris Wilsonce9d4192010-09-26 20:50:05 +01001164 ret = -EINVAL;
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001165 goto out;
Chris Wilsonce9d4192010-09-26 20:50:05 +01001166 }
1167
Chris Wilsondb53a302011-02-03 11:57:46 +00001168 trace_i915_gem_object_pread(obj, args->offset, args->size);
1169
Chris Wilsone95433c2016-10-28 13:58:27 +01001170 ret = i915_gem_object_wait(obj,
1171 I915_WAIT_INTERRUPTIBLE,
1172 MAX_SCHEDULE_TIMEOUT,
1173 to_rps_client(file));
Chris Wilson258a5ed2016-08-05 10:14:16 +01001174 if (ret)
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001175 goto out;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001176
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001177 ret = i915_gem_object_pin_pages(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001178 if (ret)
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001179 goto out;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001180
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001181 ret = i915_gem_shmem_pread(obj, args);
Chris Wilson9c870d02016-10-24 13:42:15 +01001182 if (ret == -EFAULT || ret == -ENODEV)
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001183 ret = i915_gem_gtt_pread(obj, args);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301184
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001185 i915_gem_object_unpin_pages(obj);
1186out:
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001187 i915_gem_object_put(obj);
Eric Anholteb014592009-03-10 11:44:52 -07001188 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001189}
1190
Keith Packard0839ccb2008-10-30 19:38:48 -07001191/* This is the fast write path which cannot handle
1192 * page faults in the source data
Linus Torvalds9b7530cc2008-10-20 14:16:43 -07001193 */
Linus Torvalds9b7530cc2008-10-20 14:16:43 -07001194
Chris Wilsonfe115622016-10-28 13:58:40 +01001195static inline bool
1196ggtt_write(struct io_mapping *mapping,
1197 loff_t base, int offset,
1198 char __user *user_data, int length)
Keith Packard0839ccb2008-10-30 19:38:48 -07001199{
Ville Syrjäläafe722b2017-09-01 20:12:52 +03001200 void __iomem *vaddr;
Keith Packard0839ccb2008-10-30 19:38:48 -07001201 unsigned long unwritten;
1202
Ben Widawsky4f0c7cf2012-04-16 14:07:47 -07001203 /* We can use the cpu mem copy function because this is X86. */
Ville Syrjäläafe722b2017-09-01 20:12:52 +03001204 vaddr = io_mapping_map_atomic_wc(mapping, base);
1205 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
Keith Packard0839ccb2008-10-30 19:38:48 -07001206 user_data, length);
Chris Wilsonfe115622016-10-28 13:58:40 +01001207 io_mapping_unmap_atomic(vaddr);
1208 if (unwritten) {
Ville Syrjäläafe722b2017-09-01 20:12:52 +03001209 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1210 unwritten = copy_from_user((void __force *)vaddr + offset,
1211 user_data, length);
Chris Wilsonfe115622016-10-28 13:58:40 +01001212 io_mapping_unmap(vaddr);
1213 }
Keith Packard0839ccb2008-10-30 19:38:48 -07001214
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001215 return unwritten;
1216}
1217
Eric Anholt3de09aa2009-03-09 09:42:23 -07001218/**
1219 * This is the fast pwrite path, where we copy the data directly from the
1220 * user into the GTT, uncached.
Chris Wilsonfe115622016-10-28 13:58:40 +01001221 * @obj: i915 GEM object
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001222 * @args: pwrite arguments structure
Eric Anholt3de09aa2009-03-09 09:42:23 -07001223 */
Eric Anholt673a3942008-07-30 12:06:12 -07001224static int
Chris Wilsonfe115622016-10-28 13:58:40 +01001225i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1226 const struct drm_i915_gem_pwrite *args)
Eric Anholt673a3942008-07-30 12:06:12 -07001227{
Chris Wilsonfe115622016-10-28 13:58:40 +01001228 struct drm_i915_private *i915 = to_i915(obj->base.dev);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301229 struct i915_ggtt *ggtt = &i915->ggtt;
1230 struct drm_mm_node node;
Chris Wilsonfe115622016-10-28 13:58:40 +01001231 struct i915_vma *vma;
1232 u64 remain, offset;
1233 void __user *user_data;
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301234 int ret;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301235
Chris Wilsonfe115622016-10-28 13:58:40 +01001236 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1237 if (ret)
1238 return ret;
Daniel Vetter935aaa62012-03-25 19:47:35 +02001239
Chris Wilson9c870d02016-10-24 13:42:15 +01001240 intel_runtime_pm_get(i915);
Chris Wilson058d88c2016-08-15 10:49:06 +01001241 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
Chris Wilsonde895082016-08-04 16:32:34 +01001242 PIN_MAPPABLE | PIN_NONBLOCK);
Chris Wilson18034582016-08-18 17:16:45 +01001243 if (!IS_ERR(vma)) {
1244 node.start = i915_ggtt_offset(vma);
1245 node.allocated = false;
Chris Wilson49ef5292016-08-18 17:17:00 +01001246 ret = i915_vma_put_fence(vma);
Chris Wilson18034582016-08-18 17:16:45 +01001247 if (ret) {
1248 i915_vma_unpin(vma);
1249 vma = ERR_PTR(ret);
1250 }
1251 }
Chris Wilson058d88c2016-08-15 10:49:06 +01001252 if (IS_ERR(vma)) {
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001253 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301254 if (ret)
Chris Wilsonfe115622016-10-28 13:58:40 +01001255 goto out_unlock;
1256 GEM_BUG_ON(!node.allocated);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301257 }
Daniel Vetter935aaa62012-03-25 19:47:35 +02001258
1259 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1260 if (ret)
1261 goto out_unpin;
1262
Chris Wilsonfe115622016-10-28 13:58:40 +01001263 mutex_unlock(&i915->drm.struct_mutex);
1264
Chris Wilsonb19482d2016-08-18 17:16:43 +01001265 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
Paulo Zanoni063e4e62015-02-13 17:23:45 -02001266
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301267 user_data = u64_to_user_ptr(args->data_ptr);
1268 offset = args->offset;
1269 remain = args->size;
1270 while (remain) {
Eric Anholt673a3942008-07-30 12:06:12 -07001271 /* Operation in this page
1272 *
Keith Packard0839ccb2008-10-30 19:38:48 -07001273 * page_base = page offset within aperture
1274 * page_offset = offset within page
1275 * page_length = bytes to copy for this page
Eric Anholt673a3942008-07-30 12:06:12 -07001276 */
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301277 u32 page_base = node.start;
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001278 unsigned int page_offset = offset_in_page(offset);
1279 unsigned int page_length = PAGE_SIZE - page_offset;
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301280 page_length = remain < page_length ? remain : page_length;
1281 if (node.allocated) {
1282 wmb(); /* flush the write before we modify the GGTT */
1283 ggtt->base.insert_page(&ggtt->base,
1284 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1285 node.start, I915_CACHE_NONE, 0);
1286 wmb(); /* flush modifications to the GGTT (insert_page) */
1287 } else {
1288 page_base += offset & PAGE_MASK;
1289 }
Keith Packard0839ccb2008-10-30 19:38:48 -07001290 /* If we get a fault while copying data, then (presumably) our
Eric Anholt3de09aa2009-03-09 09:42:23 -07001291 * source page isn't available. Return the error and we'll
1292 * retry in the slow path.
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301293	 * If the object is non-shmem backed, we retry with the
 1294	 * path that handles page faults.
Keith Packard0839ccb2008-10-30 19:38:48 -07001295 */
Chris Wilsonfe115622016-10-28 13:58:40 +01001296 if (ggtt_write(&ggtt->mappable, page_base, page_offset,
1297 user_data, page_length)) {
1298 ret = -EFAULT;
1299 break;
Daniel Vetter935aaa62012-03-25 19:47:35 +02001300 }
Eric Anholt673a3942008-07-30 12:06:12 -07001301
Keith Packard0839ccb2008-10-30 19:38:48 -07001302 remain -= page_length;
1303 user_data += page_length;
1304 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -07001305 }
Chris Wilsond59b21e2017-02-22 11:40:49 +00001306 intel_fb_obj_flush(obj, ORIGIN_CPU);
Chris Wilsonfe115622016-10-28 13:58:40 +01001307
1308 mutex_lock(&i915->drm.struct_mutex);
Daniel Vetter935aaa62012-03-25 19:47:35 +02001309out_unpin:
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301310 if (node.allocated) {
1311 wmb();
1312 ggtt->base.clear_range(&ggtt->base,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02001313 node.start, node.size);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301314 remove_mappable_node(&node);
1315 } else {
Chris Wilson058d88c2016-08-15 10:49:06 +01001316 i915_vma_unpin(vma);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301317 }
Chris Wilsonfe115622016-10-28 13:58:40 +01001318out_unlock:
Chris Wilson9c870d02016-10-24 13:42:15 +01001319 intel_runtime_pm_put(i915);
Chris Wilsonfe115622016-10-28 13:58:40 +01001320 mutex_unlock(&i915->drm.struct_mutex);
Eric Anholt3de09aa2009-03-09 09:42:23 -07001321 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001322}
1323
Eric Anholt673a3942008-07-30 12:06:12 -07001324static int
Chris Wilsonfe115622016-10-28 13:58:40 +01001325shmem_pwrite_slow(struct page *page, int offset, int length,
Daniel Vetterd174bd62012-03-25 19:47:40 +02001326 char __user *user_data,
1327 bool page_do_bit17_swizzling,
1328 bool needs_clflush_before,
1329 bool needs_clflush_after)
Eric Anholt673a3942008-07-30 12:06:12 -07001330{
Daniel Vetterd174bd62012-03-25 19:47:40 +02001331 char *vaddr;
1332 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001333
Daniel Vetterd174bd62012-03-25 19:47:40 +02001334 vaddr = kmap(page);
Daniel Vettere7e58eb2012-03-25 19:47:43 +02001335 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
Chris Wilsonfe115622016-10-28 13:58:40 +01001336 shmem_clflush_swizzled_range(vaddr + offset, length,
Daniel Vetter23c18c72012-03-25 19:47:42 +02001337 page_do_bit17_swizzling);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001338 if (page_do_bit17_swizzling)
Chris Wilsonfe115622016-10-28 13:58:40 +01001339 ret = __copy_from_user_swizzled(vaddr, offset, user_data,
1340 length);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001341 else
Chris Wilsonfe115622016-10-28 13:58:40 +01001342 ret = __copy_from_user(vaddr + offset, user_data, length);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001343 if (needs_clflush_after)
Chris Wilsonfe115622016-10-28 13:58:40 +01001344 shmem_clflush_swizzled_range(vaddr + offset, length,
Daniel Vetter23c18c72012-03-25 19:47:42 +02001345 page_do_bit17_swizzling);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001346 kunmap(page);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001347
Chris Wilson755d2212012-09-04 21:02:55 +01001348 return ret ? -EFAULT : 0;
Eric Anholt40123c12009-03-09 13:42:30 -07001349}
1350
Chris Wilsonfe115622016-10-28 13:58:40 +01001351/* Per-page copy function for the shmem pwrite fastpath.
1352 * Flushes invalid cachelines before writing to the target if
1353 * needs_clflush_before is set and flushes out any written cachelines after
1354 * writing if needs_clflush is set.
1355 */
Eric Anholt40123c12009-03-09 13:42:30 -07001356static int
Chris Wilsonfe115622016-10-28 13:58:40 +01001357shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1358 bool page_do_bit17_swizzling,
1359 bool needs_clflush_before,
1360 bool needs_clflush_after)
Eric Anholt40123c12009-03-09 13:42:30 -07001361{
Chris Wilsonfe115622016-10-28 13:58:40 +01001362 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001363
Chris Wilsonfe115622016-10-28 13:58:40 +01001364 ret = -ENODEV;
1365 if (!page_do_bit17_swizzling) {
1366 char *vaddr = kmap_atomic(page);
1367
1368 if (needs_clflush_before)
1369 drm_clflush_virt_range(vaddr + offset, len);
1370 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
1371 if (needs_clflush_after)
1372 drm_clflush_virt_range(vaddr + offset, len);
1373
1374 kunmap_atomic(vaddr);
1375 }
1376 if (ret == 0)
1377 return ret;
1378
1379 return shmem_pwrite_slow(page, offset, len, user_data,
1380 page_do_bit17_swizzling,
1381 needs_clflush_before,
1382 needs_clflush_after);
1383}
1384
1385static int
1386i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1387 const struct drm_i915_gem_pwrite *args)
1388{
1389 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1390 void __user *user_data;
1391 u64 remain;
1392 unsigned int obj_do_bit17_swizzling;
1393 unsigned int partial_cacheline_write;
1394 unsigned int needs_clflush;
1395 unsigned int offset, idx;
1396 int ret;
1397
1398 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
Chris Wilson43394c72016-08-18 17:16:47 +01001399 if (ret)
1400 return ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001401
Chris Wilsonfe115622016-10-28 13:58:40 +01001402 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1403 mutex_unlock(&i915->drm.struct_mutex);
1404 if (ret)
1405 return ret;
1406
1407 obj_do_bit17_swizzling = 0;
1408 if (i915_gem_object_needs_bit17_swizzle(obj))
1409 obj_do_bit17_swizzling = BIT(17);
1410
1411 /* If we don't overwrite a cacheline completely we need to be
1412 * careful to have up-to-date data by first clflushing. Don't
 1413	 * overcomplicate things and flush the entire page.
1414 */
1415 partial_cacheline_write = 0;
1416 if (needs_clflush & CLFLUSH_BEFORE)
1417 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
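	/*
	 * Illustrative example (assumed values): with a 64-byte cacheline this
	 * mask is 63, so (offset | length) & partial_cacheline_write below is
	 * non-zero exactly when a copy does not both start and end on a
	 * cacheline boundary and therefore needs the clflush before writing.
	 */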
1418
Chris Wilson43394c72016-08-18 17:16:47 +01001419 user_data = u64_to_user_ptr(args->data_ptr);
Chris Wilson43394c72016-08-18 17:16:47 +01001420 remain = args->size;
Chris Wilsonfe115622016-10-28 13:58:40 +01001421 offset = offset_in_page(args->offset);
1422 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1423 struct page *page = i915_gem_object_get_page(obj, idx);
1424 int length;
Eric Anholt40123c12009-03-09 13:42:30 -07001425
Chris Wilsonfe115622016-10-28 13:58:40 +01001426 length = remain;
1427 if (offset + length > PAGE_SIZE)
1428 length = PAGE_SIZE - offset;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001429
Chris Wilsonfe115622016-10-28 13:58:40 +01001430 ret = shmem_pwrite(page, offset, length, user_data,
1431 page_to_phys(page) & obj_do_bit17_swizzling,
1432 (offset | length) & partial_cacheline_write,
1433 needs_clflush & CLFLUSH_AFTER);
1434 if (ret)
Chris Wilson9da3da62012-06-01 15:20:22 +01001435 break;
1436
Chris Wilsonfe115622016-10-28 13:58:40 +01001437 remain -= length;
1438 user_data += length;
1439 offset = 0;
Eric Anholt40123c12009-03-09 13:42:30 -07001440 }
1441
Chris Wilsond59b21e2017-02-22 11:40:49 +00001442 intel_fb_obj_flush(obj, ORIGIN_CPU);
Chris Wilsonfe115622016-10-28 13:58:40 +01001443 i915_gem_obj_finish_shmem_access(obj);
Eric Anholt40123c12009-03-09 13:42:30 -07001444 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001445}
1446
1447/**
 1448 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001449 * @dev: drm device
1450 * @data: ioctl data blob
1451 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001452 *
1453 * On error, the contents of the buffer that were to be modified are undefined.
1454 */
1455int
1456i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001457 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001458{
1459 struct drm_i915_gem_pwrite *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001460 struct drm_i915_gem_object *obj;
Chris Wilson51311d02010-11-17 09:10:42 +00001461 int ret;
1462
1463 if (args->size == 0)
1464 return 0;
1465
1466 if (!access_ok(VERIFY_READ,
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001467 u64_to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +00001468 args->size))
1469 return -EFAULT;
1470
Chris Wilson03ac0642016-07-20 13:31:51 +01001471 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001472 if (!obj)
1473 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001474
Chris Wilson7dcd2492010-09-26 20:21:44 +01001475 /* Bounds check destination. */
Matthew Auld966d5bf2016-12-13 20:32:22 +00001476 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
Chris Wilsonce9d4192010-09-26 20:50:05 +01001477 ret = -EINVAL;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001478 goto err;
Chris Wilsonce9d4192010-09-26 20:50:05 +01001479 }
1480
Chris Wilsondb53a302011-02-03 11:57:46 +00001481 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1482
Chris Wilson7c55e2c2017-03-07 12:03:38 +00001483 ret = -ENODEV;
1484 if (obj->ops->pwrite)
1485 ret = obj->ops->pwrite(obj, args);
1486 if (ret != -ENODEV)
1487 goto err;
1488
Chris Wilsone95433c2016-10-28 13:58:27 +01001489 ret = i915_gem_object_wait(obj,
1490 I915_WAIT_INTERRUPTIBLE |
1491 I915_WAIT_ALL,
1492 MAX_SCHEDULE_TIMEOUT,
1493 to_rps_client(file));
Chris Wilson258a5ed2016-08-05 10:14:16 +01001494 if (ret)
1495 goto err;
1496
Chris Wilsonfe115622016-10-28 13:58:40 +01001497 ret = i915_gem_object_pin_pages(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001498 if (ret)
Chris Wilsonfe115622016-10-28 13:58:40 +01001499 goto err;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001500
Daniel Vetter935aaa62012-03-25 19:47:35 +02001501 ret = -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -07001502 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1503 * it would end up going through the fenced access, and we'll get
1504 * different detiling behavior between reading and writing.
1505 * pread/pwrite currently are reading and writing from the CPU
1506 * perspective, requiring manual detiling by the client.
1507 */
Chris Wilson6eae0052016-06-20 15:05:52 +01001508 if (!i915_gem_object_has_struct_page(obj) ||
Chris Wilson9c870d02016-10-24 13:42:15 +01001509 cpu_write_needs_clflush(obj))
Daniel Vetter935aaa62012-03-25 19:47:35 +02001510 /* Note that the gtt paths might fail with non-page-backed user
1511 * pointers (e.g. gtt mappings when moving data between
Chris Wilson9c870d02016-10-24 13:42:15 +01001512 * textures). Fallback to the shmem path in that case.
1513 */
Chris Wilsonfe115622016-10-28 13:58:40 +01001514 ret = i915_gem_gtt_pwrite_fast(obj, args);
Eric Anholt673a3942008-07-30 12:06:12 -07001515
Chris Wilsond1054ee2016-07-16 18:42:36 +01001516 if (ret == -EFAULT || ret == -ENOSPC) {
Chris Wilson6a2c4232014-11-04 04:51:40 -08001517 if (obj->phys_handle)
1518 ret = i915_gem_phys_pwrite(obj, args, file);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301519 else
Chris Wilsonfe115622016-10-28 13:58:40 +01001520 ret = i915_gem_shmem_pwrite(obj, args);
Chris Wilson6a2c4232014-11-04 04:51:40 -08001521 }
Daniel Vetter5c0480f2011-12-14 13:57:30 +01001522
Chris Wilsonfe115622016-10-28 13:58:40 +01001523 i915_gem_object_unpin_pages(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001524err:
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001525 i915_gem_object_put(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001526 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001527}
1528
Chris Wilson40e62d52016-10-28 13:58:41 +01001529static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1530{
1531 struct drm_i915_private *i915;
1532 struct list_head *list;
1533 struct i915_vma *vma;
1534
1535 list_for_each_entry(vma, &obj->vma_list, obj_link) {
1536 if (!i915_vma_is_ggtt(vma))
Chris Wilson28f412e2016-12-23 14:57:55 +00001537 break;
Chris Wilson40e62d52016-10-28 13:58:41 +01001538
1539 if (i915_vma_is_active(vma))
1540 continue;
1541
1542 if (!drm_mm_node_allocated(&vma->node))
1543 continue;
1544
1545 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1546 }
1547
1548 i915 = to_i915(obj->base.dev);
1549 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
Joonas Lahtinen56cea322016-11-02 12:16:04 +02001550 list_move_tail(&obj->global_link, list);
Chris Wilson40e62d52016-10-28 13:58:41 +01001551}
1552
Eric Anholt673a3942008-07-30 12:06:12 -07001553/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001554 * i915_gem_set_domain_ioctl - Called when user space prepares to use an object
 1555 * with the CPU, either through the mmap ioctl's mapping or a GTT mapping.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001556 * @dev: drm device
1557 * @data: ioctl data blob
1558 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001559 */
1560int
1561i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001562 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001563{
1564 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001565 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001566 uint32_t read_domains = args->read_domains;
1567 uint32_t write_domain = args->write_domain;
Chris Wilson40e62d52016-10-28 13:58:41 +01001568 int err;
Eric Anholt673a3942008-07-30 12:06:12 -07001569
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001570 /* Only handle setting domains to types used by the CPU. */
Chris Wilsonb8f90962016-08-05 10:14:07 +01001571 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001572 return -EINVAL;
1573
1574 /* Having something in the write domain implies it's in the read
1575 * domain, and only that read domain. Enforce that in the request.
1576 */
1577 if (write_domain != 0 && read_domains != write_domain)
1578 return -EINVAL;
1579
Chris Wilson03ac0642016-07-20 13:31:51 +01001580 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001581 if (!obj)
1582 return -ENOENT;
Jesse Barnes652c3932009-08-17 13:31:43 -07001583
Chris Wilson3236f572012-08-24 09:35:09 +01001584 /* Try to flush the object off the GPU without holding the lock.
1585 * We will repeat the flush holding the lock in the normal manner
1586 * to catch cases where we are gazumped.
1587 */
Chris Wilson40e62d52016-10-28 13:58:41 +01001588 err = i915_gem_object_wait(obj,
Chris Wilsone95433c2016-10-28 13:58:27 +01001589 I915_WAIT_INTERRUPTIBLE |
1590 (write_domain ? I915_WAIT_ALL : 0),
1591 MAX_SCHEDULE_TIMEOUT,
1592 to_rps_client(file));
Chris Wilson40e62d52016-10-28 13:58:41 +01001593 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001594 goto out;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001595
Chris Wilson40e62d52016-10-28 13:58:41 +01001596 /* Flush and acquire obj->pages so that we are coherent through
1597 * direct access in memory with previous cached writes through
1598 * shmemfs and that our cache domain tracking remains valid.
1599 * For example, if the obj->filp was moved to swap without us
1600 * being notified and releasing the pages, we would mistakenly
1601 * continue to assume that the obj remained out of the CPU cached
1602 * domain.
1603 */
1604 err = i915_gem_object_pin_pages(obj);
1605 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001606 goto out;
Chris Wilson40e62d52016-10-28 13:58:41 +01001607
1608 err = i915_mutex_lock_interruptible(dev);
1609 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001610 goto out_unpin;
Chris Wilson3236f572012-08-24 09:35:09 +01001611
Chris Wilsone22d8e32017-04-12 12:01:11 +01001612 if (read_domains & I915_GEM_DOMAIN_WC)
1613 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
1614 else if (read_domains & I915_GEM_DOMAIN_GTT)
1615 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
Chris Wilson43566de2015-01-02 16:29:29 +05301616 else
Chris Wilsone22d8e32017-04-12 12:01:11 +01001617 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
Chris Wilson40e62d52016-10-28 13:58:41 +01001618
1619 /* And bump the LRU for this access */
1620 i915_gem_object_bump_inactive_ggtt(obj);
1621
1622 mutex_unlock(&dev->struct_mutex);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001623
Daniel Vetter031b6982015-06-26 19:35:16 +02001624 if (write_domain != 0)
Chris Wilsonef749212017-04-12 12:01:10 +01001625 intel_fb_obj_invalidate(obj,
1626 fb_write_origin(obj, write_domain));
Daniel Vetter031b6982015-06-26 19:35:16 +02001627
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001628out_unpin:
Chris Wilson40e62d52016-10-28 13:58:41 +01001629 i915_gem_object_unpin_pages(obj);
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001630out:
1631 i915_gem_object_put(obj);
Chris Wilson40e62d52016-10-28 13:58:41 +01001632 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07001633}
1634
1635/**
 1636 * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001637 * @dev: drm device
1638 * @data: ioctl data blob
1639 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001640 */
1641int
1642i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001643 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001644{
1645 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001646 struct drm_i915_gem_object *obj;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001647
Chris Wilson03ac0642016-07-20 13:31:51 +01001648 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonc21724c2016-08-05 10:14:19 +01001649 if (!obj)
1650 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001651
Eric Anholt673a3942008-07-30 12:06:12 -07001652 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson5a97bcc2017-02-22 11:40:46 +00001653 i915_gem_object_flush_if_display(obj);
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001654 i915_gem_object_put(obj);
Chris Wilson5a97bcc2017-02-22 11:40:46 +00001655
1656 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001657}
1658
1659/**
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001660 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1661 * it is mapped to.
1662 * @dev: drm device
1663 * @data: ioctl data blob
1664 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001665 *
1666 * While the mapping holds a reference on the contents of the object, it doesn't
1667 * imply a ref on the object itself.
Daniel Vetter34367382014-10-16 12:28:18 +02001668 *
1669 * IMPORTANT:
1670 *
 1671 * DRM driver writers who look at this function as an example for how to do GEM
1672 * mmap support, please don't implement mmap support like here. The modern way
1673 * to implement DRM mmap support is with an mmap offset ioctl (like
1674 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1675 * That way debug tooling like valgrind will understand what's going on, hiding
1676 * the mmap call in a driver private ioctl will break that. The i915 driver only
1677 * does cpu mmaps this way because we didn't know better.
Eric Anholt673a3942008-07-30 12:06:12 -07001678 */
1679int
1680i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001681 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001682{
1683 struct drm_i915_gem_mmap *args = data;
Chris Wilson03ac0642016-07-20 13:31:51 +01001684 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001685 unsigned long addr;
1686
Akash Goel1816f922015-01-02 16:29:30 +05301687 if (args->flags & ~(I915_MMAP_WC))
1688 return -EINVAL;
1689
Borislav Petkov568a58e2016-03-29 17:42:01 +02001690 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
Akash Goel1816f922015-01-02 16:29:30 +05301691 return -ENODEV;
1692
Chris Wilson03ac0642016-07-20 13:31:51 +01001693 obj = i915_gem_object_lookup(file, args->handle);
1694 if (!obj)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001695 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001696
Daniel Vetter1286ff72012-05-10 15:25:09 +02001697 /* prime objects have no backing filp to GEM mmap
1698 * pages from.
1699 */
Chris Wilson03ac0642016-07-20 13:31:51 +01001700 if (!obj->base.filp) {
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001701 i915_gem_object_put(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001702 return -EINVAL;
1703 }
1704
Chris Wilson03ac0642016-07-20 13:31:51 +01001705 addr = vm_mmap(obj->base.filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001706 PROT_READ | PROT_WRITE, MAP_SHARED,
1707 args->offset);
Akash Goel1816f922015-01-02 16:29:30 +05301708 if (args->flags & I915_MMAP_WC) {
1709 struct mm_struct *mm = current->mm;
1710 struct vm_area_struct *vma;
1711
Michal Hocko80a89a52016-05-23 16:26:11 -07001712 if (down_write_killable(&mm->mmap_sem)) {
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001713 i915_gem_object_put(obj);
Michal Hocko80a89a52016-05-23 16:26:11 -07001714 return -EINTR;
1715 }
Akash Goel1816f922015-01-02 16:29:30 +05301716 vma = find_vma(mm, addr);
1717 if (vma)
1718 vma->vm_page_prot =
1719 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1720 else
1721 addr = -ENOMEM;
1722 up_write(&mm->mmap_sem);
Chris Wilsonaeecc962016-06-17 14:46:39 -03001723
1724 /* This may race, but that's ok, it only gets set */
Chris Wilson50349242016-08-18 17:17:04 +01001725 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
Akash Goel1816f922015-01-02 16:29:30 +05301726 }
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001727 i915_gem_object_put(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001728 if (IS_ERR((void *)addr))
1729 return addr;
1730
1731 args->addr_ptr = (uint64_t) addr;
1732
1733 return 0;
1734}
1735
Chris Wilson03af84f2016-08-18 17:17:01 +01001736static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1737{
Chris Wilson6649a0b2017-01-09 16:16:08 +00001738 return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
Chris Wilson03af84f2016-08-18 17:17:01 +01001739}
1740
Jesse Barnesde151cf2008-11-12 10:03:55 -08001741/**
Chris Wilson4cc69072016-08-25 19:05:19 +01001742 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1743 *
1744 * A history of the GTT mmap interface:
1745 *
1746 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1747 * aligned and suitable for fencing, and still fit into the available
1748 * mappable space left by the pinned display objects. A classic problem
1749 * we called the page-fault-of-doom where we would ping-pong between
1750 * two objects that could not fit inside the GTT and so the memcpy
1751 * would page one object in at the expense of the other between every
1752 * single byte.
1753 *
 1754 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1755 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1756 * object is too large for the available space (or simply too large
1757 * for the mappable aperture!), a view is created instead and faulted
1758 * into userspace. (This view is aligned and sized appropriately for
1759 * fenced access.)
1760 *
Chris Wilsone22d8e32017-04-12 12:01:11 +01001761 * 2 - Recognise WC as a separate cache domain so that we can flush the
1762 * delayed writes via GTT before performing direct access via WC.
1763 *
Chris Wilson4cc69072016-08-25 19:05:19 +01001764 * Restrictions:
1765 *
1766 * * snoopable objects cannot be accessed via the GTT. It can cause machine
1767 * hangs on some architectures, corruption on others. An attempt to service
1768 * a GTT page fault from a snoopable object will generate a SIGBUS.
1769 *
 1770 * * the object must be able to fit into RAM (physical memory, though not
1771 * limited to the mappable aperture).
1772 *
1773 *
1774 * Caveats:
1775 *
1776 * * a new GTT page fault will synchronize rendering from the GPU and flush
1777 * all data to system memory. Subsequent access will not be synchronized.
1778 *
1779 * * all mappings are revoked on runtime device suspend.
1780 *
1781 * * there are only 8, 16 or 32 fence registers to share between all users
1782 * (older machines require fence register for display and blitter access
1783 * as well). Contention of the fence registers will cause the previous users
1784 * to be unmapped and any new access will generate new page faults.
1785 *
1786 * * running out of memory while servicing a fault may generate a SIGBUS,
1787 * rather than the expected SIGSEGV.
1788 */
1789int i915_gem_mmap_gtt_version(void)
1790{
Chris Wilsone22d8e32017-04-12 12:01:11 +01001791 return 2;
Chris Wilson4cc69072016-08-25 19:05:19 +01001792}
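
/*
 * Illustrative userspace sketch (assumed usage, not taken from this file):
 * the version above is typically discovered through GETPARAM, e.g.
 *
 *	int value = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &value,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * where fd is an open i915 DRM file descriptor; value >= 1 implies partial
 * views are available and value >= 2 adds the WC domain handling above.
 */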
1793
Chris Wilson2d4281b2017-01-10 09:56:32 +00001794static inline struct i915_ggtt_view
1795compute_partial_view(struct drm_i915_gem_object *obj,
Chris Wilson2d4281b2017-01-10 09:56:32 +00001796 pgoff_t page_offset,
1797 unsigned int chunk)
1798{
1799 struct i915_ggtt_view view;
1800
1801 if (i915_gem_object_is_tiled(obj))
1802 chunk = roundup(chunk, tile_row_pages(obj));
1803
Chris Wilson2d4281b2017-01-10 09:56:32 +00001804 view.type = I915_GGTT_VIEW_PARTIAL;
Chris Wilson8bab11932017-01-14 00:28:25 +00001805 view.partial.offset = rounddown(page_offset, chunk);
1806 view.partial.size =
Chris Wilson2d4281b2017-01-10 09:56:32 +00001807 min_t(unsigned int, chunk,
Chris Wilson8bab11932017-01-14 00:28:25 +00001808 (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
Chris Wilson2d4281b2017-01-10 09:56:32 +00001809
1810 /* If the partial covers the entire object, just create a normal VMA. */
1811 if (chunk >= obj->base.size >> PAGE_SHIFT)
1812 view.type = I915_GGTT_VIEW_NORMAL;
1813
1814 return view;
1815}
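
/*
 * Worked example for compute_partial_view() above (illustrative values,
 * untiled object): a fault at page_offset 1000 into a 4096-page object with
 * chunk == 256 yields view.partial.offset == rounddown(1000, 256) == 768 and
 * view.partial.size == min(256, 4096 - 768) == 256; since 256 < 4096 the
 * view remains I915_GGTT_VIEW_PARTIAL.
 */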
1816
Chris Wilson4cc69072016-08-25 19:05:19 +01001817/**
Jesse Barnesde151cf2008-11-12 10:03:55 -08001818 * i915_gem_fault - fault a page into the GTT
Geliang Tangd9072a32015-09-15 05:58:44 -07001819 * @vmf: fault info
Jesse Barnesde151cf2008-11-12 10:03:55 -08001820 *
1821 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1822 * from userspace. The fault handler takes care of binding the object to
1823 * the GTT (if needed), allocating and programming a fence register (again,
1824 * only if needed based on whether the old reg is still valid or the object
1825 * is tiled) and inserting a new PTE into the faulting process.
1826 *
1827 * Note that the faulting process may involve evicting existing objects
1828 * from the GTT and/or fence registers to make room. So performance may
1829 * suffer if the GTT working set is large or there are few fence registers
1830 * left.
Chris Wilson4cc69072016-08-25 19:05:19 +01001831 *
1832 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1833 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
Jesse Barnesde151cf2008-11-12 10:03:55 -08001834 */
Dave Jiang11bac802017-02-24 14:56:41 -08001835int i915_gem_fault(struct vm_fault *vmf)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001836{
Chris Wilson03af84f2016-08-18 17:17:01 +01001837#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
Dave Jiang11bac802017-02-24 14:56:41 -08001838 struct vm_area_struct *area = vmf->vma;
Chris Wilson058d88c2016-08-15 10:49:06 +01001839 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
Chris Wilson05394f32010-11-08 19:18:58 +00001840 struct drm_device *dev = obj->base.dev;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001841 struct drm_i915_private *dev_priv = to_i915(dev);
1842 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001843 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Chris Wilson058d88c2016-08-15 10:49:06 +01001844 struct i915_vma *vma;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001845 pgoff_t page_offset;
Chris Wilson82118872016-08-18 17:17:05 +01001846 unsigned int flags;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001847 int ret;
Paulo Zanonif65c9162013-11-27 18:20:34 -02001848
Jesse Barnesde151cf2008-11-12 10:03:55 -08001849 /* We don't use vmf->pgoff since that has the fake offset */
Jan Kara1a29d852016-12-14 15:07:01 -08001850 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001851
Chris Wilsondb53a302011-02-03 11:57:46 +00001852 trace_i915_gem_object_fault(obj, page_offset, true, write);
1853
Chris Wilson6e4930f2014-02-07 18:37:06 -02001854 /* Try to flush the object off the GPU first without holding the lock.
Chris Wilsonb8f90962016-08-05 10:14:07 +01001855 * Upon acquiring the lock, we will perform our sanity checks and then
Chris Wilson6e4930f2014-02-07 18:37:06 -02001856 * repeat the flush holding the lock in the normal manner to catch cases
1857 * where we are gazumped.
1858 */
Chris Wilsone95433c2016-10-28 13:58:27 +01001859 ret = i915_gem_object_wait(obj,
1860 I915_WAIT_INTERRUPTIBLE,
1861 MAX_SCHEDULE_TIMEOUT,
1862 NULL);
Chris Wilson6e4930f2014-02-07 18:37:06 -02001863 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001864 goto err;
1865
Chris Wilson40e62d52016-10-28 13:58:41 +01001866 ret = i915_gem_object_pin_pages(obj);
1867 if (ret)
1868 goto err;
1869
Chris Wilsonb8f90962016-08-05 10:14:07 +01001870 intel_runtime_pm_get(dev_priv);
1871
1872 ret = i915_mutex_lock_interruptible(dev);
1873 if (ret)
1874 goto err_rpm;
Chris Wilson6e4930f2014-02-07 18:37:06 -02001875
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001876 /* Access to snoopable pages through the GTT is incoherent. */
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00001877 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
Chris Wilsonddeff6e2014-05-28 16:16:41 +01001878 ret = -EFAULT;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001879 goto err_unlock;
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001880 }
1881
Chris Wilson82118872016-08-18 17:17:05 +01001882 /* If the object is smaller than a couple of partial vma, it is
1883 * not worth only creating a single partial vma - we may as well
1884 * clear enough space for the full object.
1885 */
1886 flags = PIN_MAPPABLE;
1887 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1888 flags |= PIN_NONBLOCK | PIN_NONFAULT;
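	/*
	 * Illustrative numbers: with 4 KiB pages MIN_CHUNK_PAGES is 256, so
	 * only objects larger than 2 MiB take the PIN_NONBLOCK | PIN_NONFAULT
	 * route here and may fall back to the partial view below.
	 */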
1889
Chris Wilsona61007a2016-08-18 17:17:02 +01001890 /* Now pin it into the GTT as needed */
Chris Wilson82118872016-08-18 17:17:05 +01001891 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
Chris Wilsona61007a2016-08-18 17:17:02 +01001892 if (IS_ERR(vma)) {
Chris Wilsona61007a2016-08-18 17:17:02 +01001893 /* Use a partial view if it is bigger than available space */
Chris Wilson2d4281b2017-01-10 09:56:32 +00001894 struct i915_ggtt_view view =
Chris Wilson8201c1f2017-01-10 09:56:33 +00001895 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
Chris Wilsonaa136d92016-08-18 17:17:03 +01001896
Chris Wilson50349242016-08-18 17:17:04 +01001897 /* Userspace is now writing through an untracked VMA, abandon
1898 * all hope that the hardware is able to track future writes.
1899 */
1900 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1901
Chris Wilsona61007a2016-08-18 17:17:02 +01001902 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1903 }
Chris Wilson058d88c2016-08-15 10:49:06 +01001904 if (IS_ERR(vma)) {
1905 ret = PTR_ERR(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001906 goto err_unlock;
Chris Wilson058d88c2016-08-15 10:49:06 +01001907 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001908
Chris Wilsonc9839302012-11-20 10:45:17 +00001909 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1910 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001911 goto err_unpin;
Chris Wilsonc9839302012-11-20 10:45:17 +00001912
Chris Wilson3bd40732017-10-09 09:43:56 +01001913 ret = i915_vma_pin_fence(vma);
Chris Wilsonc9839302012-11-20 10:45:17 +00001914 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001915 goto err_unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001916
Chris Wilsonb90b91d2014-06-10 12:14:40 +01001917 /* Finally, remap it using the new GTT offset */
Chris Wilsonc58305a2016-08-19 16:54:28 +01001918 ret = remap_io_mapping(area,
Chris Wilson8bab11932017-01-14 00:28:25 +00001919 area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
Chris Wilsonc58305a2016-08-19 16:54:28 +01001920 (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1921 min_t(u64, vma->size, area->vm_end - area->vm_start),
1922 &ggtt->mappable);
Chris Wilsona65adaf2017-10-09 09:43:57 +01001923 if (ret)
1924 goto err_fence;
Chris Wilsona61007a2016-08-18 17:17:02 +01001925
Chris Wilsona65adaf2017-10-09 09:43:57 +01001926 /* Mark as being mmapped into userspace for later revocation */
1927 assert_rpm_wakelock_held(dev_priv);
1928 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
1929 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1930 GEM_BUG_ON(!obj->userfault_count);
1931
1932err_fence:
Chris Wilson3bd40732017-10-09 09:43:56 +01001933 i915_vma_unpin_fence(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001934err_unpin:
Chris Wilson058d88c2016-08-15 10:49:06 +01001935 __i915_vma_unpin(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001936err_unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001937 mutex_unlock(&dev->struct_mutex);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001938err_rpm:
1939 intel_runtime_pm_put(dev_priv);
Chris Wilson40e62d52016-10-28 13:58:41 +01001940 i915_gem_object_unpin_pages(obj);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001941err:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001942 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001943 case -EIO:
Daniel Vetter2232f032014-09-04 09:36:18 +02001944 /*
1945 * We eat errors when the gpu is terminally wedged to avoid
1946 * userspace unduly crashing (gl has no provisions for mmaps to
1947 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1948 * and so needs to be reported.
1949 */
1950 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
Paulo Zanonif65c9162013-11-27 18:20:34 -02001951 ret = VM_FAULT_SIGBUS;
1952 break;
1953 }
Chris Wilson045e7692010-11-07 09:18:22 +00001954 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001955 /*
1956 * EAGAIN means the gpu is hung and we'll wait for the error
1957 * handler to reset everything when re-faulting in
1958 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001959 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001960 case 0:
1961 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001962 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001963 case -EBUSY:
1964 /*
1965 * EBUSY is ok: this just means that another thread
1966 * already did the job.
1967 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001968 ret = VM_FAULT_NOPAGE;
1969 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001970 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001971 ret = VM_FAULT_OOM;
1972 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001973 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00001974 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001975 ret = VM_FAULT_SIGBUS;
1976 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001977 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001978 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02001979 ret = VM_FAULT_SIGBUS;
1980 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001981 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02001982 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001983}
1984
Chris Wilsona65adaf2017-10-09 09:43:57 +01001985static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
1986{
1987 struct i915_vma *vma;
1988
1989 GEM_BUG_ON(!obj->userfault_count);
1990
1991 obj->userfault_count = 0;
1992 list_del(&obj->userfault_link);
1993 drm_vma_node_unmap(&obj->base.vma_node,
1994 obj->base.dev->anon_inode->i_mapping);
1995
1996 list_for_each_entry(vma, &obj->vma_list, obj_link) {
1997 if (!i915_vma_is_ggtt(vma))
1998 break;
1999
2000 i915_vma_unset_userfault(vma);
2001 }
2002}
2003
Jesse Barnesde151cf2008-11-12 10:03:55 -08002004/**
Chris Wilson901782b2009-07-10 08:18:50 +01002005 * i915_gem_release_mmap - remove physical page mappings
2006 * @obj: obj in question
2007 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002008 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01002009 * relinquish ownership of the pages back to the system.
2010 *
2011 * It is vital that we remove the page mapping if we have mapped a tiled
2012 * object through the GTT and then lose the fence register due to
 2013 * resource pressure. Similarly, if the object has been moved out of the
 2014 * aperture, then pages mapped into userspace must be revoked. Removing the
2015 * mapping will then trigger a page fault on the next user access, allowing
2016 * fixup by i915_gem_fault().
2017 */
Eric Anholtd05ca302009-07-10 13:02:26 -07002018void
Chris Wilson05394f32010-11-08 19:18:58 +00002019i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01002020{
Chris Wilson275f0392016-10-24 13:42:14 +01002021 struct drm_i915_private *i915 = to_i915(obj->base.dev);
Chris Wilson275f0392016-10-24 13:42:14 +01002022
Chris Wilson349f2cc2016-04-13 17:35:12 +01002023 /* Serialisation between user GTT access and our code depends upon
2024 * revoking the CPU's PTE whilst the mutex is held. The next user
2025 * pagefault then has to wait until we release the mutex.
Chris Wilson9c870d02016-10-24 13:42:15 +01002026 *
2027 * Note that RPM complicates somewhat by adding an additional
2028 * requirement that operations to the GGTT be made holding the RPM
2029 * wakeref.
Chris Wilson349f2cc2016-04-13 17:35:12 +01002030 */
Chris Wilson275f0392016-10-24 13:42:14 +01002031 lockdep_assert_held(&i915->drm.struct_mutex);
Chris Wilson9c870d02016-10-24 13:42:15 +01002032 intel_runtime_pm_get(i915);
Chris Wilson349f2cc2016-04-13 17:35:12 +01002033
Chris Wilsona65adaf2017-10-09 09:43:57 +01002034 if (!obj->userfault_count)
Chris Wilson9c870d02016-10-24 13:42:15 +01002035 goto out;
Chris Wilson901782b2009-07-10 08:18:50 +01002036
Chris Wilsona65adaf2017-10-09 09:43:57 +01002037 __i915_gem_object_release_mmap(obj);
Chris Wilson349f2cc2016-04-13 17:35:12 +01002038
 2039	/* Ensure that the CPU's PTEs are revoked and there are no outstanding
 2040	 * memory transactions from userspace before we return. The TLB
 2041	 * flushing implied by changing the PTE above *should* be
2042 * sufficient, an extra barrier here just provides us with a bit
2043 * of paranoid documentation about our requirement to serialise
2044 * memory writes before touching registers / GSM.
2045 */
2046 wmb();
Chris Wilson9c870d02016-10-24 13:42:15 +01002047
2048out:
2049 intel_runtime_pm_put(i915);
Chris Wilson901782b2009-07-10 08:18:50 +01002050}
2051
Chris Wilson7c108fd2016-10-24 13:42:18 +01002052void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
Chris Wilsoneedd10f2014-06-16 08:57:44 +01002053{
Chris Wilson3594a3e2016-10-24 13:42:16 +01002054 struct drm_i915_gem_object *obj, *on;
Chris Wilson7c108fd2016-10-24 13:42:18 +01002055 int i;
Chris Wilsoneedd10f2014-06-16 08:57:44 +01002056
Chris Wilson3594a3e2016-10-24 13:42:16 +01002057 /*
2058 * Only called during RPM suspend. All users of the userfault_list
2059 * must be holding an RPM wakeref to ensure that this can not
2060 * run concurrently with themselves (and use the struct_mutex for
2061 * protection between themselves).
2062 */
2063
2064 list_for_each_entry_safe(obj, on,
Chris Wilsona65adaf2017-10-09 09:43:57 +01002065 &dev_priv->mm.userfault_list, userfault_link)
2066 __i915_gem_object_release_mmap(obj);
Chris Wilson7c108fd2016-10-24 13:42:18 +01002067
2068 /* The fence will be lost when the device powers down. If any were
2069 * in use by hardware (i.e. they are pinned), we should not be powering
2070 * down! All other fences will be reacquired by the user upon waking.
2071 */
2072 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2073 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2074
Chris Wilsone0ec3ec2017-02-03 12:57:17 +00002075 /* Ideally we want to assert that the fence register is not
2076 * live at this point (i.e. that no piece of code will be
2077 * trying to write through fence + GTT, as that both violates
2078 * our tracking of activity and associated locking/barriers,
2079 * but also is illegal given that the hw is powered down).
2080 *
2081 * Previously we used reg->pin_count as a "liveness" indicator.
2082 * That is not sufficient, and we need a more fine-grained
2083 * tool if we want to have a sanity check here.
2084 */
Chris Wilson7c108fd2016-10-24 13:42:18 +01002085
2086 if (!reg->vma)
2087 continue;
2088
Chris Wilsona65adaf2017-10-09 09:43:57 +01002089 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
Chris Wilson7c108fd2016-10-24 13:42:18 +01002090 reg->dirty = true;
2091 }
Chris Wilsoneedd10f2014-06-16 08:57:44 +01002092}
2093
Chris Wilsond8cb5082012-08-11 15:41:03 +01002094static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2095{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002096 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002097 int err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002098
Chris Wilsonf3f61842016-08-05 10:14:14 +01002099 err = drm_gem_create_mmap_offset(&obj->base);
Chris Wilsonb42a13d2017-01-06 15:22:40 +00002100 if (likely(!err))
Chris Wilsonf3f61842016-08-05 10:14:14 +01002101 return 0;
Daniel Vetterda494d72012-12-20 15:11:16 +01002102
Chris Wilsonb42a13d2017-01-06 15:22:40 +00002103 /* Attempt to reap some mmap space from dead objects */
2104 do {
2105 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2106 if (err)
2107 break;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002108
Chris Wilsonb42a13d2017-01-06 15:22:40 +00002109 i915_gem_drain_freed_objects(dev_priv);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002110 err = drm_gem_create_mmap_offset(&obj->base);
Chris Wilsonb42a13d2017-01-06 15:22:40 +00002111 if (!err)
2112 break;
2113
2114 } while (flush_delayed_work(&dev_priv->gt.retire_work));
Daniel Vetterda494d72012-12-20 15:11:16 +01002115
Chris Wilsonf3f61842016-08-05 10:14:14 +01002116 return err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002117}
2118
2119static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2120{
Chris Wilsond8cb5082012-08-11 15:41:03 +01002121 drm_gem_free_mmap_offset(&obj->base);
2122}
2123
Dave Airlieda6b51d2014-12-24 13:11:17 +10002124int
Dave Airlieff72145b2011-02-07 12:16:14 +10002125i915_gem_mmap_gtt(struct drm_file *file,
2126 struct drm_device *dev,
Dave Airlieda6b51d2014-12-24 13:11:17 +10002127 uint32_t handle,
Dave Airlieff72145b2011-02-07 12:16:14 +10002128 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002129{
Chris Wilson05394f32010-11-08 19:18:58 +00002130 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002131 int ret;
2132
Chris Wilson03ac0642016-07-20 13:31:51 +01002133 obj = i915_gem_object_lookup(file, handle);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002134 if (!obj)
2135 return -ENOENT;
Chris Wilsonab182822009-09-22 18:46:17 +01002136
Chris Wilsond8cb5082012-08-11 15:41:03 +01002137 ret = i915_gem_object_create_mmap_offset(obj);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002138 if (ret == 0)
2139 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002140
Chris Wilsonf0cd5182016-10-28 13:58:43 +01002141 i915_gem_object_put(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002142 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002143}
2144
Dave Airlieff72145b2011-02-07 12:16:14 +10002145/**
2146 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2147 * @dev: DRM device
2148 * @data: GTT mapping ioctl data
2149 * @file: GEM object info
2150 *
2151 * Simply returns the fake offset to userspace so it can mmap it.
2152 * The mmap call will end up in drm_gem_mmap(), which will set things
2153 * up so we can get faults in the handler above.
2154 *
2155 * The fault handler will take care of binding the object into the GTT
2156 * (since it may have been evicted to make room for something), allocating
2157 * a fence register, and mapping the appropriate aperture address into
2158 * userspace.
2159 */
2160int
2161i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2162 struct drm_file *file)
2163{
2164 struct drm_i915_gem_mmap_gtt *args = data;
2165
Dave Airlieda6b51d2014-12-24 13:11:17 +10002166 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
Dave Airlieff72145b2011-02-07 12:16:14 +10002167}
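
/*
 * Illustrative userspace sketch (assumed usage, not taken from this file):
 * the fake offset returned above is consumed by a plain mmap(2) on the DRM
 * fd, e.g.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * after which faults on ptr are serviced by i915_gem_fault() above.
 */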
2168
Daniel Vetter225067e2012-08-20 10:23:20 +02002169/* Immediately discard the backing storage */
2170static void
2171i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01002172{
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002173 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02002174
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002175 if (obj->base.filp == NULL)
2176 return;
2177
Daniel Vetter225067e2012-08-20 10:23:20 +02002178 /* Our goal here is to return as much of the memory as
2179 * is possible back to the system as we are called from OOM.
2180 * To do this we must instruct the shmfs to drop all of its
2181 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01002182 */
Chris Wilson55372522014-03-25 13:23:06 +00002183 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002184 obj->mm.madv = __I915_MADV_PURGED;
Chris Wilson4e5462e2017-03-07 13:20:31 +00002185 obj->mm.pages = ERR_PTR(-EFAULT);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002186}
Chris Wilsone5281cc2010-10-28 13:45:36 +01002187
Chris Wilson55372522014-03-25 13:23:06 +00002188/* Try to discard unwanted pages */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002189void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
Daniel Vetter225067e2012-08-20 10:23:20 +02002190{
Chris Wilson55372522014-03-25 13:23:06 +00002191 struct address_space *mapping;
2192
Chris Wilson1233e2d2016-10-28 13:58:37 +01002193 lockdep_assert_held(&obj->mm.lock);
2194 GEM_BUG_ON(obj->mm.pages);
2195
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002196 switch (obj->mm.madv) {
Chris Wilson55372522014-03-25 13:23:06 +00002197 case I915_MADV_DONTNEED:
2198 i915_gem_object_truncate(obj);
2199 case __I915_MADV_PURGED:
2200 return;
2201 }
2202
2203 if (obj->base.filp == NULL)
2204 return;
2205
Al Viro93c76a32015-12-04 23:45:44 -05002206	mapping = obj->base.filp->f_mapping;
Chris Wilson55372522014-03-25 13:23:06 +00002207 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002208}
2209
Chris Wilson5cdf5882010-09-27 15:51:07 +01002210static void
Chris Wilson03ac84f2016-10-28 13:58:36 +01002211i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2212 struct sg_table *pages)
Eric Anholt673a3942008-07-30 12:06:12 -07002213{
Dave Gordon85d12252016-05-20 11:54:06 +01002214 struct sgt_iter sgt_iter;
2215 struct page *page;
Daniel Vetter1286ff72012-05-10 15:25:09 +02002216
Chris Wilsone5facdf2016-12-23 14:57:57 +00002217 __i915_gem_object_release_shmem(obj, pages, true);
Eric Anholt856fa192009-03-19 14:10:50 -07002218
Chris Wilson03ac84f2016-10-28 13:58:36 +01002219 i915_gem_gtt_finish_pages(obj, pages);
Imre Deake2273302015-07-09 12:59:05 +03002220
Daniel Vetter6dacfd22011-09-12 21:30:02 +02002221 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002222 i915_gem_object_save_bit_17_swizzle(obj, pages);
Eric Anholt280b7132009-03-12 16:56:27 -07002223
Chris Wilson03ac84f2016-10-28 13:58:36 +01002224 for_each_sgt_page(page, sgt_iter, pages) {
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002225 if (obj->mm.dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01002226 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002227
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002228 if (obj->mm.madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01002229 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002230
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002231 put_page(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002232 }
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002233 obj->mm.dirty = false;
Eric Anholt673a3942008-07-30 12:06:12 -07002234
Chris Wilson03ac84f2016-10-28 13:58:36 +01002235 sg_free_table(pages);
2236 kfree(pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01002237}
2238
Chris Wilson96d77632016-10-28 13:58:33 +01002239static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2240{
2241 struct radix_tree_iter iter;
Ville Syrjäläc23aa712017-09-01 20:12:51 +03002242 void __rcu **slot;
Chris Wilson96d77632016-10-28 13:58:33 +01002243
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002244 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2245 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
Chris Wilson96d77632016-10-28 13:58:33 +01002246}
2247
Chris Wilson548625e2016-11-01 12:11:34 +00002248void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2249 enum i915_mm_subclass subclass)
Chris Wilson37e680a2012-06-07 15:38:42 +01002250{
Chris Wilson03ac84f2016-10-28 13:58:36 +01002251 struct sg_table *pages;
Chris Wilson37e680a2012-06-07 15:38:42 +01002252
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002253 if (i915_gem_object_has_pinned_pages(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002254 return;
Chris Wilsona5570172012-09-04 21:02:54 +01002255
Chris Wilson15717de2016-08-04 07:52:26 +01002256 GEM_BUG_ON(obj->bind_count);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002257 if (!READ_ONCE(obj->mm.pages))
2258 return;
2259
2260 /* May be called by shrinker from within get_pages() (on another bo) */
Chris Wilson548625e2016-11-01 12:11:34 +00002261 mutex_lock_nested(&obj->mm.lock, subclass);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002262 if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2263 goto unlock;
Ben Widawsky3e123022013-07-31 17:00:04 -07002264
Chris Wilsona2165e32012-12-03 11:49:00 +00002265 /* ->put_pages might need to allocate memory for the bit17 swizzle
2266 * array, hence protect them from being reaped by removing them from gtt
2267 * lists early. */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002268 pages = fetch_and_zero(&obj->mm.pages);
2269 GEM_BUG_ON(!pages);
Chris Wilsona2165e32012-12-03 11:49:00 +00002270
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002271 if (obj->mm.mapping) {
Chris Wilson4b30cb22016-08-18 17:16:42 +01002272 void *ptr;
2273
Chris Wilson0ce81782017-05-17 13:09:59 +01002274 ptr = page_mask_bits(obj->mm.mapping);
Chris Wilson4b30cb22016-08-18 17:16:42 +01002275 if (is_vmalloc_addr(ptr))
2276 vunmap(ptr);
Chris Wilsonfb8621d2016-04-08 12:11:14 +01002277 else
Chris Wilson4b30cb22016-08-18 17:16:42 +01002278 kunmap(kmap_to_page(ptr));
2279
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002280 obj->mm.mapping = NULL;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002281 }
2282
Chris Wilson96d77632016-10-28 13:58:33 +01002283 __i915_gem_object_reset_page_iter(obj);
2284
Chris Wilson4e5462e2017-03-07 13:20:31 +00002285 if (!IS_ERR(pages))
2286 obj->ops->put_pages(obj, pages);
2287
Matthew Aulda5c081662017-10-06 23:18:18 +01002288 obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
2289
Chris Wilson1233e2d2016-10-28 13:58:37 +01002290unlock:
2291 mutex_unlock(&obj->mm.lock);
Chris Wilson6c085a72012-08-20 11:40:46 +02002292}
2293
Chris Wilson935a2f72017-02-13 17:15:13 +00002294static bool i915_sg_trim(struct sg_table *orig_st)
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002295{
2296 struct sg_table new_st;
2297 struct scatterlist *sg, *new_sg;
2298 unsigned int i;
2299
2300 if (orig_st->nents == orig_st->orig_nents)
Chris Wilson935a2f72017-02-13 17:15:13 +00002301 return false;
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002302
Chris Wilson8bfc478f2016-12-23 14:57:58 +00002303 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
Chris Wilson935a2f72017-02-13 17:15:13 +00002304 return false;
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002305
2306 new_sg = new_st.sgl;
2307 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2308 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2309 /* called before being DMA mapped, no need to copy sg->dma_* */
2310 new_sg = sg_next(new_sg);
2311 }
Chris Wilsonc2dc6cc2016-12-19 12:43:46 +00002312 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002313
2314 sg_free_table(orig_st);
2315
2316 *orig_st = new_st;
Chris Wilson935a2f72017-02-13 17:15:13 +00002317 return true;
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002318}
2319
Matthew Auldb91b09e2017-10-06 23:18:17 +01002320static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002321{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002322 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsond766ef52016-12-19 12:43:45 +00002323 const unsigned long page_count = obj->base.size / PAGE_SIZE;
2324 unsigned long i;
Eric Anholt673a3942008-07-30 12:06:12 -07002325 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01002326 struct sg_table *st;
2327 struct scatterlist *sg;
Dave Gordon85d12252016-05-20 11:54:06 +01002328 struct sgt_iter sgt_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07002329 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02002330 unsigned long last_pfn = 0; /* suppress gcc warning */
Tvrtko Ursulin56024522017-08-03 10:14:17 +01002331 unsigned int max_segment = i915_sg_segment_size();
Matthew Aulda5c081662017-10-06 23:18:18 +01002332 unsigned int sg_mask;
Chris Wilson4846bf02017-06-09 12:03:46 +01002333 gfp_t noreclaim;
Imre Deake2273302015-07-09 12:59:05 +03002334 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002335
Chris Wilson6c085a72012-08-20 11:40:46 +02002336 /* Assert that the object is not currently in any GPU domain. As it
2337 * wasn't in the GTT, there shouldn't be any way it could have been in
2338 * a GPU cache
2339 */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002340 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2341 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Chris Wilson6c085a72012-08-20 11:40:46 +02002342
Chris Wilson9da3da62012-06-01 15:20:22 +01002343 st = kmalloc(sizeof(*st), GFP_KERNEL);
2344 if (st == NULL)
Matthew Auldb91b09e2017-10-06 23:18:17 +01002345 return -ENOMEM;
Eric Anholt673a3942008-07-30 12:06:12 -07002346
Chris Wilsond766ef52016-12-19 12:43:45 +00002347rebuild_st:
Chris Wilson9da3da62012-06-01 15:20:22 +01002348 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01002349 kfree(st);
Matthew Auldb91b09e2017-10-06 23:18:17 +01002350 return -ENOMEM;
Chris Wilson9da3da62012-06-01 15:20:22 +01002351 }
2352
2353 /* Get the list of pages out of our struct file. They'll be pinned
2354 * at this point until we release them.
2355 *
2356 * Fail silently without starting the shrinker
2357 */
Al Viro93c76a32015-12-04 23:45:44 -05002358 mapping = obj->base.filp->f_mapping;
Chris Wilson0f6ab552017-06-09 12:03:48 +01002359 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
Chris Wilson4846bf02017-06-09 12:03:46 +01002360 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2361
Imre Deak90797e62013-02-18 19:28:03 +02002362 sg = st->sgl;
2363 st->nents = 0;
Matthew Aulda5c081662017-10-06 23:18:18 +01002364 sg_mask = 0;
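	/*
	 * sg_mask accumulates the length of every coalesced segment built in
	 * the loop below; it is handed to __i915_gem_object_set_pages() at the
	 * end, presumably to seed the obj->mm.page_sizes bookkeeping.
	 */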
Imre Deak90797e62013-02-18 19:28:03 +02002365 for (i = 0; i < page_count; i++) {
Chris Wilson4846bf02017-06-09 12:03:46 +01002366 const unsigned int shrink[] = {
2367 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2368 0,
2369 }, *s = shrink;
2370 gfp_t gfp = noreclaim;
2371
2372 do {
Chris Wilson6c085a72012-08-20 11:40:46 +02002373 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
Chris Wilson4846bf02017-06-09 12:03:46 +01002374 if (likely(!IS_ERR(page)))
2375 break;
2376
2377 if (!*s) {
2378 ret = PTR_ERR(page);
2379 goto err_sg;
2380 }
2381
Chris Wilson912d5722017-09-06 16:19:30 -07002382 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
Chris Wilson4846bf02017-06-09 12:03:46 +01002383 cond_resched();
Chris Wilson24f8e002017-03-22 11:05:21 +00002384
Chris Wilson6c085a72012-08-20 11:40:46 +02002385 /* We've tried hard to allocate the memory by reaping
2386 * our own buffer, now let the real VM do its job and
2387 * go down in flames if truly OOM.
Chris Wilson24f8e002017-03-22 11:05:21 +00002388 *
2389 * However, since graphics tend to be disposable,
2390 * defer the oom here by reporting the ENOMEM back
2391 * to userspace.
Chris Wilson6c085a72012-08-20 11:40:46 +02002392 */
Chris Wilson4846bf02017-06-09 12:03:46 +01002393 if (!*s) {
2394 /* reclaim and warn, but no oom */
2395 gfp = mapping_gfp_mask(mapping);
Chris Wilsoneaf41802017-06-09 12:03:47 +01002396
2397 /* Our bo are always dirty and so we require
2398 * kswapd to reclaim our pages (direct reclaim
2399 * does not effectively begin pageout of our
2400 * buffers on its own). However, direct reclaim
2401 * only waits for kswapd when under allocation
2402 * congestion. So as a result __GFP_RECLAIM is
2403 * unreliable and fails to actually reclaim our
2404 * dirty pages -- unless you try over and over
2405 * again with !__GFP_NORETRY. However, we still
2406 * want to fail this allocation rather than
2407 * trigger the out-of-memory killer and for
Michal Hockodbb32952017-07-12 14:36:55 -07002408 * this we want __GFP_RETRY_MAYFAIL.
Chris Wilsoneaf41802017-06-09 12:03:47 +01002409 */
Michal Hockodbb32952017-07-12 14:36:55 -07002410 gfp |= __GFP_RETRY_MAYFAIL;
Imre Deake2273302015-07-09 12:59:05 +03002411 }
Chris Wilson4846bf02017-06-09 12:03:46 +01002412 } while (1);
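		/*
		 * Summary of the retry ladder above: the first attempt uses the
		 * mapping gfp stripped of __GFP_RECLAIM (with __GFP_NORETRY |
		 * __GFP_NOWARN); if that fails we reap our own bound, unbound
		 * and purgeable buffers via i915_gem_shrink() and retry, now
		 * with the full mapping gfp plus __GFP_RETRY_MAYFAIL so that a
		 * genuine failure is reported back as an error rather than
		 * waking the OOM killer.
		 */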
2413
Chris Wilson871dfbd2016-10-11 09:20:21 +01002414 if (!i ||
2415 sg->length >= max_segment ||
2416 page_to_pfn(page) != last_pfn + 1) {
Matthew Aulda5c081662017-10-06 23:18:18 +01002417 if (i) {
2418 sg_mask |= sg->length;
Imre Deak90797e62013-02-18 19:28:03 +02002419 sg = sg_next(sg);
Matthew Aulda5c081662017-10-06 23:18:18 +01002420 }
Imre Deak90797e62013-02-18 19:28:03 +02002421 st->nents++;
2422 sg_set_page(sg, page, PAGE_SIZE, 0);
2423 } else {
2424 sg->length += PAGE_SIZE;
2425 }
2426 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03002427
2428 /* Check that the i965g/gm workaround works. */
2429 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07002430 }
Matthew Aulda5c081662017-10-06 23:18:18 +01002431 if (sg) { /* loop terminated early; short sg table */
2432 sg_mask |= sg->length;
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002433 sg_mark_end(sg);
Matthew Aulda5c081662017-10-06 23:18:18 +01002434 }
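	/*
	 * Worked example of the coalescing above: with 4KiB pages, three
	 * physically contiguous pages followed by one isolated page end up
	 * as two sg entries of length 12KiB and 4KiB, st->nents == 2, and
	 * sg_mask accumulates both chunk lengths. sg_mask is later handed
	 * to __i915_gem_object_set_pages() as the set of physical chunk
	 * sizes backing the object.
	 */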
Chris Wilson74ce6b62012-10-19 15:51:06 +01002435
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002436 /* Trim unused sg entries to avoid wasting memory. */
2437 i915_sg_trim(st);
2438
Chris Wilson03ac84f2016-10-28 13:58:36 +01002439 ret = i915_gem_gtt_prepare_pages(obj, st);
Chris Wilsond766ef52016-12-19 12:43:45 +00002440 if (ret) {
2441 /* DMA remapping failed? One possible cause is that
2442 * it could not reserve enough large entries; asking
2443 * for PAGE_SIZE chunks instead may be helpful.
2444 */
2445 if (max_segment > PAGE_SIZE) {
2446 for_each_sgt_page(page, sgt_iter, st)
2447 put_page(page);
2448 sg_free_table(st);
2449
2450 max_segment = PAGE_SIZE;
2451 goto rebuild_st;
2452 } else {
2453 dev_warn(&dev_priv->drm.pdev->dev,
2454 "Failed to DMA remap %lu pages\n",
2455 page_count);
2456 goto err_pages;
2457 }
2458 }
Imre Deake2273302015-07-09 12:59:05 +03002459
Eric Anholt673a3942008-07-30 12:06:12 -07002460 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002461 i915_gem_object_do_bit_17_swizzle(obj, st);
Eric Anholt673a3942008-07-30 12:06:12 -07002462
Matthew Aulda5c081662017-10-06 23:18:18 +01002463 __i915_gem_object_set_pages(obj, st, sg_mask);
Matthew Auldb91b09e2017-10-06 23:18:17 +01002464
2465 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002466
Chris Wilsonb17993b2016-11-14 11:29:30 +00002467err_sg:
Imre Deak90797e62013-02-18 19:28:03 +02002468 sg_mark_end(sg);
Chris Wilsonb17993b2016-11-14 11:29:30 +00002469err_pages:
Dave Gordon85d12252016-05-20 11:54:06 +01002470 for_each_sgt_page(page, sgt_iter, st)
2471 put_page(page);
Chris Wilson9da3da62012-06-01 15:20:22 +01002472 sg_free_table(st);
2473 kfree(st);
Chris Wilson0820baf2014-03-25 13:23:03 +00002474
2475 /* shmemfs first checks if there is enough memory to allocate the page
2476 * and reports ENOSPC should there be insufficient, along with the usual
2477 * ENOMEM for a genuine allocation failure.
2478 *
2479 * We use ENOSPC in our driver to mean that we have run out of aperture
2480 * space and so want to translate the error from shmemfs back to our
2481 * usual understanding of ENOMEM.
2482 */
Imre Deake2273302015-07-09 12:59:05 +03002483 if (ret == -ENOSPC)
2484 ret = -ENOMEM;
2485
Matthew Auldb91b09e2017-10-06 23:18:17 +01002486 return ret;
Chris Wilson03ac84f2016-10-28 13:58:36 +01002487}
2488
2489void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
Matthew Aulda5c081662017-10-06 23:18:18 +01002490 struct sg_table *pages,
2491 unsigned int sg_mask)
Chris Wilson03ac84f2016-10-28 13:58:36 +01002492{
Matthew Aulda5c081662017-10-06 23:18:18 +01002493 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2494 unsigned long supported = INTEL_INFO(i915)->page_sizes;
2495 int i;
2496
Chris Wilson1233e2d2016-10-28 13:58:37 +01002497 lockdep_assert_held(&obj->mm.lock);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002498
2499 obj->mm.get_page.sg_pos = pages->sgl;
2500 obj->mm.get_page.sg_idx = 0;
2501
2502 obj->mm.pages = pages;
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002503
2504 if (i915_gem_object_is_tiled(obj) &&
2505 to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2506 GEM_BUG_ON(obj->mm.quirked);
2507 __i915_gem_object_pin_pages(obj);
2508 obj->mm.quirked = true;
2509 }
Matthew Aulda5c081662017-10-06 23:18:18 +01002510
2511 GEM_BUG_ON(!sg_mask);
2512 obj->mm.page_sizes.phys = sg_mask;
2513
2514 /*
2515 * Calculate the supported page-sizes which fit into the given sg_mask.
2516 * This will give us the page-sizes which we may be able to use
2517 * opportunistically when later inserting into the GTT. For example if
2518 * phys=2G, then in theory we should be able to use 1G, 2M, 64K or 4K
2519 * pages, although in practice this will depend on a number of other
2520 * factors.
2521 */
2522 obj->mm.page_sizes.sg = 0;
2523 for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
2524 if (obj->mm.page_sizes.phys & ~0u << i)
2525 obj->mm.page_sizes.sg |= BIT(i);
2526 }
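	/*
	 * Worked example for the loop above (hypothetical values): if the
	 * platform supports 2M and 4K pages and page_sizes.phys = 64K | 4K,
	 * then the 4K bit is set (some chunk is at least 4K) but the 2M bit
	 * is not (no chunk reaches 2M), giving page_sizes.sg = 4K.
	 */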
2527
2528 GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
Chris Wilson03ac84f2016-10-28 13:58:36 +01002529}
2530
2531static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2532{
Matthew Auldb91b09e2017-10-06 23:18:17 +01002533 int err;
Chris Wilson03ac84f2016-10-28 13:58:36 +01002534
2535 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2536 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2537 return -EFAULT;
2538 }
2539
Matthew Auldb91b09e2017-10-06 23:18:17 +01002540 err = obj->ops->get_pages(obj);
2541 GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
Chris Wilson03ac84f2016-10-28 13:58:36 +01002542
Matthew Auldb91b09e2017-10-06 23:18:17 +01002543 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07002544}
2545
Chris Wilson37e680a2012-06-07 15:38:42 +01002546/* Ensure that the associated pages are gathered from the backing storage
Chris Wilson1233e2d2016-10-28 13:58:37 +01002547 * and pinned into our object. i915_gem_object_pin_pages() may be called
Chris Wilson37e680a2012-06-07 15:38:42 +01002548 * multiple times before they are released by a single call to
Chris Wilson1233e2d2016-10-28 13:58:37 +01002549 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
Chris Wilson37e680a2012-06-07 15:38:42 +01002550 * either as a result of memory pressure (reaping pages under the shrinker)
2551 * or as the object is itself released.
2552 */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002553int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
Chris Wilson37e680a2012-06-07 15:38:42 +01002554{
Chris Wilson03ac84f2016-10-28 13:58:36 +01002555 int err;
Chris Wilson37e680a2012-06-07 15:38:42 +01002556
Chris Wilson1233e2d2016-10-28 13:58:37 +01002557 err = mutex_lock_interruptible(&obj->mm.lock);
2558 if (err)
2559 return err;
Chris Wilson4c7d62c2016-10-28 13:58:32 +01002560
Chris Wilson4e5462e2017-03-07 13:20:31 +00002561 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
Chris Wilson88c880b2017-09-06 14:52:20 +01002562 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2563
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002564 err = ____i915_gem_object_get_pages(obj);
2565 if (err)
2566 goto unlock;
2567
2568 smp_mb__before_atomic();
Chris Wilson1233e2d2016-10-28 13:58:37 +01002569 }
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002570 atomic_inc(&obj->mm.pages_pin_count);
Chris Wilson43e28f02013-01-08 10:53:09 +00002571
Chris Wilson1233e2d2016-10-28 13:58:37 +01002572unlock:
2573 mutex_unlock(&obj->mm.lock);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002574 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07002575}
2576
Dave Gordondd6034c2016-05-20 11:54:04 +01002577/* The 'mapping' part of i915_gem_object_pin_map() below */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002578static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2579 enum i915_map_type type)
Dave Gordondd6034c2016-05-20 11:54:04 +01002580{
2581 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002582 struct sg_table *sgt = obj->mm.pages;
Dave Gordon85d12252016-05-20 11:54:06 +01002583 struct sgt_iter sgt_iter;
2584 struct page *page;
Dave Gordonb338fa42016-05-20 11:54:05 +01002585 struct page *stack_pages[32];
2586 struct page **pages = stack_pages;
Dave Gordondd6034c2016-05-20 11:54:04 +01002587 unsigned long i = 0;
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002588 pgprot_t pgprot;
Dave Gordondd6034c2016-05-20 11:54:04 +01002589 void *addr;
2590
2591 /* A single page can always be kmapped */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002592 if (n_pages == 1 && type == I915_MAP_WB)
Dave Gordondd6034c2016-05-20 11:54:04 +01002593 return kmap(sg_page(sgt->sgl));
2594
Dave Gordonb338fa42016-05-20 11:54:05 +01002595 if (n_pages > ARRAY_SIZE(stack_pages)) {
2596 /* Too big for stack -- allocate temporary array instead */
Michal Hocko0ee931c2017-09-13 16:28:29 -07002597 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
Dave Gordonb338fa42016-05-20 11:54:05 +01002598 if (!pages)
2599 return NULL;
2600 }
Dave Gordondd6034c2016-05-20 11:54:04 +01002601
Dave Gordon85d12252016-05-20 11:54:06 +01002602 for_each_sgt_page(page, sgt_iter, sgt)
2603 pages[i++] = page;
Dave Gordondd6034c2016-05-20 11:54:04 +01002604
2605 /* Check that we have the expected number of pages */
2606 GEM_BUG_ON(i != n_pages);
2607
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002608 switch (type) {
Chris Wilsona575c672017-08-28 11:46:31 +01002609 default:
2610 MISSING_CASE(type);
2611 /* fallthrough to use PAGE_KERNEL anyway */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002612 case I915_MAP_WB:
2613 pgprot = PAGE_KERNEL;
2614 break;
2615 case I915_MAP_WC:
2616 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2617 break;
2618 }
2619 addr = vmap(pages, n_pages, 0, pgprot);
Dave Gordondd6034c2016-05-20 11:54:04 +01002620
Dave Gordonb338fa42016-05-20 11:54:05 +01002621 if (pages != stack_pages)
Michal Hocko20981052017-05-17 14:23:12 +02002622 kvfree(pages);
Dave Gordondd6034c2016-05-20 11:54:04 +01002623
2624 return addr;
2625}
2626
2627/* get, pin, and map the pages of the object into kernel space */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002628void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2629 enum i915_map_type type)
Chris Wilson0a798eb2016-04-08 12:11:11 +01002630{
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002631 enum i915_map_type has_type;
2632 bool pinned;
2633 void *ptr;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002634 int ret;
2635
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002636 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
Chris Wilson0a798eb2016-04-08 12:11:11 +01002637
Chris Wilson1233e2d2016-10-28 13:58:37 +01002638 ret = mutex_lock_interruptible(&obj->mm.lock);
Chris Wilson0a798eb2016-04-08 12:11:11 +01002639 if (ret)
2640 return ERR_PTR(ret);
2641
Chris Wilsona575c672017-08-28 11:46:31 +01002642 pinned = !(type & I915_MAP_OVERRIDE);
2643 type &= ~I915_MAP_OVERRIDE;
2644
Chris Wilson1233e2d2016-10-28 13:58:37 +01002645 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
Chris Wilson4e5462e2017-03-07 13:20:31 +00002646 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
Chris Wilson88c880b2017-09-06 14:52:20 +01002647 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2648
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002649 ret = ____i915_gem_object_get_pages(obj);
2650 if (ret)
2651 goto err_unlock;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002652
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002653 smp_mb__before_atomic();
2654 }
2655 atomic_inc(&obj->mm.pages_pin_count);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002656 pinned = false;
2657 }
2658 GEM_BUG_ON(!obj->mm.pages);
Chris Wilson0a798eb2016-04-08 12:11:11 +01002659
Chris Wilson0ce81782017-05-17 13:09:59 +01002660 ptr = page_unpack_bits(obj->mm.mapping, &has_type);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002661 if (ptr && has_type != type) {
2662 if (pinned) {
2663 ret = -EBUSY;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002664 goto err_unpin;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002665 }
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002666
2667 if (is_vmalloc_addr(ptr))
2668 vunmap(ptr);
2669 else
2670 kunmap(kmap_to_page(ptr));
2671
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002672 ptr = obj->mm.mapping = NULL;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002673 }
2674
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002675 if (!ptr) {
2676 ptr = i915_gem_object_map(obj, type);
2677 if (!ptr) {
2678 ret = -ENOMEM;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002679 goto err_unpin;
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002680 }
2681
Chris Wilson0ce81782017-05-17 13:09:59 +01002682 obj->mm.mapping = page_pack_bits(ptr, type);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002683 }
2684
Chris Wilson1233e2d2016-10-28 13:58:37 +01002685out_unlock:
2686 mutex_unlock(&obj->mm.lock);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002687 return ptr;
2688
Chris Wilson1233e2d2016-10-28 13:58:37 +01002689err_unpin:
2690 atomic_dec(&obj->mm.pages_pin_count);
2691err_unlock:
2692 ptr = ERR_PTR(ret);
2693 goto out_unlock;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002694}
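/*
 * Illustrative usage of the pin_map API above (a sketch, not code from
 * this file): a caller wanting a CPU view of the whole object would do
 * something like
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, src, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 *
 * where i915_gem_object_unpin_map() is assumed to drop the pin taken by
 * pin_map; the pages and the cached mapping remain on the object until
 * it is reaped by the shrinker or freed.
 */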
2695
Chris Wilson7c55e2c2017-03-07 12:03:38 +00002696static int
2697i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2698 const struct drm_i915_gem_pwrite *arg)
2699{
2700 struct address_space *mapping = obj->base.filp->f_mapping;
2701 char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2702 u64 remain, offset;
2703 unsigned int pg;
2704
2705 /* Before we instantiate/pin the backing store for our use, we
2706 * can prepopulate the shmemfs filp efficiently using a write into
2707 * the pagecache. We avoid the penalty of instantiating all the
2708 * pages, important if the user is just writing to a few and never
2709 * uses the object on the GPU, and using a direct write into shmemfs
2710 * allows it to avoid the cost of retrieving a page (either swapin
2711 * or clearing-before-use) before it is overwritten.
2712 */
2713 if (READ_ONCE(obj->mm.pages))
2714 return -ENODEV;
2715
2716 /* Before the pages are instantiated the object is treated as being
2717 * in the CPU domain. The pages will be clflushed as required before
2718 * use, and we can freely write into the pages directly. If userspace
2719 * races pwrite with any other operation, corruption will ensue -
2720 * that is userspace's prerogative!
2721 */
2722
2723 remain = arg->size;
2724 offset = arg->offset;
2725 pg = offset_in_page(offset);
2726
2727 do {
2728 unsigned int len, unwritten;
2729 struct page *page;
2730 void *data, *vaddr;
2731 int err;
2732
2733 len = PAGE_SIZE - pg;
2734 if (len > remain)
2735 len = remain;
2736
2737 err = pagecache_write_begin(obj->base.filp, mapping,
2738 offset, len, 0,
2739 &page, &data);
2740 if (err < 0)
2741 return err;
2742
2743 vaddr = kmap(page);
2744 unwritten = copy_from_user(vaddr + pg, user_data, len);
2745 kunmap(page);
2746
2747 err = pagecache_write_end(obj->base.filp, mapping,
2748 offset, len, len - unwritten,
2749 page, data);
2750 if (err < 0)
2751 return err;
2752
2753 if (unwritten)
2754 return -EFAULT;
2755
2756 remain -= len;
2757 user_data += len;
2758 offset += len;
2759 pg = 0;
2760 } while (remain);
2761
2762 return 0;
2763}
2764
Chris Wilson77b25a92017-07-21 13:32:30 +01002765static bool ban_context(const struct i915_gem_context *ctx,
2766 unsigned int score)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002767{
Chris Wilson60958682016-12-31 11:20:11 +00002768 return (i915_gem_context_is_bannable(ctx) &&
Chris Wilson77b25a92017-07-21 13:32:30 +01002769 score >= CONTEXT_SCORE_BAN_THRESHOLD);
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002770}
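/*
 * Ban scoring sketch: each hang for which a context is found guilty adds
 * CONTEXT_SCORE_GUILTY to ctx->ban_score (see i915_gem_context_mark_guilty()
 * below); once the accumulated score reaches CONTEXT_SCORE_BAN_THRESHOLD,
 * a bannable context is banned. Both constants are defined elsewhere in
 * the driver and are not shown here.
 */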
2771
Mika Kuoppalae5e1fc42016-11-16 17:20:31 +02002772static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002773{
Chris Wilson77b25a92017-07-21 13:32:30 +01002774 unsigned int score;
2775 bool banned;
Mika Kuoppalab083a082016-11-18 15:10:47 +02002776
Chris Wilson77b25a92017-07-21 13:32:30 +01002777 atomic_inc(&ctx->guilty_count);
2778
2779 score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
2780 banned = ban_context(ctx, score);
Mika Kuoppalab083a082016-11-18 15:10:47 +02002781 DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
Chris Wilson77b25a92017-07-21 13:32:30 +01002782 ctx->name, score, yesno(banned));
2783 if (!banned)
Mika Kuoppalab083a082016-11-18 15:10:47 +02002784 return;
2785
Chris Wilson77b25a92017-07-21 13:32:30 +01002786 i915_gem_context_set_banned(ctx);
2787 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
2788 atomic_inc(&ctx->file_priv->context_bans);
2789 DRM_DEBUG_DRIVER("client %s has had %d contexts banned\n",
2790 ctx->name, atomic_read(&ctx->file_priv->context_bans));
2791 }
Mika Kuoppalae5e1fc42016-11-16 17:20:31 +02002792}
2793
2794static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
2795{
Chris Wilson77b25a92017-07-21 13:32:30 +01002796 atomic_inc(&ctx->active_count);
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002797}
2798
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002799struct drm_i915_gem_request *
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002800i915_gem_find_active_request(struct intel_engine_cs *engine)
Chris Wilson9375e442010-09-19 12:21:28 +01002801{
Chris Wilson754c9fd2017-02-23 07:44:14 +00002802 struct drm_i915_gem_request *request, *active = NULL;
2803 unsigned long flags;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002804
Chris Wilsonf69a02c2016-07-01 17:23:16 +01002805 /* We are called by the error capture and reset at a random
2806 * point in time. In particular, note that neither is crucially
2807 * ordered with an interrupt. After a hang, the GPU is dead and we
2808 * assume that no more writes can happen (we waited long enough for
2809 * all writes that were in transaction to be flushed) - adding an
2810 * extra delay for a recent interrupt is pointless. Hence, we do
2811 * not need an engine->irq_seqno_barrier() before the seqno reads.
2812 */
Chris Wilson754c9fd2017-02-23 07:44:14 +00002813 spin_lock_irqsave(&engine->timeline->lock, flags);
Chris Wilson73cb9702016-10-28 13:58:46 +01002814 list_for_each_entry(request, &engine->timeline->requests, link) {
Chris Wilson754c9fd2017-02-23 07:44:14 +00002815 if (__i915_gem_request_completed(request,
2816 request->global_seqno))
Chris Wilson4db080f2013-12-04 11:37:09 +00002817 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002818
Mika Kuoppala36193ac2017-01-17 17:59:02 +02002819 GEM_BUG_ON(request->engine != engine);
Chris Wilsonc00122f32017-02-12 17:19:58 +00002820 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2821 &request->fence.flags));
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002822
Chris Wilson754c9fd2017-02-23 07:44:14 +00002823 active = request;
2824 break;
2825 }
2826 spin_unlock_irqrestore(&engine->timeline->lock, flags);
2827
2828 return active;
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002829}
2830
Mika Kuoppalabf2f0432017-01-17 17:59:04 +02002831static bool engine_stalled(struct intel_engine_cs *engine)
2832{
2833 if (!engine->hangcheck.stalled)
2834 return false;
2835
2836 /* Check for possible seqno movement after hang declaration */
2837 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
2838 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
2839 return false;
2840 }
2841
2842 return true;
2843}
2844
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002845/*
2846 * Ensure irq handler finishes, and not run again.
2847 * Also return the active request so that we only search for it once.
2848 */
2849struct drm_i915_gem_request *
2850i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
2851{
2852 struct drm_i915_gem_request *request = NULL;
2853
Chris Wilson1749d902017-10-09 12:02:59 +01002854 /*
2855 * During the reset sequence, we must prevent the engine from
2856 * entering RC6. As the context state is undefined until we restart
2857 * the engine, if it does enter RC6 during the reset, the state
2858 * written to the powercontext is undefined and so we may lose
2859 * GPU state upon resume, i.e. fail to restart after a reset.
2860 */
2861 intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
2862
2863 /*
2864 * Prevent the signaler thread from updating the request
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002865 * state (by calling dma_fence_signal) as we are processing
2866 * the reset. The write from the GPU of the seqno is
2867 * asynchronous and the signaler thread may see a different
2868 * value to us and declare the request complete, even though
2869 * the reset routine has picked that request as the active
2870 * (incomplete) request. This conflict is not handled
2871 * gracefully!
2872 */
2873 kthread_park(engine->breadcrumbs.signaler);
2874
Chris Wilson1749d902017-10-09 12:02:59 +01002875 /*
2876 * Prevent request submission to the hardware until we have
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002877 * completed the reset in i915_gem_reset_finish(). If a request
2878 * is completed by one engine, it may then queue a request
2879 * to a second via its engine->execlists.irq_tasklet *just* as we are
2880 * calling engine->init_hw() and also writing the ELSP.
2881 * Turning off the engine->execlists.irq_tasklet until the reset is over
2882 * prevents the race.
2883 */
Mika Kuoppalab620e872017-09-22 15:43:03 +03002884 tasklet_kill(&engine->execlists.irq_tasklet);
2885 tasklet_disable(&engine->execlists.irq_tasklet);
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002886
2887 if (engine->irq_seqno_barrier)
2888 engine->irq_seqno_barrier(engine);
2889
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01002890 request = i915_gem_find_active_request(engine);
2891 if (request && request->fence.error == -EIO)
2892 request = ERR_PTR(-EIO); /* Previous reset failed! */
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002893
2894 return request;
2895}
2896
Chris Wilson0e178ae2017-01-17 17:59:06 +02002897int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
Chris Wilson4c965542017-01-17 17:59:01 +02002898{
2899 struct intel_engine_cs *engine;
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002900 struct drm_i915_gem_request *request;
Chris Wilson4c965542017-01-17 17:59:01 +02002901 enum intel_engine_id id;
Chris Wilson0e178ae2017-01-17 17:59:06 +02002902 int err = 0;
Chris Wilson4c965542017-01-17 17:59:01 +02002903
Chris Wilson0e178ae2017-01-17 17:59:06 +02002904 for_each_engine(engine, dev_priv, id) {
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002905 request = i915_gem_reset_prepare_engine(engine);
2906 if (IS_ERR(request)) {
2907 err = PTR_ERR(request);
2908 continue;
Chris Wilson0e178ae2017-01-17 17:59:06 +02002909 }
Michel Thierryc64992e2017-06-20 10:57:44 +01002910
2911 engine->hangcheck.active_request = request;
Chris Wilson0e178ae2017-01-17 17:59:06 +02002912 }
2913
Chris Wilson4c965542017-01-17 17:59:01 +02002914 i915_gem_revoke_fences(dev_priv);
Chris Wilson0e178ae2017-01-17 17:59:06 +02002915
2916 return err;
Chris Wilson4c965542017-01-17 17:59:01 +02002917}
2918
Mika Kuoppala36193ac2017-01-17 17:59:02 +02002919static void skip_request(struct drm_i915_gem_request *request)
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002920{
Chris Wilson821ed7d2016-09-09 14:11:53 +01002921 void *vaddr = request->ring->vaddr;
2922 u32 head;
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002923
Chris Wilson821ed7d2016-09-09 14:11:53 +01002924 /* As this request likely depends on state from the lost
2925 * context, clear out all the user operations leaving the
2926 * breadcrumb at the end (so we get the fence notifications).
2927 */
2928 head = request->head;
2929 if (request->postfix < head) {
2930 memset(vaddr + head, 0, request->ring->size - head);
2931 head = 0;
2932 }
2933 memset(vaddr + head, 0, request->postfix - head);
Chris Wilsonc0d5f322017-01-10 17:22:43 +00002934
2935 dma_fence_set_error(&request->fence, -EIO);
Chris Wilson4db080f2013-12-04 11:37:09 +00002936}
2937
Mika Kuoppala36193ac2017-01-17 17:59:02 +02002938static void engine_skip_context(struct drm_i915_gem_request *request)
2939{
2940 struct intel_engine_cs *engine = request->engine;
2941 struct i915_gem_context *hung_ctx = request->ctx;
2942 struct intel_timeline *timeline;
2943 unsigned long flags;
2944
2945 timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
2946
2947 spin_lock_irqsave(&engine->timeline->lock, flags);
2948 spin_lock(&timeline->lock);
2949
2950 list_for_each_entry_continue(request, &engine->timeline->requests, link)
2951 if (request->ctx == hung_ctx)
2952 skip_request(request);
2953
2954 list_for_each_entry(request, &timeline->requests, link)
2955 skip_request(request);
2956
2957 spin_unlock(&timeline->lock);
2958 spin_unlock_irqrestore(&engine->timeline->lock, flags);
2959}
2960
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01002961/* Returns the request if it was guilty of the hang */
2962static struct drm_i915_gem_request *
2963i915_gem_reset_request(struct intel_engine_cs *engine,
2964 struct drm_i915_gem_request *request)
Mika Kuoppala61da5362017-01-17 17:59:05 +02002965{
Mika Kuoppala71895a02017-01-17 17:59:07 +02002966 /* The guilty request will get skipped on a hung engine.
2967 *
2968 * Users of client default contexts do not rely on logical
2969 * state preserved between batches so it is safe to execute
2970 * queued requests following the hang. Non default contexts
2971 * rely on preserved state, so skipping a batch loses the
2972 * evolution of the state and it needs to be considered corrupted.
2973 * Executing more queued batches on top of corrupted state is
2974 * risky. But we take the risk by trying to advance through
2975 * the queued requests in order to make the client behaviour
2976 * more predictable around resets, by not throwing away random
2977 * more predictable around resets, by not throwing away a random
2978 * number of batches it has prepared for execution. Sophisticated
2979 * clients can use gem_reset_stats_ioctl and dma fence status
2980 * (exported via sync_file info ioctl on explicit fences) to observe
2981 * when they lose the context state and should rebuild accordingly.
2982 * The context ban, and ultimately the client ban, mechanism are safety
2983 * valves if client submission ends up resulting in nothing more than
2984 * subsequent hangs.
2985 */
2986
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01002987 if (engine_stalled(engine)) {
Mika Kuoppala61da5362017-01-17 17:59:05 +02002988 i915_gem_context_mark_guilty(request->ctx);
2989 skip_request(request);
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01002990
2991 /* If this context is now banned, skip all pending requests. */
2992 if (i915_gem_context_is_banned(request->ctx))
2993 engine_skip_context(request);
Mika Kuoppala61da5362017-01-17 17:59:05 +02002994 } else {
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01002995 /*
2996 * Since this is not the hung engine, it may have advanced
2997 * since the hang declaration. Double check by refinding
2998 * the active request at the time of the reset.
2999 */
3000 request = i915_gem_find_active_request(engine);
3001 if (request) {
3002 i915_gem_context_mark_innocent(request->ctx);
3003 dma_fence_set_error(&request->fence, -EAGAIN);
3004
3005 /* Rewind the engine to replay the incomplete rq */
3006 spin_lock_irq(&engine->timeline->lock);
3007 request = list_prev_entry(request, link);
3008 if (&request->link == &engine->timeline->requests)
3009 request = NULL;
3010 spin_unlock_irq(&engine->timeline->lock);
3011 }
Mika Kuoppala61da5362017-01-17 17:59:05 +02003012 }
3013
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01003014 return request;
Mika Kuoppala61da5362017-01-17 17:59:05 +02003015}
3016
Michel Thierrya1ef70e2017-06-20 10:57:47 +01003017void i915_gem_reset_engine(struct intel_engine_cs *engine,
3018 struct drm_i915_gem_request *request)
Chris Wilson4db080f2013-12-04 11:37:09 +00003019{
Chris Wilsoned454f22017-07-21 13:32:29 +01003020 engine->irq_posted = 0;
3021
Chris Wilsond1d1ebf42017-07-21 13:32:33 +01003022 if (request)
3023 request = i915_gem_reset_request(engine, request);
3024
3025 if (request) {
Chris Wilsonc0dcb202017-02-07 15:24:37 +00003026 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
3027 engine->name, request->global_seqno);
Chris Wilsonc0dcb202017-02-07 15:24:37 +00003028 }
Chris Wilson821ed7d2016-09-09 14:11:53 +01003029
3030 /* Setup the CS to resume from the breadcrumb of the hung request */
3031 engine->reset_hw(engine, request);
Chris Wilson821ed7d2016-09-09 14:11:53 +01003032}
3033
Chris Wilsond8027092017-02-08 14:30:32 +00003034void i915_gem_reset(struct drm_i915_private *dev_priv)
Chris Wilson821ed7d2016-09-09 14:11:53 +01003035{
3036 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05303037 enum intel_engine_id id;
Chris Wilson821ed7d2016-09-09 14:11:53 +01003038
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003039 lockdep_assert_held(&dev_priv->drm.struct_mutex);
3040
Chris Wilson821ed7d2016-09-09 14:11:53 +01003041 i915_gem_retire_requests(dev_priv);
3042
Chris Wilson2ae55732017-02-12 17:20:02 +00003043 for_each_engine(engine, dev_priv, id) {
3044 struct i915_gem_context *ctx;
3045
Michel Thierryc64992e2017-06-20 10:57:44 +01003046 i915_gem_reset_engine(engine, engine->hangcheck.active_request);
Chris Wilson2ae55732017-02-12 17:20:02 +00003047 ctx = fetch_and_zero(&engine->last_retired_context);
3048 if (ctx)
3049 engine->context_unpin(engine, ctx);
3050 }
Chris Wilson821ed7d2016-09-09 14:11:53 +01003051
Tvrtko Ursulin4362f4f2016-11-16 08:55:33 +00003052 i915_gem_restore_fences(dev_priv);
Chris Wilsonf2a91d12016-09-21 14:51:06 +01003053
3054 if (dev_priv->gt.awake) {
3055 intel_sanitize_gt_powersave(dev_priv);
3056 intel_enable_gt_powersave(dev_priv);
3057 if (INTEL_GEN(dev_priv) >= 6)
3058 gen6_rps_busy(dev_priv);
3059 }
Chris Wilson821ed7d2016-09-09 14:11:53 +01003060}
3061
Michel Thierrya1ef70e2017-06-20 10:57:47 +01003062void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
3063{
Mika Kuoppalab620e872017-09-22 15:43:03 +03003064 tasklet_enable(&engine->execlists.irq_tasklet);
Michel Thierrya1ef70e2017-06-20 10:57:47 +01003065 kthread_unpark(engine->breadcrumbs.signaler);
Chris Wilson1749d902017-10-09 12:02:59 +01003066
3067 intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
Michel Thierrya1ef70e2017-06-20 10:57:47 +01003068}
3069
Chris Wilsond8027092017-02-08 14:30:32 +00003070void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
3071{
Chris Wilson1f7b8472017-02-08 14:30:33 +00003072 struct intel_engine_cs *engine;
3073 enum intel_engine_id id;
3074
Chris Wilsond8027092017-02-08 14:30:32 +00003075 lockdep_assert_held(&dev_priv->drm.struct_mutex);
Chris Wilson1f7b8472017-02-08 14:30:33 +00003076
Chris Wilsonfe3288b2017-02-12 17:20:01 +00003077 for_each_engine(engine, dev_priv, id) {
Michel Thierryc64992e2017-06-20 10:57:44 +01003078 engine->hangcheck.active_request = NULL;
Michel Thierrya1ef70e2017-06-20 10:57:47 +01003079 i915_gem_reset_finish_engine(engine);
Chris Wilsonfe3288b2017-02-12 17:20:01 +00003080 }
Chris Wilsond8027092017-02-08 14:30:32 +00003081}
3082
Chris Wilson821ed7d2016-09-09 14:11:53 +01003083static void nop_submit_request(struct drm_i915_gem_request *request)
3084{
Chris Wilson8d550822017-10-06 12:56:17 +01003085 unsigned long flags;
3086
Chris Wilsonbf2eac32017-07-21 13:32:28 +01003087 GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
Chris Wilson3cd94422017-01-10 17:22:45 +00003088 dma_fence_set_error(&request->fence, -EIO);
Chris Wilson8d550822017-10-06 12:56:17 +01003089
3090 spin_lock_irqsave(&request->engine->timeline->lock, flags);
3091 __i915_gem_request_submit(request);
Chris Wilson3dcf93f2016-11-22 14:41:20 +00003092 intel_engine_init_global_seqno(request->engine, request->global_seqno);
Chris Wilson8d550822017-10-06 12:56:17 +01003093 spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
Chris Wilson821ed7d2016-09-09 14:11:53 +01003094}
3095
Chris Wilson2a20d6f2017-01-10 17:22:46 +00003096static void engine_set_wedged(struct intel_engine_cs *engine)
Chris Wilson821ed7d2016-09-09 14:11:53 +01003097{
Chris Wilson20e49332016-11-22 14:41:21 +00003098 /* We need to be sure that no thread is running the old callback as
3099 * we install the nop handler (otherwise we would submit a request
3100 * to hardware that will never complete). In order to prevent this
3101 * race, we wait until the machine is idle before making the swap
3102 * (using stop_machine()).
3103 */
Chris Wilson821ed7d2016-09-09 14:11:53 +01003104 engine->submit_request = nop_submit_request;
Chris Wilson70c2a242016-09-09 14:11:46 +01003105
Chris Wilson3cd94422017-01-10 17:22:45 +00003106 /* Mark all executing requests as skipped */
Chris Wilson27a5f612017-09-15 18:31:00 +01003107 engine->cancel_requests(engine);
Chris Wilson5e32d742017-07-21 13:32:25 +01003108
3109 /* Mark all pending requests as complete so that any concurrent
3110 * (lockless) lookup doesn't try and wait upon the request as we
3111 * reset it.
3112 */
3113 intel_engine_init_global_seqno(engine,
3114 intel_engine_last_submit(engine));
Eric Anholt673a3942008-07-30 12:06:12 -07003115}
3116
Chris Wilson20e49332016-11-22 14:41:21 +00003117static int __i915_gem_set_wedged_BKL(void *data)
Eric Anholt673a3942008-07-30 12:06:12 -07003118{
Chris Wilson20e49332016-11-22 14:41:21 +00003119 struct drm_i915_private *i915 = data;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00003120 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05303121 enum intel_engine_id id;
Eric Anholt673a3942008-07-30 12:06:12 -07003122
Chris Wilson20e49332016-11-22 14:41:21 +00003123 for_each_engine(engine, i915, id)
Chris Wilson2a20d6f2017-01-10 17:22:46 +00003124 engine_set_wedged(engine);
Chris Wilson20e49332016-11-22 14:41:21 +00003125
Chris Wilson3d7adbb2017-07-21 13:32:27 +01003126 set_bit(I915_WEDGED, &i915->gpu_error.flags);
3127 wake_up_all(&i915->gpu_error.reset_queue);
3128
Chris Wilson20e49332016-11-22 14:41:21 +00003129 return 0;
3130}
3131
3132void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
3133{
Chris Wilson20e49332016-11-22 14:41:21 +00003134 stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07003135}
3136
Chris Wilson2e8f9d32017-03-16 17:13:04 +00003137bool i915_gem_unset_wedged(struct drm_i915_private *i915)
3138{
3139 struct i915_gem_timeline *tl;
3140 int i;
3141
3142 lockdep_assert_held(&i915->drm.struct_mutex);
3143 if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
3144 return true;
3145
3146 /* Before unwedging, make sure that all pending operations
3147 * are flushed and errored out - we may have requests waiting upon
3148 * third party fences. We marked all inflight requests as EIO, and
3149 * every execbuf since returned EIO, for consistency we want all
3150 * every execbuf since returned EIO; for consistency we want all
3151 * is done inside our nop_submit_request - and so we must wait.
3152 *
3153 * No more can be submitted until we reset the wedged bit.
3154 */
3155 list_for_each_entry(tl, &i915->gt.timelines, link) {
3156 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3157 struct drm_i915_gem_request *rq;
3158
3159 rq = i915_gem_active_peek(&tl->engine[i].last_request,
3160 &i915->drm.struct_mutex);
3161 if (!rq)
3162 continue;
3163
3164 /* We can't use our normal waiter as we want to
3165 * avoid recursively trying to handle the current
3166 * reset. The basic dma_fence_default_wait() installs
3167 * a callback for dma_fence_signal(), which is
3168 * triggered by our nop handler (indirectly, the
3169 * callback enables the signaler thread which is
3170 * woken by the nop_submit_request() advancing the seqno
3171 * and when the seqno passes the fence, the signaler
3172 * then signals the fence waking us up).
3173 */
3174 if (dma_fence_default_wait(&rq->fence, true,
3175 MAX_SCHEDULE_TIMEOUT) < 0)
3176 return false;
3177 }
3178 }
3179
3180 /* Undo nop_submit_request. We prevent all new i915 requests from
3181 * being queued (by disallowing execbuf whilst wedged) so having
3182 * waited for all active requests above, we know the system is idle
3183 * and do not have to worry about a thread being inside
3184 * engine->submit_request() as we swap over. So unlike installing
3185 * the nop_submit_request on reset, we can do this from normal
3186 * context and do not require stop_machine().
3187 */
3188 intel_engines_reset_default_submission(i915);
Chris Wilson36703e72017-06-22 11:56:25 +01003189 i915_gem_contexts_lost(i915);
Chris Wilson2e8f9d32017-03-16 17:13:04 +00003190
3191 smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
3192 clear_bit(I915_WEDGED, &i915->gpu_error.flags);
3193
3194 return true;
3195}
3196
Daniel Vetter75ef9da2010-08-21 00:25:16 +02003197static void
Eric Anholt673a3942008-07-30 12:06:12 -07003198i915_gem_retire_work_handler(struct work_struct *work)
3199{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003200 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01003201 container_of(work, typeof(*dev_priv), gt.retire_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01003202 struct drm_device *dev = &dev_priv->drm;
Eric Anholt673a3942008-07-30 12:06:12 -07003203
Chris Wilson891b48c2010-09-29 12:26:37 +01003204 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003205 if (mutex_trylock(&dev->struct_mutex)) {
Chris Wilson67d97da2016-07-04 08:08:31 +01003206 i915_gem_retire_requests(dev_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003207 mutex_unlock(&dev->struct_mutex);
3208 }
Chris Wilson67d97da2016-07-04 08:08:31 +01003209
3210 /* Keep the retire handler running until we are finally idle.
3211 * We do not need to do this test under locking as in the worst-case
3212 * we queue the retire worker once too often.
3213 */
Chris Wilsonc9615612016-07-09 10:12:06 +01003214 if (READ_ONCE(dev_priv->gt.awake)) {
3215 i915_queue_hangcheck(dev_priv);
Chris Wilson67d97da2016-07-04 08:08:31 +01003216 queue_delayed_work(dev_priv->wq,
3217 &dev_priv->gt.retire_work,
Chris Wilsonbcb45082012-10-05 17:02:57 +01003218 round_jiffies_up_relative(HZ));
Chris Wilsonc9615612016-07-09 10:12:06 +01003219 }
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003220}
Chris Wilson891b48c2010-09-29 12:26:37 +01003221
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003222static void
3223i915_gem_idle_work_handler(struct work_struct *work)
3224{
3225 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01003226 container_of(work, typeof(*dev_priv), gt.idle_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01003227 struct drm_device *dev = &dev_priv->drm;
Chris Wilson67d97da2016-07-04 08:08:31 +01003228 bool rearm_hangcheck;
3229
3230 if (!READ_ONCE(dev_priv->gt.awake))
3231 return;
3232
Imre Deak0cb56702016-11-07 11:20:04 +02003233 /*
3234 * Wait for the last execlists context to complete, but bail out in case a
3235 * new request is submitted.
3236 */
Chris Wilson8490ae202017-03-30 15:50:37 +01003237 wait_for(intel_engines_are_idle(dev_priv), 10);
Chris Wilson28176ef2016-10-28 13:58:56 +01003238 if (READ_ONCE(dev_priv->gt.active_requests))
Chris Wilson67d97da2016-07-04 08:08:31 +01003239 return;
3240
3241 rearm_hangcheck =
3242 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
3243
3244 if (!mutex_trylock(&dev->struct_mutex)) {
3245 /* Currently busy, come back later */
3246 mod_delayed_work(dev_priv->wq,
3247 &dev_priv->gt.idle_work,
3248 msecs_to_jiffies(50));
3249 goto out_rearm;
3250 }
3251
Imre Deak93c97dc2016-11-07 11:20:03 +02003252 /*
3253 * New request retired after this work handler started, extend active
3254 * period until next instance of the work.
3255 */
3256 if (work_pending(work))
3257 goto out_unlock;
3258
Chris Wilson28176ef2016-10-28 13:58:56 +01003259 if (dev_priv->gt.active_requests)
Chris Wilson67d97da2016-07-04 08:08:31 +01003260 goto out_unlock;
Zou Nan haid1b851f2010-05-21 09:08:57 +08003261
Chris Wilson05425242017-03-03 12:19:47 +00003262 if (wait_for(intel_engines_are_idle(dev_priv), 10))
Imre Deak0cb56702016-11-07 11:20:04 +02003263 DRM_ERROR("Timeout waiting for engines to idle\n");
3264
Chris Wilson6c067572017-05-17 13:10:03 +01003265 intel_engines_mark_idle(dev_priv);
Chris Wilson47979482017-05-03 10:39:21 +01003266 i915_gem_timelines_mark_idle(dev_priv);
Zou Nan hai852835f2010-05-21 09:08:56 +08003267
Chris Wilson67d97da2016-07-04 08:08:31 +01003268 GEM_BUG_ON(!dev_priv->gt.awake);
3269 dev_priv->gt.awake = false;
3270 rearm_hangcheck = false;
Daniel Vetter30ecad72015-12-09 09:29:36 +01003271
Chris Wilson67d97da2016-07-04 08:08:31 +01003272 if (INTEL_GEN(dev_priv) >= 6)
3273 gen6_rps_idle(dev_priv);
3274 intel_runtime_pm_put(dev_priv);
3275out_unlock:
3276 mutex_unlock(&dev->struct_mutex);
Chris Wilson35c94182015-04-07 16:20:37 +01003277
Chris Wilson67d97da2016-07-04 08:08:31 +01003278out_rearm:
3279 if (rearm_hangcheck) {
3280 GEM_BUG_ON(!dev_priv->gt.awake);
3281 i915_queue_hangcheck(dev_priv);
Chris Wilson35c94182015-04-07 16:20:37 +01003282 }
Eric Anholt673a3942008-07-30 12:06:12 -07003283}
3284
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003285void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
3286{
Chris Wilsond1b48c12017-08-16 09:52:08 +01003287 struct drm_i915_private *i915 = to_i915(gem->dev);
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003288 struct drm_i915_gem_object *obj = to_intel_bo(gem);
3289 struct drm_i915_file_private *fpriv = file->driver_priv;
Chris Wilsond1b48c12017-08-16 09:52:08 +01003290 struct i915_lut_handle *lut, *ln;
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003291
Chris Wilsond1b48c12017-08-16 09:52:08 +01003292 mutex_lock(&i915->drm.struct_mutex);
3293
3294 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
3295 struct i915_gem_context *ctx = lut->ctx;
3296 struct i915_vma *vma;
3297
Chris Wilson432295d2017-08-22 12:05:15 +01003298 GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
Chris Wilsond1b48c12017-08-16 09:52:08 +01003299 if (ctx->file_priv != fpriv)
3300 continue;
3301
3302 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
Chris Wilson3ffff012017-08-22 12:05:17 +01003303 GEM_BUG_ON(vma->obj != obj);
3304
3305 /* We allow the process to have multiple handles to the same
3306 * vma, in the same fd namespace, by virtue of flink/open.
3307 */
3308 GEM_BUG_ON(!vma->open_count);
3309 if (!--vma->open_count && !i915_vma_is_ggtt(vma))
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003310 i915_vma_close(vma);
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01003311
Chris Wilsond1b48c12017-08-16 09:52:08 +01003312 list_del(&lut->obj_link);
3313 list_del(&lut->ctx_link);
Chris Wilson4ff4b442017-06-16 15:05:16 +01003314
Chris Wilsond1b48c12017-08-16 09:52:08 +01003315 kmem_cache_free(i915->luts, lut);
3316 __i915_gem_object_release_unless_active(obj);
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01003317 }
Chris Wilsond1b48c12017-08-16 09:52:08 +01003318
3319 mutex_unlock(&i915->drm.struct_mutex);
Chris Wilsonb1f788c2016-08-04 07:52:45 +01003320}
3321
Chris Wilsone95433c2016-10-28 13:58:27 +01003322static unsigned long to_wait_timeout(s64 timeout_ns)
3323{
3324 if (timeout_ns < 0)
3325 return MAX_SCHEDULE_TIMEOUT;
3326
3327 if (timeout_ns == 0)
3328 return 0;
3329
3330 return nsecs_to_jiffies_timeout(timeout_ns);
3331}
3332
Ben Widawsky5816d642012-04-11 11:18:19 -07003333/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003334 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003335 * @dev: drm device pointer
3336 * @data: ioctl data blob
3337 * @file: drm file pointer
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003338 *
3339 * Returns 0 if successful, else an error is returned with the remaining time in
3340 * the timeout parameter.
3341 * -ETIME: object is still busy after timeout
3342 * -ERESTARTSYS: signal interrupted the wait
3343 * -ENOENT: object doesn't exist
3344 * Also possible, but rare:
Chris Wilsonb8050142017-08-11 11:57:31 +01003345 * -EAGAIN: incomplete, restart syscall
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003346 * -ENOMEM: damn
3347 * -ENODEV: Internal IRQ fail
3348 * -E?: The add request failed
3349 *
3350 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3351 * non-zero timeout parameter the wait ioctl will wait for the given number of
3352 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3353 * without holding struct_mutex the object may become re-busied before this
3354 * function completes. A similar but shorter * race condition exists in the busy
3355 * function completes. A similar but shorter race condition exists in the busy
3356 */
3357int
3358i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3359{
3360 struct drm_i915_gem_wait *args = data;
3361 struct drm_i915_gem_object *obj;
Chris Wilsone95433c2016-10-28 13:58:27 +01003362 ktime_t start;
3363 long ret;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003364
Daniel Vetter11b5d512014-09-29 15:31:26 +02003365 if (args->flags != 0)
3366 return -EINVAL;
3367
Chris Wilson03ac0642016-07-20 13:31:51 +01003368 obj = i915_gem_object_lookup(file, args->bo_handle);
Chris Wilson033d5492016-08-05 10:14:17 +01003369 if (!obj)
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003370 return -ENOENT;
Chris Wilson033d5492016-08-05 10:14:17 +01003371
Chris Wilsone95433c2016-10-28 13:58:27 +01003372 start = ktime_get();
3373
3374 ret = i915_gem_object_wait(obj,
3375 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
3376 to_wait_timeout(args->timeout_ns),
3377 to_rps_client(file));
3378
3379 if (args->timeout_ns > 0) {
3380 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3381 if (args->timeout_ns < 0)
3382 args->timeout_ns = 0;
Chris Wilsonc1d20612017-02-16 12:54:41 +00003383
3384 /*
3385 * Apparently ktime isn't accurate enough and occasionally has a
3386 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3387 * things up to make the test happy. We allow up to 1 jiffy.
3388 *
3389 * This is a regression from the timespec->ktime conversion.
3390 */
3391 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3392 args->timeout_ns = 0;
Chris Wilsonb8050142017-08-11 11:57:31 +01003393
3394 /* Asked to wait beyond the jiffie/scheduler precision? */
3395 if (ret == -ETIME && args->timeout_ns)
3396 ret = -EAGAIN;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003397 }
3398
Chris Wilsonf0cd5182016-10-28 13:58:43 +01003399 i915_gem_object_put(obj);
John Harrisonff865882014-11-24 18:49:28 +00003400 return ret;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003401}
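/*
 * Userspace view of the wait ioctl above (an illustrative sketch only;
 * the exact libdrm wrapper used here is an assumption, not taken from
 * this file):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = -1,
 *	};
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * A negative timeout_ns waits indefinitely; on a timeout the ioctl fails
 * with ETIME and the remaining time is left in wait.timeout_ns, as
 * described in the kernel-doc above.
 */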
3402
Chris Wilson73cb9702016-10-28 13:58:46 +01003403static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003404{
Chris Wilson73cb9702016-10-28 13:58:46 +01003405 int ret, i;
3406
3407 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3408 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
3409 if (ret)
3410 return ret;
3411 }
3412
3413 return 0;
3414}
3415
Chris Wilson25112b62017-03-30 15:50:39 +01003416static int wait_for_engines(struct drm_i915_private *i915)
3417{
Chris Wilsoncad99462017-08-26 12:09:33 +01003418 if (wait_for(intel_engines_are_idle(i915), 50)) {
3419 DRM_ERROR("Failed to idle engines, declaring wedged!\n");
3420 i915_gem_set_wedged(i915);
3421 return -EIO;
Chris Wilson25112b62017-03-30 15:50:39 +01003422 }
3423
3424 return 0;
3425}
3426
Chris Wilson73cb9702016-10-28 13:58:46 +01003427int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3428{
Dave Gordonb4ac5af2016-03-24 11:20:38 +00003429 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003430
Chris Wilson863e9fd2017-05-30 13:13:32 +01003431 /* If the device is asleep, we have no requests outstanding */
3432 if (!READ_ONCE(i915->gt.awake))
3433 return 0;
3434
Chris Wilson9caa34a2016-11-11 14:58:08 +00003435 if (flags & I915_WAIT_LOCKED) {
3436 struct i915_gem_timeline *tl;
3437
3438 lockdep_assert_held(&i915->drm.struct_mutex);
3439
3440 list_for_each_entry(tl, &i915->gt.timelines, link) {
3441 ret = wait_for_timeline(tl, flags);
3442 if (ret)
3443 return ret;
3444 }
Chris Wilson72022a72017-03-30 15:50:38 +01003445
3446 i915_gem_retire_requests(i915);
3447 GEM_BUG_ON(i915->gt.active_requests);
Chris Wilson25112b62017-03-30 15:50:39 +01003448
3449 ret = wait_for_engines(i915);
Chris Wilson9caa34a2016-11-11 14:58:08 +00003450 } else {
3451 ret = wait_for_timeline(&i915->gt.global_timeline, flags);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003452 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003453
Chris Wilson25112b62017-03-30 15:50:39 +01003454 return ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003455}
3456
Chris Wilson5a97bcc2017-02-22 11:40:46 +00003457static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3458{
Chris Wilsone27ab732017-06-15 13:38:49 +01003459 /*
3460 * We manually flush the CPU domain so that we can override and
3461 * force the flush for the display, and perform it asyncrhonously.
3462 */
3463 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3464 if (obj->cache_dirty)
3465 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
Chris Wilson5a97bcc2017-02-22 11:40:46 +00003466 obj->base.write_domain = 0;
3467}
3468
3469void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
3470{
3471 if (!READ_ONCE(obj->pin_display))
3472 return;
3473
3474 mutex_lock(&obj->base.dev->struct_mutex);
3475 __i915_gem_object_flush_for_display(obj);
3476 mutex_unlock(&obj->base.dev->struct_mutex);
3477}
3478
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003479/**
Chris Wilsone22d8e32017-04-12 12:01:11 +01003480 * Moves a single object to the WC read, and possibly write domain.
3481 * @obj: object to act on
3482 * @write: ask for write access or read only
3483 *
3484 * This function returns when the move is complete, including waiting on
3485 * flushes to occur.
3486 */
3487int
3488i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3489{
3490 int ret;
3491
3492 lockdep_assert_held(&obj->base.dev->struct_mutex);
3493
3494 ret = i915_gem_object_wait(obj,
3495 I915_WAIT_INTERRUPTIBLE |
3496 I915_WAIT_LOCKED |
3497 (write ? I915_WAIT_ALL : 0),
3498 MAX_SCHEDULE_TIMEOUT,
3499 NULL);
3500 if (ret)
3501 return ret;
3502
3503 if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
3504 return 0;
3505
3506 /* Flush and acquire obj->pages so that we are coherent through
3507 * direct access in memory with previous cached writes through
3508 * shmemfs and that our cache domain tracking remains valid.
3509 * For example, if the obj->filp was moved to swap without us
3510 * being notified and releasing the pages, we would mistakenly
3511 * continue to assume that the obj remained out of the CPU cached
3512 * domain.
3513 */
3514 ret = i915_gem_object_pin_pages(obj);
3515 if (ret)
3516 return ret;
3517
3518 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
3519
3520 /* Serialise direct access to this object with the barriers for
3521 * coherent writes from the GPU, by effectively invalidating the
3522 * WC domain upon first access.
3523 */
3524 if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
3525 mb();
3526
3527 /* It should now be out of any other write domains, and we can update
3528 * the domain values for our changes.
3529 */
3530 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
3531 obj->base.read_domains |= I915_GEM_DOMAIN_WC;
3532 if (write) {
3533 obj->base.read_domains = I915_GEM_DOMAIN_WC;
3534 obj->base.write_domain = I915_GEM_DOMAIN_WC;
3535 obj->mm.dirty = true;
3536 }
3537
3538 i915_gem_object_unpin_pages(obj);
3539 return 0;
3540}
3541
3542/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003543 * Moves a single object to the GTT read, and possibly write domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003544 * @obj: object to act on
3545 * @write: ask for write access or read only
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003546 *
3547 * This function returns when the move is complete, including waiting on
3548 * flushes to occur.
3549 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003550int
Chris Wilson20217462010-11-23 15:26:33 +00003551i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003552{
Eric Anholte47c68e2008-11-14 13:35:19 -08003553 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003554
Chris Wilsone95433c2016-10-28 13:58:27 +01003555 lockdep_assert_held(&obj->base.dev->struct_mutex);
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003556
Chris Wilsone95433c2016-10-28 13:58:27 +01003557 ret = i915_gem_object_wait(obj,
3558 I915_WAIT_INTERRUPTIBLE |
3559 I915_WAIT_LOCKED |
3560 (write ? I915_WAIT_ALL : 0),
3561 MAX_SCHEDULE_TIMEOUT,
3562 NULL);
Chris Wilson88241782011-01-07 17:09:48 +00003563 if (ret)
3564 return ret;
3565
Chris Wilsonc13d87e2016-07-20 09:21:15 +01003566 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3567 return 0;
3568
Chris Wilson43566de2015-01-02 16:29:29 +05303569 /* Flush and acquire obj->pages so that we are coherent through
3570 * direct access in memory with previous cached writes through
3571 * shmemfs and that our cache domain tracking remains valid.
3572 * For example, if the obj->filp was moved to swap without us
3573 * being notified and releasing the pages, we would mistakenly
3574 * continue to assume that the obj remained out of the CPU cached
3575 * domain.
3576 */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003577 ret = i915_gem_object_pin_pages(obj);
Chris Wilson43566de2015-01-02 16:29:29 +05303578 if (ret)
3579 return ret;
3580
Chris Wilsonef749212017-04-12 12:01:10 +01003581 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003582
Chris Wilsond0a57782012-10-09 19:24:37 +01003583 /* Serialise direct access to this object with the barriers for
3584 * coherent writes from the GPU, by effectively invalidating the
3585 * GTT domain upon first access.
3586 */
3587 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3588 mb();
3589
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003590 /* It should now be out of any other write domains, and we can update
3591 * the domain values for our changes.
3592 */
Chris Wilson40e62d52016-10-28 13:58:41 +01003593 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilson05394f32010-11-08 19:18:58 +00003594 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003595 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003596 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3597 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003598 obj->mm.dirty = true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003599 }
3600
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003601 i915_gem_object_unpin_pages(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003602 return 0;
3603}
3604
Chris Wilsonef55f922015-10-09 14:11:27 +01003605/**
3606 * Changes the cache-level of an object across all VMA.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003607 * @obj: object to act on
3608 * @cache_level: new cache level to set for the object
Chris Wilsonef55f922015-10-09 14:11:27 +01003609 *
3610 * After this function returns, the object will be in the new cache-level
3611 * across all GTT and the contents of the backing storage will be coherent,
3612 * with respect to the new cache-level. In order to keep the backing storage
3613 * coherent for all users, we only allow a single cache level to be set
3614 * globally on the object and prevent it from being changed whilst the
3615 * hardware is reading from the object. That is if the object is currently
3616 * hardware is reading from the object. That is, if the object is currently
3617 * cache coherency) and all non-MOCS GPU access will also be uncached so
3618 * that all direct access to the scanout remains coherent.
3619 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003620int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3621 enum i915_cache_level cache_level)
3622{
Chris Wilsonaa653a62016-08-04 07:52:27 +01003623 struct i915_vma *vma;
Chris Wilsona6a7cc42016-11-18 21:17:46 +00003624 int ret;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003625
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003626 lockdep_assert_held(&obj->base.dev->struct_mutex);
3627
Chris Wilsone4ffd172011-04-04 09:44:39 +01003628 if (obj->cache_level == cache_level)
Chris Wilsona6a7cc42016-11-18 21:17:46 +00003629 return 0;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003630
Chris Wilsonef55f922015-10-09 14:11:27 +01003631 /* Inspect the list of currently bound VMA and unbind any that would
3632 * be invalid given the new cache-level. This is principally to
3633 * catch the issue of the CS prefetch crossing page boundaries and
3634 * reading an invalid PTE on older architectures.
3635 */
Chris Wilsonaa653a62016-08-04 07:52:27 +01003636restart:
3637 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003638 if (!drm_mm_node_allocated(&vma->node))
3639 continue;
3640
Chris Wilson20dfbde2016-08-04 16:32:30 +01003641 if (i915_vma_is_pinned(vma)) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003642 DRM_DEBUG("can not change the cache level of pinned objects\n");
3643 return -EBUSY;
3644 }
3645
Chris Wilsonaa653a62016-08-04 07:52:27 +01003646 if (i915_gem_valid_gtt_space(vma, cache_level))
3647 continue;
3648
3649 ret = i915_vma_unbind(vma);
3650 if (ret)
3651 return ret;
3652
3653 /* As unbinding may affect other elements in the
3654 * obj->vma_list (due to side-effects from retiring
3655 * an active vma), play safe and restart the iterator.
3656 */
3657 goto restart;
Chris Wilson42d6ab42012-07-26 11:49:32 +01003658 }
3659
	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However, since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * well rewrite the PTE in the belief that doing so tramples upon
	 * less state and so involves less work.
	 */
Chris Wilson15717de2016-08-04 07:52:26 +01003667 if (obj->bind_count) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003668 /* Before we change the PTE, the GPU must not be accessing it.
3669 * If we wait upon the object, we know that all the bound
3670 * VMA are no longer active.
3671 */
Chris Wilsone95433c2016-10-28 13:58:27 +01003672 ret = i915_gem_object_wait(obj,
3673 I915_WAIT_INTERRUPTIBLE |
3674 I915_WAIT_LOCKED |
3675 I915_WAIT_ALL,
3676 MAX_SCHEDULE_TIMEOUT,
3677 NULL);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003678 if (ret)
3679 return ret;
3680
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00003681 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3682 cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
3690 i915_gem_release_mmap(obj);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003691
Chris Wilsonef55f922015-10-09 14:11:27 +01003692 /* As we no longer need a fence for GTT access,
3693 * we can relinquish it now (and so prevent having
3694 * to steal a fence from someone else on the next
3695 * fence request). Note GPU activity would have
3696 * dropped the fence as all snoopable access is
3697 * supposed to be linear.
3698 */
Chris Wilson49ef5292016-08-18 17:17:00 +01003699 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3700 ret = i915_vma_put_fence(vma);
3701 if (ret)
3702 return ret;
3703 }
Chris Wilsonef55f922015-10-09 14:11:27 +01003704 } else {
3705 /* We either have incoherent backing store and
3706 * so no GTT access or the architecture is fully
3707 * coherent. In such cases, existing GTT mmaps
3708 * ignore the cache bit in the PTE and we can
3709 * rewrite it without confusing the GPU or having
3710 * to force userspace to fault back in its mmaps.
3711 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003712 }
3713
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003714 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003715 if (!drm_mm_node_allocated(&vma->node))
3716 continue;
3717
3718 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3719 if (ret)
3720 return ret;
3721 }
Chris Wilsone4ffd172011-04-04 09:44:39 +01003722 }
3723
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003724 list_for_each_entry(vma, &obj->vma_list, obj_link)
Chris Wilson2c225692013-08-09 12:26:45 +01003725 vma->node.color = cache_level;
Chris Wilsonb8f55be2017-08-11 12:11:16 +01003726 i915_gem_object_set_cache_coherency(obj, cache_level);
Chris Wilsone27ab732017-06-15 13:38:49 +01003727 obj->cache_dirty = true; /* Always invalidate stale cachelines */
Chris Wilson2c225692013-08-09 12:26:45 +01003728
Chris Wilsone4ffd172011-04-04 09:44:39 +01003729 return 0;
3730}
3731
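/*
 * i915_gem_get_caching_ioctl - report an object's caching mode to userspace.
 * Looks the object up under the RCU read lock (no struct_mutex required) and
 * translates obj->cache_level into the uAPI values: LLC/L3_LLC are reported
 * as I915_CACHING_CACHED, WT as I915_CACHING_DISPLAY and anything else as
 * I915_CACHING_NONE.
 */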
Ben Widawsky199adf42012-09-21 17:01:20 -07003732int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3733 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003734{
Ben Widawsky199adf42012-09-21 17:01:20 -07003735 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003736 struct drm_i915_gem_object *obj;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003737 int err = 0;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003738
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003739 rcu_read_lock();
3740 obj = i915_gem_object_lookup_rcu(file, args->handle);
3741 if (!obj) {
3742 err = -ENOENT;
3743 goto out;
3744 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003745
Chris Wilson651d7942013-08-08 14:41:10 +01003746 switch (obj->cache_level) {
3747 case I915_CACHE_LLC:
3748 case I915_CACHE_L3_LLC:
3749 args->caching = I915_CACHING_CACHED;
3750 break;
3751
Chris Wilson4257d3b2013-08-08 14:41:11 +01003752 case I915_CACHE_WT:
3753 args->caching = I915_CACHING_DISPLAY;
3754 break;
3755
Chris Wilson651d7942013-08-08 14:41:10 +01003756 default:
3757 args->caching = I915_CACHING_NONE;
3758 break;
3759 }
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003760out:
3761 rcu_read_unlock();
3762 return err;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003763}
3764
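/*
 * i915_gem_set_caching_ioctl - change an object's caching mode on behalf of
 * userspace. The uAPI value is mapped onto an i915_cache_level (rejected
 * outright if the platform has neither LLC nor snooping), the object is
 * waited upon, and the new level is then applied under struct_mutex via
 * i915_gem_object_set_cache_level().
 */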
Ben Widawsky199adf42012-09-21 17:01:20 -07003765int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3766 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003767{
Chris Wilson9c870d02016-10-24 13:42:15 +01003768 struct drm_i915_private *i915 = to_i915(dev);
Ben Widawsky199adf42012-09-21 17:01:20 -07003769 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003770 struct drm_i915_gem_object *obj;
3771 enum i915_cache_level level;
Chris Wilsond65415d2017-01-19 08:22:10 +00003772 int ret = 0;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003773
Ben Widawsky199adf42012-09-21 17:01:20 -07003774 switch (args->caching) {
3775 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003776 level = I915_CACHE_NONE;
3777 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003778 case I915_CACHING_CACHED:
Imre Deake5756c12015-08-14 18:43:30 +03003779 /*
3780 * Due to a HW issue on BXT A stepping, GPU stores via a
3781 * snooped mapping may leave stale data in a corresponding CPU
3782 * cacheline, whereas normally such cachelines would get
3783 * invalidated.
3784 */
Chris Wilson9c870d02016-10-24 13:42:15 +01003785 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
Imre Deake5756c12015-08-14 18:43:30 +03003786 return -ENODEV;
3787
Chris Wilsone6994ae2012-07-10 10:27:08 +01003788 level = I915_CACHE_LLC;
3789 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003790 case I915_CACHING_DISPLAY:
Chris Wilson9c870d02016-10-24 13:42:15 +01003791 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003792 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003793 default:
3794 return -EINVAL;
3795 }
3796
Chris Wilsond65415d2017-01-19 08:22:10 +00003797 obj = i915_gem_object_lookup(file, args->handle);
3798 if (!obj)
3799 return -ENOENT;
3800
3801 if (obj->cache_level == level)
3802 goto out;
3803
3804 ret = i915_gem_object_wait(obj,
3805 I915_WAIT_INTERRUPTIBLE,
3806 MAX_SCHEDULE_TIMEOUT,
3807 to_rps_client(file));
3808 if (ret)
3809 goto out;
3810
Ben Widawsky3bc29132012-09-26 16:15:20 -07003811 ret = i915_mutex_lock_interruptible(dev);
3812 if (ret)
Chris Wilsond65415d2017-01-19 08:22:10 +00003813 goto out;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003814
3815 ret = i915_gem_object_set_cache_level(obj, level);
Chris Wilsone6994ae2012-07-10 10:27:08 +01003816 mutex_unlock(&dev->struct_mutex);
Chris Wilsond65415d2017-01-19 08:22:10 +00003817
3818out:
3819 i915_gem_object_put(obj);
Chris Wilsone6994ae2012-07-10 10:27:08 +01003820 return ret;
3821}
3822
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003823/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003824 * Prepare buffer for display plane (scanout, cursors, etc).
3825 * Can be called from an uninterruptible phase (modesetting) and allows
3826 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003827 */
Chris Wilson058d88c2016-08-15 10:49:06 +01003828struct i915_vma *
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003829i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3830 u32 alignment,
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003831 const struct i915_ggtt_view *view)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003832{
Chris Wilson058d88c2016-08-15 10:49:06 +01003833 struct i915_vma *vma;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003834 int ret;
3835
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003836 lockdep_assert_held(&obj->base.dev->struct_mutex);
3837
Chris Wilsoncc98b412013-08-09 12:25:09 +01003838 /* Mark the pin_display early so that we account for the
3839 * display coherency whilst setting up the cache domains.
3840 */
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003841 obj->pin_display++;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003842
Eric Anholta7ef0642011-03-29 16:59:54 -07003843 /* The display engine is not coherent with the LLC cache on gen6. As
3844 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is the lowest common denominator for
	 * all chipsets.
3847 *
3848 * However for gen6+, we could do better by using the GFDT bit instead
3849 * of uncaching, which would allow us to flush all the LLC-cached data
3850 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3851 */
Chris Wilson651d7942013-08-08 14:41:10 +01003852 ret = i915_gem_object_set_cache_level(obj,
Tvrtko Ursulin86527442016-10-13 11:03:00 +01003853 HAS_WT(to_i915(obj->base.dev)) ?
3854 I915_CACHE_WT : I915_CACHE_NONE);
Chris Wilson058d88c2016-08-15 10:49:06 +01003855 if (ret) {
3856 vma = ERR_PTR(ret);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003857 goto err_unpin_display;
Chris Wilson058d88c2016-08-15 10:49:06 +01003858 }
Eric Anholta7ef0642011-03-29 16:59:54 -07003859
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003860 /* As the user may map the buffer once pinned in the display plane
3861 * (e.g. libkms for the bootup splash), we have to ensure that we
Chris Wilson2efb8132016-08-18 17:17:06 +01003862 * always use map_and_fenceable for all scanout buffers. However,
3863 * it may simply be too big to fit into mappable, in which case
3864 * put it anyway and hope that userspace can cope (but always first
3865 * try to preserve the existing ABI).
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003866 */
Chris Wilson2efb8132016-08-18 17:17:06 +01003867 vma = ERR_PTR(-ENOSPC);
Chris Wilson47a8e3f2017-01-14 00:28:27 +00003868 if (!view || view->type == I915_GGTT_VIEW_NORMAL)
Chris Wilson2efb8132016-08-18 17:17:06 +01003869 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3870 PIN_MAPPABLE | PIN_NONBLOCK);
Chris Wilson767a2222016-11-07 11:01:28 +00003871 if (IS_ERR(vma)) {
3872 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3873 unsigned int flags;
3874
3875 /* Valleyview is definitely limited to scanning out the first
		 * 512MiB. Let's presume this behaviour was inherited from the
3877 * g4x display engine and that all earlier gen are similarly
3878 * limited. Testing suggests that it is a little more
3879 * complicated than this. For example, Cherryview appears quite
3880 * happy to scanout from anywhere within its global aperture.
3881 */
3882 flags = 0;
3883 if (HAS_GMCH_DISPLAY(i915))
3884 flags = PIN_MAPPABLE;
3885 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3886 }
Chris Wilson058d88c2016-08-15 10:49:06 +01003887 if (IS_ERR(vma))
Chris Wilsoncc98b412013-08-09 12:25:09 +01003888 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003889
Chris Wilsond8923dc2016-08-18 17:17:07 +01003890 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3891
Chris Wilsona6a7cc42016-11-18 21:17:46 +00003892 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
Chris Wilson5a97bcc2017-02-22 11:40:46 +00003893 __i915_gem_object_flush_for_display(obj);
Chris Wilsond59b21e2017-02-22 11:40:49 +00003894 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003895
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003896 /* It should now be out of any other write domains, and we can update
3897 * the domain values for our changes.
3898 */
Chris Wilson05394f32010-11-08 19:18:58 +00003899 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003900
Chris Wilson058d88c2016-08-15 10:49:06 +01003901 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003902
3903err_unpin_display:
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003904 obj->pin_display--;
Chris Wilson058d88c2016-08-15 10:49:06 +01003905 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003906}
3907
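/*
 * Counterpart to i915_gem_object_pin_to_display_plane(): drop the
 * pin_display reference, restore the default display alignment once the
 * last display pin is gone, bump the object towards the inactive end of
 * the GGTT LRU to avoid premature eviction whilst flipping, and unpin the
 * VMA.
 */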
3908void
Chris Wilson058d88c2016-08-15 10:49:06 +01003909i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003910{
Chris Wilson49d73912016-11-29 09:50:08 +00003911 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003912
Chris Wilson058d88c2016-08-15 10:49:06 +01003913 if (WARN_ON(vma->obj->pin_display == 0))
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003914 return;
3915
Chris Wilsond8923dc2016-08-18 17:17:07 +01003916 if (--vma->obj->pin_display == 0)
Chris Wilsonf51455d2017-01-10 14:47:34 +00003917 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003918
Chris Wilson383d5822016-08-18 17:17:08 +01003919 /* Bump the LRU to try and avoid premature eviction whilst flipping */
Chris Wilsonbefedbb2017-01-19 19:26:55 +00003920 i915_gem_object_bump_inactive_ggtt(vma->obj);
Chris Wilson383d5822016-08-18 17:17:08 +01003921
Chris Wilson058d88c2016-08-15 10:49:06 +01003922 i915_vma_unpin(vma);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003923}
3924
Eric Anholte47c68e2008-11-14 13:35:19 -08003925/**
3926 * Moves a single object to the CPU read, and possibly write domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003927 * @obj: object to act on
3928 * @write: requesting write or read-only access
Eric Anholte47c68e2008-11-14 13:35:19 -08003929 *
3930 * This function returns when the move is complete, including waiting on
3931 * flushes to occur.
3932 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003933int
Chris Wilson919926a2010-11-12 13:42:53 +00003934i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003935{
Eric Anholte47c68e2008-11-14 13:35:19 -08003936 int ret;
3937
Chris Wilsone95433c2016-10-28 13:58:27 +01003938 lockdep_assert_held(&obj->base.dev->struct_mutex);
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003939
Chris Wilsone95433c2016-10-28 13:58:27 +01003940 ret = i915_gem_object_wait(obj,
3941 I915_WAIT_INTERRUPTIBLE |
3942 I915_WAIT_LOCKED |
3943 (write ? I915_WAIT_ALL : 0),
3944 MAX_SCHEDULE_TIMEOUT,
3945 NULL);
Chris Wilson88241782011-01-07 17:09:48 +00003946 if (ret)
3947 return ret;
3948
Chris Wilsonef749212017-04-12 12:01:10 +01003949 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
Eric Anholte47c68e2008-11-14 13:35:19 -08003950
Eric Anholte47c68e2008-11-14 13:35:19 -08003951 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003952 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson57822dc2017-02-22 11:40:48 +00003953 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
Chris Wilson05394f32010-11-08 19:18:58 +00003954 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003955 }
3956
3957 /* It should now be out of any other write domains, and we can update
3958 * the domain values for our changes.
3959 */
Chris Wilsone27ab732017-06-15 13:38:49 +01003960 GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Eric Anholte47c68e2008-11-14 13:35:19 -08003961
3962 /* If we're writing through the CPU, then the GPU read domains will
3963 * need to be invalidated at next use.
3964 */
Chris Wilsone27ab732017-06-15 13:38:49 +01003965 if (write)
3966 __start_cpu_write(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003967
3968 return 0;
3969}
3970
Eric Anholt673a3942008-07-30 12:06:12 -07003971/* Throttle our rendering by waiting until the ring has completed our requests
3972 * emitted over 20 msec ago.
3973 *
Eric Anholtb9624422009-06-03 07:27:35 +00003974 * Note that if we were to use the current jiffies each time around the loop,
3975 * we wouldn't escape the function with any frames outstanding if the time to
3976 * render a frame was over 20ms.
3977 *
Eric Anholt673a3942008-07-30 12:06:12 -07003978 * This should get us reasonable parallelism between CPU and GPU but also
3979 * relatively low latency when blocking on a particular request to finish.
3980 */
3981static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003982i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003983{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003984 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003985 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilsond0bc54f2015-05-21 21:01:48 +01003986 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
John Harrison54fb2412014-11-24 18:49:27 +00003987 struct drm_i915_gem_request *request, *target = NULL;
Chris Wilsone95433c2016-10-28 13:58:27 +01003988 long ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003989
Chris Wilsonf4457ae2016-04-13 17:35:08 +01003990 /* ABI: return -EIO if already wedged */
3991 if (i915_terminally_wedged(&dev_priv->gpu_error))
3992 return -EIO;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003993
Chris Wilson1c255952010-09-26 11:03:27 +01003994 spin_lock(&file_priv->mm.lock);
Chris Wilsonc8659ef2017-03-02 12:25:25 +00003995 list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
Eric Anholtb9624422009-06-03 07:27:35 +00003996 if (time_after_eq(request->emitted_jiffies, recent_enough))
3997 break;
3998
Chris Wilsonc8659ef2017-03-02 12:25:25 +00003999 if (target) {
4000 list_del(&target->client_link);
4001 target->file_priv = NULL;
4002 }
John Harrisonfcfa423c2015-05-29 17:44:12 +01004003
John Harrison54fb2412014-11-24 18:49:27 +00004004 target = request;
Eric Anholtb9624422009-06-03 07:27:35 +00004005 }
John Harrisonff865882014-11-24 18:49:28 +00004006 if (target)
Chris Wilsone8a261e2016-07-20 13:31:49 +01004007 i915_gem_request_get(target);
Chris Wilson1c255952010-09-26 11:03:27 +01004008 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004009
John Harrison54fb2412014-11-24 18:49:27 +00004010 if (target == NULL)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004011 return 0;
4012
Chris Wilsone95433c2016-10-28 13:58:27 +01004013 ret = i915_wait_request(target,
4014 I915_WAIT_INTERRUPTIBLE,
4015 MAX_SCHEDULE_TIMEOUT);
Chris Wilsone8a261e2016-07-20 13:31:49 +01004016 i915_gem_request_put(target);
John Harrisonff865882014-11-24 18:49:28 +00004017
Chris Wilsone95433c2016-10-28 13:58:27 +01004018 return ret < 0 ? ret : 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004019}
4020
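/*
 * i915_gem_object_ggtt_pin - pin an object into the global GTT. The VMA for
 * the requested view is looked up (or created), unbound first if it is
 * misplaced for the requested size/alignment/flags - subject to the
 * PIN_NONBLOCK and mappable-aperture heuristics below - and then pinned
 * with PIN_GLOBAL.
 */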
Chris Wilson058d88c2016-08-15 10:49:06 +01004021struct i915_vma *
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004022i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4023 const struct i915_ggtt_view *view,
Chris Wilson91b2db62016-08-04 16:32:23 +01004024 u64 size,
Chris Wilson2ffffd02016-08-04 16:32:22 +01004025 u64 alignment,
4026 u64 flags)
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004027{
Chris Wilsonad16d2e2016-10-13 09:55:04 +01004028 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
4029 struct i915_address_space *vm = &dev_priv->ggtt.base;
Chris Wilson59bfa122016-08-04 16:32:31 +01004030 struct i915_vma *vma;
4031 int ret;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03004032
Chris Wilson4c7d62c2016-10-28 13:58:32 +01004033 lockdep_assert_held(&obj->base.dev->struct_mutex);
4034
Chris Wilson718659a2017-01-16 15:21:28 +00004035 vma = i915_vma_instance(obj, vm, view);
Chris Wilsone0216b72017-01-19 19:26:57 +00004036 if (unlikely(IS_ERR(vma)))
Chris Wilson058d88c2016-08-15 10:49:06 +01004037 return vma;
Chris Wilson59bfa122016-08-04 16:32:31 +01004038
4039 if (i915_vma_misplaced(vma, size, alignment, flags)) {
4040 if (flags & PIN_NONBLOCK &&
4041 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
Chris Wilson058d88c2016-08-15 10:49:06 +01004042 return ERR_PTR(-ENOSPC);
Chris Wilson59bfa122016-08-04 16:32:31 +01004043
Chris Wilsonad16d2e2016-10-13 09:55:04 +01004044 if (flags & PIN_MAPPABLE) {
Chris Wilsonad16d2e2016-10-13 09:55:04 +01004045 /* If the required space is larger than the available
			 * aperture, we will not be able to find a slot for the
4047 * object and unbinding the object now will be in
4048 * vain. Worse, doing so may cause us to ping-pong
4049 * the object in and out of the Global GTT and
4050 * waste a lot of cycles under the mutex.
4051 */
Chris Wilson944397f2017-01-09 16:16:11 +00004052 if (vma->fence_size > dev_priv->ggtt.mappable_end)
Chris Wilsonad16d2e2016-10-13 09:55:04 +01004053 return ERR_PTR(-E2BIG);
4054
4055 /* If NONBLOCK is set the caller is optimistically
4056 * trying to cache the full object within the mappable
4057 * aperture, and *must* have a fallback in place for
4058 * situations where we cannot bind the object. We
4059 * can be a little more lax here and use the fallback
4060 * more often to avoid costly migrations of ourselves
4061 * and other objects within the aperture.
4062 *
4063 * Half-the-aperture is used as a simple heuristic.
			 * More interesting would be to do a search for a free
			 * block prior to making the commitment to unbind.
4066 * That caters for the self-harm case, and with a
4067 * little more heuristics (e.g. NOFAULT, NOEVICT)
4068 * we could try to minimise harm to others.
4069 */
4070 if (flags & PIN_NONBLOCK &&
Chris Wilson944397f2017-01-09 16:16:11 +00004071 vma->fence_size > dev_priv->ggtt.mappable_end / 2)
Chris Wilsonad16d2e2016-10-13 09:55:04 +01004072 return ERR_PTR(-ENOSPC);
4073 }
4074
Chris Wilson59bfa122016-08-04 16:32:31 +01004075 WARN(i915_vma_is_pinned(vma),
4076 "bo is already pinned in ggtt with incorrect alignment:"
Chris Wilson05a20d02016-08-18 17:16:55 +01004077 " offset=%08x, req.alignment=%llx,"
4078 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
4079 i915_ggtt_offset(vma), alignment,
Chris Wilson59bfa122016-08-04 16:32:31 +01004080 !!(flags & PIN_MAPPABLE),
Chris Wilson05a20d02016-08-18 17:16:55 +01004081 i915_vma_is_map_and_fenceable(vma));
Chris Wilson59bfa122016-08-04 16:32:31 +01004082 ret = i915_vma_unbind(vma);
4083 if (ret)
Chris Wilson058d88c2016-08-15 10:49:06 +01004084 return ERR_PTR(ret);
Chris Wilson59bfa122016-08-04 16:32:31 +01004085 }
4086
Chris Wilson058d88c2016-08-15 10:49:06 +01004087 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
4088 if (ret)
4089 return ERR_PTR(ret);
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02004090
Chris Wilson058d88c2016-08-15 10:49:06 +01004091 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07004092}
4093
Chris Wilsonedf6b762016-08-09 09:23:33 +01004094static __always_inline unsigned int __busy_read_flag(unsigned int id)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004095{
4096 /* Note that we could alias engines in the execbuf API, but
4097 * that would be very unwise as it prevents userspace from
4098 * fine control over engine selection. Ahem.
4099 *
4100 * This should be something like EXEC_MAX_ENGINE instead of
4101 * I915_NUM_ENGINES.
4102 */
4103 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
4104 return 0x10000 << id;
4105}
4106
4107static __always_inline unsigned int __busy_write_id(unsigned int id)
4108{
Chris Wilson70cb4722016-08-09 18:08:25 +01004109 /* The uABI guarantees an active writer is also amongst the read
4110 * engines. This would be true if we accessed the activity tracking
4111 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we cannot guarantee that the last_write
4113 * being active implies that we have set the same engine flag from
4114 * last_read - hence we always set both read and write busy for
4115 * last_write.
4116 */
4117 return id | __busy_read_flag(id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004118}
4119
Chris Wilsonedf6b762016-08-09 09:23:33 +01004120static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01004121__busy_set_if_active(const struct dma_fence *fence,
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004122 unsigned int (*flag)(unsigned int id))
4123{
Chris Wilsond07f0e52016-10-28 13:58:44 +01004124 struct drm_i915_gem_request *rq;
Chris Wilson12555012016-08-16 09:50:40 +01004125
Chris Wilsond07f0e52016-10-28 13:58:44 +01004126 /* We have to check the current hw status of the fence as the uABI
4127 * guarantees forward progress. We could rely on the idle worker
4128 * to eventually flush us, but to minimise latency just ask the
4129 * hardware.
4130 *
4131 * Note we only report on the status of native fences.
4132 */
4133 if (!dma_fence_is_i915(fence))
Chris Wilson12555012016-08-16 09:50:40 +01004134 return 0;
4135
Chris Wilsond07f0e52016-10-28 13:58:44 +01004136 /* opencode to_request() in order to avoid const warnings */
4137 rq = container_of(fence, struct drm_i915_gem_request, fence);
4138 if (i915_gem_request_completed(rq))
4139 return 0;
4140
Chris Wilson1d39f282017-04-11 13:43:06 +01004141 return flag(rq->engine->uabi_id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004142}
4143
Chris Wilsonedf6b762016-08-09 09:23:33 +01004144static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01004145busy_check_reader(const struct dma_fence *fence)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004146{
Chris Wilsond07f0e52016-10-28 13:58:44 +01004147 return __busy_set_if_active(fence, __busy_read_flag);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004148}
4149
Chris Wilsonedf6b762016-08-09 09:23:33 +01004150static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01004151busy_check_writer(const struct dma_fence *fence)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004152{
Chris Wilsond07f0e52016-10-28 13:58:44 +01004153 if (!fence)
4154 return 0;
4155
4156 return __busy_set_if_active(fence, __busy_write_id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004157}
4158
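/*
 * i915_gem_busy_ioctl - report which engines are still using an object.
 * The result is assembled locklessly from the object's reservation object:
 * the exclusive fence is reported as both writer and reader, shared fences
 * as readers, using the encoding from __busy_read_flag()/__busy_write_id()
 * above (write engine id in the low 16 bits, read engine mask above them).
 *
 * Illustrative sketch only (userspace, not part of this file), assuming the
 * standard libdrm helper and uAPI struct:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && busy.busy) {
 *		unsigned int writer = busy.busy & 0xffff;
 *		unsigned int readers = busy.busy >> 16;
 *	}
 */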
Eric Anholt673a3942008-07-30 12:06:12 -07004159int
Eric Anholt673a3942008-07-30 12:06:12 -07004160i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00004161 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004162{
4163 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004164 struct drm_i915_gem_object *obj;
Chris Wilsond07f0e52016-10-28 13:58:44 +01004165 struct reservation_object_list *list;
4166 unsigned int seq;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004167 int err;
Eric Anholt673a3942008-07-30 12:06:12 -07004168
Chris Wilsond07f0e52016-10-28 13:58:44 +01004169 err = -ENOENT;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004170 rcu_read_lock();
4171 obj = i915_gem_object_lookup_rcu(file, args->handle);
Chris Wilsond07f0e52016-10-28 13:58:44 +01004172 if (!obj)
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004173 goto out;
Chris Wilsond07f0e52016-10-28 13:58:44 +01004174
4175 /* A discrepancy here is that we do not report the status of
4176 * non-i915 fences, i.e. even though we may report the object as idle,
4177 * a call to set-domain may still stall waiting for foreign rendering.
4178 * This also means that wait-ioctl may report an object as busy,
4179 * where busy-ioctl considers it idle.
4180 *
4181 * We trade the ability to warn of foreign fences to report on which
4182 * i915 engines are active for the object.
4183 *
4184 * Alternatively, we can trade that extra information on read/write
4185 * activity with
4186 * args->busy =
4187 * !reservation_object_test_signaled_rcu(obj->resv, true);
4188 * to report the overall busyness. This is what the wait-ioctl does.
4189 *
4190 */
4191retry:
4192 seq = raw_read_seqcount(&obj->resv->seq);
4193
4194 /* Translate the exclusive fence to the READ *and* WRITE engine */
4195 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
4196
4197 /* Translate shared fences to READ set of engines */
4198 list = rcu_dereference(obj->resv->fence);
4199 if (list) {
4200 unsigned int shared_count = list->shared_count, i;
4201
4202 for (i = 0; i < shared_count; ++i) {
4203 struct dma_fence *fence =
4204 rcu_dereference(list->shared[i]);
4205
4206 args->busy |= busy_check_reader(fence);
4207 }
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004208 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08004209
Chris Wilsond07f0e52016-10-28 13:58:44 +01004210 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
4211 goto retry;
Chris Wilson426960b2016-01-15 16:51:46 +00004212
Chris Wilsond07f0e52016-10-28 13:58:44 +01004213 err = 0;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004214out:
4215 rcu_read_unlock();
4216 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07004217}
4218
4219int
4220i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4221 struct drm_file *file_priv)
4222{
Akshay Joshi0206e352011-08-16 15:34:10 -04004223 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004224}
4225
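/*
 * i915_gem_madvise_ioctl - let userspace mark an object's backing storage as
 * needed (I915_MADV_WILLNEED) or discardable (I915_MADV_DONTNEED). DONTNEED
 * permits the shrinker to purge the pages under memory pressure; if the
 * pages are not currently attached the backing store is truncated
 * immediately. args->retained reports whether the storage is still present.
 */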
Chris Wilson3ef94da2009-09-14 16:50:29 +01004226int
4227i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4228 struct drm_file *file_priv)
4229{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004230 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson3ef94da2009-09-14 16:50:29 +01004231 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004232 struct drm_i915_gem_object *obj;
Chris Wilson1233e2d2016-10-28 13:58:37 +01004233 int err;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004234
4235 switch (args->madv) {
4236 case I915_MADV_DONTNEED:
4237 case I915_MADV_WILLNEED:
4238 break;
4239 default:
4240 return -EINVAL;
4241 }
4242
Chris Wilson03ac0642016-07-20 13:31:51 +01004243 obj = i915_gem_object_lookup(file_priv, args->handle);
Chris Wilson1233e2d2016-10-28 13:58:37 +01004244 if (!obj)
4245 return -ENOENT;
4246
4247 err = mutex_lock_interruptible(&obj->mm.lock);
4248 if (err)
4249 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004250
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004251 if (obj->mm.pages &&
Chris Wilson3e510a82016-08-05 10:14:23 +01004252 i915_gem_object_is_tiled(obj) &&
Daniel Vetter656bfa32014-11-20 09:26:30 +01004253 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
Chris Wilsonbc0629a2016-11-01 10:03:17 +00004254 if (obj->mm.madv == I915_MADV_WILLNEED) {
4255 GEM_BUG_ON(!obj->mm.quirked);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004256 __i915_gem_object_unpin_pages(obj);
Chris Wilsonbc0629a2016-11-01 10:03:17 +00004257 obj->mm.quirked = false;
4258 }
4259 if (args->madv == I915_MADV_WILLNEED) {
Chris Wilson2c3a3f42016-11-04 10:30:01 +00004260 GEM_BUG_ON(obj->mm.quirked);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004261 __i915_gem_object_pin_pages(obj);
Chris Wilsonbc0629a2016-11-01 10:03:17 +00004262 obj->mm.quirked = true;
4263 }
Daniel Vetter656bfa32014-11-20 09:26:30 +01004264 }
4265
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004266 if (obj->mm.madv != __I915_MADV_PURGED)
4267 obj->mm.madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004268
Chris Wilson6c085a72012-08-20 11:40:46 +02004269 /* if the object is no longer attached, discard its backing storage */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004270 if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
Chris Wilson2d7ef392009-09-20 23:13:10 +01004271 i915_gem_object_truncate(obj);
4272
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004273 args->retained = obj->mm.madv != __I915_MADV_PURGED;
Chris Wilson1233e2d2016-10-28 13:58:37 +01004274 mutex_unlock(&obj->mm.lock);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004275
Chris Wilson1233e2d2016-10-28 13:58:37 +01004276out:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01004277 i915_gem_object_put(obj);
Chris Wilson1233e2d2016-10-28 13:58:37 +01004278 return err;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004279}
4280
Chris Wilson5b8c8ae2016-11-16 19:07:04 +00004281static void
4282frontbuffer_retire(struct i915_gem_active *active,
4283 struct drm_i915_gem_request *request)
4284{
4285 struct drm_i915_gem_object *obj =
4286 container_of(active, typeof(*obj), frontbuffer_write);
4287
Chris Wilsond59b21e2017-02-22 11:40:49 +00004288 intel_fb_obj_flush(obj, ORIGIN_CS);
Chris Wilson5b8c8ae2016-11-16 19:07:04 +00004289}
4290
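/*
 * Common initialisation for a freshly allocated GEM object, independent of
 * its backing storage: set up the locks, list heads, default reservation
 * object, frontbuffer write tracking and the get_page radix tree, and
 * record the backing-storage ops for later use.
 */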
Chris Wilson37e680a2012-06-07 15:38:42 +01004291void i915_gem_object_init(struct drm_i915_gem_object *obj,
4292 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004293{
Chris Wilson1233e2d2016-10-28 13:58:37 +01004294 mutex_init(&obj->mm.lock);
4295
Joonas Lahtinen56cea322016-11-02 12:16:04 +02004296 INIT_LIST_HEAD(&obj->global_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004297 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilsond1b48c12017-08-16 09:52:08 +01004298 INIT_LIST_HEAD(&obj->lut_list);
Chris Wilson8d9d5742015-04-07 16:20:38 +01004299 INIT_LIST_HEAD(&obj->batch_pool_link);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004300
Chris Wilson37e680a2012-06-07 15:38:42 +01004301 obj->ops = ops;
4302
Chris Wilsond07f0e52016-10-28 13:58:44 +01004303 reservation_object_init(&obj->__builtin_resv);
4304 obj->resv = &obj->__builtin_resv;
4305
Chris Wilson50349242016-08-18 17:17:04 +01004306 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
Chris Wilson5b8c8ae2016-11-16 19:07:04 +00004307 init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004308
4309 obj->mm.madv = I915_MADV_WILLNEED;
4310 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
4311 mutex_init(&obj->mm.get_page.lock);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004312
Dave Gordonf19ec8c2016-07-04 11:34:37 +01004313 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004314}
4315
Chris Wilson37e680a2012-06-07 15:38:42 +01004316static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
Tvrtko Ursulin3599a912016-11-01 14:44:10 +00004317 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
4318 I915_GEM_OBJECT_IS_SHRINKABLE,
Chris Wilson7c55e2c2017-03-07 12:03:38 +00004319
Chris Wilson37e680a2012-06-07 15:38:42 +01004320 .get_pages = i915_gem_object_get_pages_gtt,
4321 .put_pages = i915_gem_object_put_pages_gtt,
Chris Wilson7c55e2c2017-03-07 12:03:38 +00004322
4323 .pwrite = i915_gem_object_pwrite_gtt,
Chris Wilson37e680a2012-06-07 15:38:42 +01004324};
4325
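/*
 * Back a GEM object with shmemfs. If the driver has mounted its own private
 * gemfs instance (see i915_gemfs.c), the backing file is created there via
 * shmem_file_setup_with_mnt(); otherwise fall back to the kernel's default
 * shmemfs mount.
 */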
Matthew Auld465c4032017-10-06 23:18:14 +01004326static int i915_gem_object_create_shmem(struct drm_device *dev,
4327 struct drm_gem_object *obj,
4328 size_t size)
4329{
4330 struct drm_i915_private *i915 = to_i915(dev);
4331 unsigned long flags = VM_NORESERVE;
4332 struct file *filp;
4333
4334 drm_gem_private_object_init(dev, obj, size);
4335
4336 if (i915->mm.gemfs)
4337 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
4338 flags);
4339 else
4340 filp = shmem_file_setup("i915", size, flags);
4341
4342 if (IS_ERR(filp))
4343 return PTR_ERR(filp);
4344
4345 obj->filp = filp;
4346
4347 return 0;
4348}
4349
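/*
 * i915_gem_object_create - allocate a shmemfs-backed GEM object of @size
 * bytes. Restricts the page allocation mask on 965G/965GM (which cannot
 * relocate objects above 4GiB), starts the object in the CPU domain and
 * picks the default cache level: LLC-coherent where the platform supports
 * it, otherwise uncached.
 */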
Chris Wilsonb4bcbe22016-10-18 13:02:49 +01004350struct drm_i915_gem_object *
Tvrtko Ursulin12d79d72016-12-01 14:16:37 +00004351i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004352{
Daniel Vetterc397b902010-04-09 19:05:07 +00004353 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004354 struct address_space *mapping;
Chris Wilsonb8f55be2017-08-11 12:11:16 +01004355 unsigned int cache_level;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004356 gfp_t mask;
Chris Wilsonfe3db792016-04-25 13:32:13 +01004357 int ret;
Daniel Vetterc397b902010-04-09 19:05:07 +00004358
Chris Wilsonb4bcbe22016-10-18 13:02:49 +01004359 /* There is a prevalence of the assumption that we fit the object's
4360 * page count inside a 32bit _signed_ variable. Let's document this and
4361 * catch if we ever need to fix it. In the meantime, if you do spot
4362 * such a local variable, please consider fixing!
4363 */
Tvrtko Ursulin7a3ee5d2017-03-30 17:31:30 +01004364 if (size >> PAGE_SHIFT > INT_MAX)
Chris Wilsonb4bcbe22016-10-18 13:02:49 +01004365 return ERR_PTR(-E2BIG);
4366
4367 if (overflows_type(size, obj->base.size))
4368 return ERR_PTR(-E2BIG);
4369
Tvrtko Ursulin187685c2016-12-01 14:16:36 +00004370 obj = i915_gem_object_alloc(dev_priv);
Daniel Vetterc397b902010-04-09 19:05:07 +00004371 if (obj == NULL)
Chris Wilsonfe3db792016-04-25 13:32:13 +01004372 return ERR_PTR(-ENOMEM);
Daniel Vetterc397b902010-04-09 19:05:07 +00004373
Matthew Auld465c4032017-10-06 23:18:14 +01004374 ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
Chris Wilsonfe3db792016-04-25 13:32:13 +01004375 if (ret)
4376 goto fail;
Daniel Vetterc397b902010-04-09 19:05:07 +00004377
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004378 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
Jani Nikulac0f86832016-12-07 12:13:04 +02004379 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004380 /* 965gm cannot relocate objects above 4GiB. */
4381 mask &= ~__GFP_HIGHMEM;
4382 mask |= __GFP_DMA32;
4383 }
4384
Al Viro93c76a32015-12-04 23:45:44 -05004385 mapping = obj->base.filp->f_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004386 mapping_set_gfp_mask(mapping, mask);
Chris Wilson4846bf02017-06-09 12:03:46 +01004387 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
Hugh Dickins5949eac2011-06-27 16:18:18 -07004388
Chris Wilson37e680a2012-06-07 15:38:42 +01004389 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004390
Daniel Vetterc397b902010-04-09 19:05:07 +00004391 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4392 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4393
Chris Wilsonb8f55be2017-08-11 12:11:16 +01004394 if (HAS_LLC(dev_priv))
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004395 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004396 * cache) for about a 10% performance improvement
4397 * compared to uncached. Graphics requests other than
4398 * display scanout are coherent with the CPU in
4399 * accessing this cache. This means in this mode we
4400 * don't need to clflush on the CPU side, and on the
4401 * GPU side we only need to flush internal caches to
4402 * get data visible to the CPU.
4403 *
4404 * However, we maintain the display planes as UC, and so
4405 * need to rebind when first used as such.
4406 */
Chris Wilsonb8f55be2017-08-11 12:11:16 +01004407 cache_level = I915_CACHE_LLC;
4408 else
4409 cache_level = I915_CACHE_NONE;
Eric Anholta1871112011-03-29 16:59:55 -07004410
Chris Wilsonb8f55be2017-08-11 12:11:16 +01004411 i915_gem_object_set_cache_coherency(obj, cache_level);
Chris Wilsone27ab732017-06-15 13:38:49 +01004412
Daniel Vetterd861e332013-07-24 23:25:03 +02004413 trace_i915_gem_object_create(obj);
4414
Chris Wilson05394f32010-11-08 19:18:58 +00004415 return obj;
Chris Wilsonfe3db792016-04-25 13:32:13 +01004416
4417fail:
4418 i915_gem_object_free(obj);
Chris Wilsonfe3db792016-04-25 13:32:13 +01004419 return ERR_PTR(ret);
Daniel Vetterac52bc52010-04-09 19:05:06 +00004420}
4421
Chris Wilson340fbd82014-05-22 09:16:52 +01004422static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4423{
4424 /* If we are the last user of the backing storage (be it shmemfs
4425 * pages or stolen etc), we know that the pages are going to be
4426 * immediately released. In this case, we can then skip copying
4427 * back the contents from the GPU.
4428 */
4429
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004430 if (obj->mm.madv != I915_MADV_WILLNEED)
Chris Wilson340fbd82014-05-22 09:16:52 +01004431 return false;
4432
4433 if (obj->base.filp == NULL)
4434 return true;
4435
4436 /* At first glance, this looks racy, but then again so would be
4437 * userspace racing mmap against close. However, the first external
4438 * reference to the filp can only be obtained through the
4439 * i915_gem_mmap_ioctl() which safeguards us against the user
4440 * acquiring such a reference whilst we are in the middle of
4441 * freeing the object.
4442 */
4443 return atomic_long_read(&obj->base.filp->f_count) == 1;
4444}
4445
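/*
 * Worker half of object freeing, called with a llist of objects whose last
 * reference has been dropped: close any remaining VMA under struct_mutex,
 * then drop the lock and release the pages, backing storage and finally the
 * object allocation itself.
 */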
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004446static void __i915_gem_free_objects(struct drm_i915_private *i915,
4447 struct llist_node *freed)
Chris Wilsonbe726152010-07-23 23:18:50 +01004448{
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004449 struct drm_i915_gem_object *obj, *on;
Chris Wilsonbe726152010-07-23 23:18:50 +01004450
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004451 mutex_lock(&i915->drm.struct_mutex);
4452 intel_runtime_pm_get(i915);
4453 llist_for_each_entry(obj, freed, freed) {
4454 struct i915_vma *vma, *vn;
Paulo Zanonif65c9162013-11-27 18:20:34 -02004455
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004456 trace_i915_gem_object_destroy(obj);
4457
4458 GEM_BUG_ON(i915_gem_object_is_active(obj));
4459 list_for_each_entry_safe(vma, vn,
4460 &obj->vma_list, obj_link) {
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004461 GEM_BUG_ON(i915_vma_is_active(vma));
4462 vma->flags &= ~I915_VMA_PIN_MASK;
4463 i915_vma_close(vma);
4464 }
Chris Wilsondb6c2b42016-11-01 11:54:00 +00004465 GEM_BUG_ON(!list_empty(&obj->vma_list));
4466 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004467
Joonas Lahtinen56cea322016-11-02 12:16:04 +02004468 list_del(&obj->global_link);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004469 }
4470 intel_runtime_pm_put(i915);
4471 mutex_unlock(&i915->drm.struct_mutex);
4472
Chris Wilsonf2be9d62017-04-07 11:25:52 +01004473 cond_resched();
4474
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004475 llist_for_each_entry_safe(obj, on, freed, freed) {
4476 GEM_BUG_ON(obj->bind_count);
Chris Wilsona65adaf2017-10-09 09:43:57 +01004477 GEM_BUG_ON(obj->userfault_count);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004478 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
Chris Wilson67b48042017-08-22 12:05:16 +01004479 GEM_BUG_ON(!list_empty(&obj->lut_list));
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004480
4481 if (obj->ops->release)
4482 obj->ops->release(obj);
4483
4484 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4485 atomic_set(&obj->mm.pages_pin_count, 0);
Chris Wilson548625e2016-11-01 12:11:34 +00004486 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004487 GEM_BUG_ON(obj->mm.pages);
4488
4489 if (obj->base.import_attach)
4490 drm_prime_gem_destroy(&obj->base, NULL);
4491
Chris Wilsond07f0e52016-10-28 13:58:44 +01004492 reservation_object_fini(&obj->__builtin_resv);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004493 drm_gem_object_release(&obj->base);
4494 i915_gem_info_remove_obj(i915, obj->base.size);
4495
4496 kfree(obj->bit_17);
4497 i915_gem_object_free(obj);
4498 }
4499}
4500
4501static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4502{
4503 struct llist_node *freed;
4504
4505 freed = llist_del_all(&i915->mm.free_list);
4506 if (unlikely(freed))
4507 __i915_gem_free_objects(i915, freed);
4508}
4509
4510static void __i915_gem_free_work(struct work_struct *work)
4511{
4512 struct drm_i915_private *i915 =
4513 container_of(work, struct drm_i915_private, mm.free_work);
4514 struct llist_node *freed;
Chris Wilson26e12f82011-03-20 11:20:19 +00004515
Chris Wilsonb1f788c2016-08-04 07:52:45 +01004516 /* All file-owned VMA should have been released by this point through
4517 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4518 * However, the object may also be bound into the global GTT (e.g.
4519 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need
	 * to be unbound now.
4522 */
Chris Wilson1488fc02012-04-24 15:47:31 +01004523
Chris Wilson5ad08be2017-04-07 11:25:51 +01004524 while ((freed = llist_del_all(&i915->mm.free_list))) {
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004525 __i915_gem_free_objects(i915, freed);
Chris Wilson5ad08be2017-04-07 11:25:51 +01004526 if (need_resched())
4527 break;
4528 }
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004529}
4530
4531static void __i915_gem_free_object_rcu(struct rcu_head *head)
4532{
4533 struct drm_i915_gem_object *obj =
4534 container_of(head, typeof(*obj), rcu);
4535 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4536
4537 /* We can't simply use call_rcu() from i915_gem_free_object()
4538 * as we need to block whilst unbinding, and the call_rcu
4539 * task may be called from softirq context. So we take a
4540 * detour through a worker.
4541 */
4542 if (llist_add(&obj->freed, &i915->mm.free_list))
4543 schedule_work(&i915->mm.free_work);
4544}
4545
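/*
 * Final unreference callback for a GEM object. Actual teardown must wait
 * for lockless (RCU) lookups such as i915_gem_busy_ioctl() to complete and
 * may need struct_mutex, so the object is queued via call_rcu() and freed
 * from the worker above, see __i915_gem_free_object_rcu() and
 * __i915_gem_free_work().
 */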
4546void i915_gem_free_object(struct drm_gem_object *gem_obj)
4547{
4548 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4549
Chris Wilsonbc0629a2016-11-01 10:03:17 +00004550 if (obj->mm.quirked)
4551 __i915_gem_object_unpin_pages(obj);
4552
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004553 if (discard_backing_storage(obj))
4554 obj->mm.madv = I915_MADV_DONTNEED;
Daniel Vettera071fa02014-06-18 23:28:09 +02004555
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004556 /* Before we free the object, make sure any pure RCU-only
4557 * read-side critical sections are complete, e.g.
4558 * i915_gem_busy_ioctl(). For the corresponding synchronized
4559 * lookup see i915_gem_object_lookup_rcu().
4560 */
4561 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
Chris Wilsonbe726152010-07-23 23:18:50 +01004562}
4563
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01004564void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4565{
4566 lockdep_assert_held(&obj->base.dev->struct_mutex);
4567
Chris Wilsond1b48c12017-08-16 09:52:08 +01004568 if (!i915_gem_object_has_active_reference(obj) &&
4569 i915_gem_object_is_active(obj))
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01004570 i915_gem_object_set_active_reference(obj);
4571 else
4572 i915_gem_object_put(obj);
4573}
4574
Chris Wilson3033aca2016-10-28 13:58:47 +01004575static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
4576{
4577 struct intel_engine_cs *engine;
4578 enum intel_engine_id id;
4579
4580 for_each_engine(engine, dev_priv, id)
Chris Wilsonf131e352016-12-29 14:40:37 +00004581 GEM_BUG_ON(engine->last_retired_context &&
4582 !i915_gem_context_is_kernel(engine->last_retired_context));
Chris Wilson3033aca2016-10-28 13:58:47 +01004583}
4584
Chris Wilson24145512017-01-24 11:01:35 +00004585void i915_gem_sanitize(struct drm_i915_private *i915)
4586{
Chris Wilsonf36325f2017-08-26 12:09:34 +01004587 if (i915_terminally_wedged(&i915->gpu_error)) {
4588 mutex_lock(&i915->drm.struct_mutex);
4589 i915_gem_unset_wedged(i915);
4590 mutex_unlock(&i915->drm.struct_mutex);
4591 }
4592
Chris Wilson24145512017-01-24 11:01:35 +00004593 /*
4594 * If we inherit context state from the BIOS or earlier occupants
4595 * of the GPU, the GPU may be in an inconsistent state when we
4596 * try to take over. The only way to remove the earlier state
4597 * is by resetting. However, resetting on earlier gen is tricky as
4598 * it may impact the display and we are uncertain about the stability
Joonas Lahtinenea117b82017-04-28 10:53:38 +03004599 * of the reset, so this could be applied to even earlier gen.
Chris Wilson24145512017-01-24 11:01:35 +00004600 */
Joonas Lahtinenea117b82017-04-28 10:53:38 +03004601 if (INTEL_GEN(i915) >= 5) {
Chris Wilson24145512017-01-24 11:01:35 +00004602 int reset = intel_gpu_reset(i915, ALL_ENGINES);
4603 WARN_ON(reset && reset != -ENODEV);
4604 }
4605}
4606
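/*
 * i915_gem_suspend - quiesce GEM before suspend/hibernation: switch to the
 * kernel context so the last context image in memory is coherent, wait for
 * the GPU to idle, flush the retire/idle workers and finally sanitize
 * (reset) the GPU so that no stale logical contexts survive into the next
 * boot or resume.
 */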
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004607int i915_gem_suspend(struct drm_i915_private *dev_priv)
Eric Anholt673a3942008-07-30 12:06:12 -07004608{
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004609 struct drm_device *dev = &dev_priv->drm;
Chris Wilsondcff85c2016-08-05 10:14:11 +01004610 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004611
Chris Wilsonc998e8a2017-03-02 08:30:29 +00004612 intel_runtime_pm_get(dev_priv);
Chris Wilson54b4f682016-07-21 21:16:19 +01004613 intel_suspend_gt_powersave(dev_priv);
4614
Chris Wilson45c5f202013-10-16 11:50:01 +01004615 mutex_lock(&dev->struct_mutex);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004616
4617 /* We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
4619 * context image is coherent, we have to switch away from it. That
4620 * leaves the dev_priv->kernel_context still active when
4621 * we actually suspend, and its image in memory may not match the GPU
4622 * state. Fortunately, the kernel_context is disposable and we do
4623 * not rely on its state.
4624 */
4625 ret = i915_gem_switch_to_kernel_context(dev_priv);
4626 if (ret)
Chris Wilsonc998e8a2017-03-02 08:30:29 +00004627 goto err_unlock;
Chris Wilson5ab57c72016-07-15 14:56:20 +01004628
Chris Wilson22dd3bb2016-09-09 14:11:50 +01004629 ret = i915_gem_wait_for_idle(dev_priv,
4630 I915_WAIT_INTERRUPTIBLE |
4631 I915_WAIT_LOCKED);
Chris Wilsoncad99462017-08-26 12:09:33 +01004632 if (ret && ret != -EIO)
Chris Wilsonc998e8a2017-03-02 08:30:29 +00004633 goto err_unlock;
Chris Wilsonf7403342013-09-13 23:57:04 +01004634
Chris Wilson3033aca2016-10-28 13:58:47 +01004635 assert_kernel_context_is_current(dev_priv);
Chris Wilson829a0af2017-06-20 12:05:45 +01004636 i915_gem_contexts_lost(dev_priv);
Chris Wilson45c5f202013-10-16 11:50:01 +01004637 mutex_unlock(&dev->struct_mutex);
4638
Sagar Arun Kamble63987bf2017-04-05 15:51:50 +05304639 intel_guc_suspend(dev_priv);
4640
Chris Wilson737b1502015-01-26 18:03:03 +02004641 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
Chris Wilson67d97da2016-07-04 08:08:31 +01004642 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
Chris Wilsonbdeb9782016-12-23 14:57:56 +00004643
	/* As the idle_work will rearm itself if it detects a race, play safe
	 * and repeat the flush until it is definitely idle.
4646 */
Chris Wilson7c262402017-10-06 11:40:38 +01004647 drain_delayed_work(&dev_priv->gt.idle_work);
Chris Wilsonbdeb9782016-12-23 14:57:56 +00004648
	/* Assert that we successfully flushed all the work and
4650 * reset the GPU back to its idle, low power state.
4651 */
Chris Wilson67d97da2016-07-04 08:08:31 +01004652 WARN_ON(dev_priv->gt.awake);
Chris Wilsonfc692bd2017-08-26 12:09:35 +01004653 if (WARN_ON(!intel_engines_are_idle(dev_priv)))
4654 i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
Chris Wilsonbdcf1202014-11-25 11:56:33 +00004655
Imre Deak1c777c52016-10-12 17:46:37 +03004656 /*
4657 * Neither the BIOS, ourselves or any other kernel
4658 * expects the system to be in execlists mode on startup,
4659 * so we need to reset the GPU back to legacy mode. And the only
4660 * known way to disable logical contexts is through a GPU reset.
4661 *
4662 * So in order to leave the system in a known default configuration,
4663 * always reset the GPU upon unload and suspend. Afterwards we then
4664 * clean up the GEM state tracking, flushing off the requests and
4665 * leaving the system in a known idle state.
	 * Note that it is of the utmost importance that the GPU is idle and
4667 * Note that is of the upmost importance that the GPU is idle and
4668 * all stray writes are flushed *before* we dismantle the backing
4669 * storage for the pinned objects.
4670 *
4671 * However, since we are uncertain that resetting the GPU on older
4672 * machines is a good idea, we don't - just in case it leaves the
4673 * machine in an unusable condition.
4674 */
Chris Wilson24145512017-01-24 11:01:35 +00004675 i915_gem_sanitize(dev_priv);
Chris Wilsoncad99462017-08-26 12:09:33 +01004676
4677 intel_runtime_pm_put(dev_priv);
4678 return 0;
Imre Deak1c777c52016-10-12 17:46:37 +03004679
Chris Wilsonc998e8a2017-03-02 08:30:29 +00004680err_unlock:
Chris Wilson45c5f202013-10-16 11:50:01 +01004681 mutex_unlock(&dev->struct_mutex);
Chris Wilsonc998e8a2017-03-02 08:30:29 +00004682 intel_runtime_pm_put(dev_priv);
Chris Wilson45c5f202013-10-16 11:50:01 +01004683 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004684}
4685
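/*
 * i915_gem_resume - restore GEM state after resume: rewrite the GGTT PTEs
 * and fence registers whose contents were lost over suspend, then hand off
 * to dev_priv->gt.resume() to reset and restart the engines with a fresh
 * kernel context.
 */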
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004686void i915_gem_resume(struct drm_i915_private *dev_priv)
Chris Wilson5ab57c72016-07-15 14:56:20 +01004687{
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004688 struct drm_device *dev = &dev_priv->drm;
Chris Wilson5ab57c72016-07-15 14:56:20 +01004689
Imre Deak31ab49a2016-11-07 11:20:05 +02004690 WARN_ON(dev_priv->gt.awake);
4691
Chris Wilson5ab57c72016-07-15 14:56:20 +01004692 mutex_lock(&dev->struct_mutex);
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00004693 i915_gem_restore_gtt_mappings(dev_priv);
Sagar Arun Kamble269e6ea2017-09-29 10:28:36 +05304694 i915_gem_restore_fences(dev_priv);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004695
4696 /* As we didn't flush the kernel context before suspend, we cannot
4697 * guarantee that the context image is complete. So let's just reset
4698 * it and start again.
4699 */
Chris Wilson821ed7d2016-09-09 14:11:53 +01004700 dev_priv->gt.resume(dev_priv);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004701
4702 mutex_unlock(&dev->struct_mutex);
4703}
4704
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004705void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004706{
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004707 if (INTEL_GEN(dev_priv) < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004708 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4709 return;
4710
4711 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4712 DISP_TILE_SURFACE_SWIZZLING);
4713
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004714 if (IS_GEN5(dev_priv))
Daniel Vetter11782b02012-01-31 16:47:55 +01004715 return;
4716
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004717 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004718 if (IS_GEN6(dev_priv))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004719 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004720 else if (IS_GEN7(dev_priv))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004721 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004722 else if (IS_GEN8(dev_priv))
Ben Widawsky31a53362013-11-02 21:07:04 -07004723 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004724 else
4725 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004726}
Daniel Vettere21af882012-02-09 20:53:27 +01004727
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004728static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004729{
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004730 I915_WRITE(RING_CTL(base), 0);
4731 I915_WRITE(RING_HEAD(base), 0);
4732 I915_WRITE(RING_TAIL(base), 0);
4733 I915_WRITE(RING_START(base), 0);
4734}
4735
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004736static void init_unused_rings(struct drm_i915_private *dev_priv)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004737{
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004738 if (IS_I830(dev_priv)) {
4739 init_unused_ring(dev_priv, PRB1_BASE);
4740 init_unused_ring(dev_priv, SRB0_BASE);
4741 init_unused_ring(dev_priv, SRB1_BASE);
4742 init_unused_ring(dev_priv, SRB2_BASE);
4743 init_unused_ring(dev_priv, SRB3_BASE);
4744 } else if (IS_GEN2(dev_priv)) {
4745 init_unused_ring(dev_priv, SRB0_BASE);
4746 init_unused_ring(dev_priv, SRB1_BASE);
4747 } else if (IS_GEN3(dev_priv)) {
4748 init_unused_ring(dev_priv, PRB1_BASE);
4749 init_unused_ring(dev_priv, PRB2_BASE);
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004750 }
4751}
4752
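/*
 * Run the per-engine hardware init (engine->init_hw) for every engine,
 * bailing out on the first failure. The void *data signature lets this be
 * used as a generic callback; within this file it is only called directly
 * from i915_gem_init_hw().
 */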
Chris Wilson20a8a742017-02-08 14:30:31 +00004753static int __i915_gem_restart_engines(void *data)
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004754{
Chris Wilson20a8a742017-02-08 14:30:31 +00004755 struct drm_i915_private *i915 = data;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004756 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05304757 enum intel_engine_id id;
Chris Wilson20a8a742017-02-08 14:30:31 +00004758 int err;
4759
4760 for_each_engine(engine, i915, id) {
4761 err = engine->init_hw(engine);
4762 if (err)
4763 return err;
4764 }
4765
4766 return 0;
4767}
4768
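/*
 * Program the hardware into a usable state: apply platform workarounds,
 * reprogram swizzling, quiesce the unused legacy rings, enable the PPGTT,
 * run the per-engine init and finally hand over to intel_uc_init_hw() to
 * load any microcontroller firmware. The whole sequence runs with forcewake
 * held - see the "security blanket" comment in i915_gem_init().
 */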
4769int i915_gem_init_hw(struct drm_i915_private *dev_priv)
4770{
Chris Wilsond200cda2016-04-28 09:56:44 +01004771 int ret;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004772
Chris Wilsonde867c22016-10-25 13:16:02 +01004773 dev_priv->gt.last_init_time = ktime_get();
4774
Chris Wilson5e4f5182015-02-13 14:35:59 +00004775 /* Double layer security blanket, see i915_gem_init() */
4776 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4777
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00004778 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004779 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004780
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01004781 if (IS_HASWELL(dev_priv))
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004782 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004783 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004784
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004785 if (HAS_PCH_NOP(dev_priv)) {
Tvrtko Ursulinfd6b8f42016-10-14 10:13:06 +01004786 if (IS_IVYBRIDGE(dev_priv)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004787 u32 temp = I915_READ(GEN7_MSG_CTL);
4788 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4789 I915_WRITE(GEN7_MSG_CTL, temp);
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004790 } else if (INTEL_GEN(dev_priv) >= 7) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004791 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4792 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4793 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4794 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004795 }
4796
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004797 i915_gem_init_swizzling(dev_priv);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004798
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01004799 /*
4800 * At least 830 can leave some of the unused rings
4801	 * "active" (i.e. head != tail) after resume, which
4802	 * will prevent C3 entry. Make sure all unused rings
4803 * are totally idle.
4804 */
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004805 init_unused_rings(dev_priv);
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01004806
Dave Gordoned54c1a2016-01-19 19:02:54 +00004807 BUG_ON(!dev_priv->kernel_context);
John Harrison90638cc2015-05-29 17:43:37 +01004808
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004809 ret = i915_ppgtt_init_hw(dev_priv);
John Harrison4ad2fd82015-06-18 13:11:20 +01004810 if (ret) {
4811 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4812 goto out;
4813 }
4814
4815 /* Need to do basic initialisation of all rings first: */
Chris Wilson20a8a742017-02-08 14:30:31 +00004816 ret = __i915_gem_restart_engines(dev_priv);
4817 if (ret)
4818 goto out;
Mika Kuoppala99433932013-01-22 14:12:17 +02004819
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004820 intel_mocs_init_l3cc_table(dev_priv);
Peter Antoine0ccdacf2016-04-13 15:03:25 +01004821
Oscar Mateob8991402017-03-28 09:53:47 -07004822 /* We can't enable contexts until all firmware is loaded */
4823 ret = intel_uc_init_hw(dev_priv);
4824 if (ret)
4825 goto out;
Alex Dai33a732f2015-08-12 15:43:36 +01004826
Chris Wilson5e4f5182015-02-13 14:35:59 +00004827out:
4828 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004829 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004830}
4831
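/*
 * Sanitise the semaphore module parameter: an explicit value from the user
 * (>= 0) is respected, otherwise semaphores are left disabled before gen6,
 * whenever execlists are enabled, and on Sandybridge when VT-d is active
 * (IO remapping and semaphores do not mix there).
 */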
Chris Wilson39df9192016-07-20 13:31:57 +01004832bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4833{
4834 if (INTEL_INFO(dev_priv)->gen < 6)
4835 return false;
4836
4837 /* TODO: make semaphores and Execlists play nicely together */
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00004838 if (i915_modparams.enable_execlists)
Chris Wilson39df9192016-07-20 13:31:57 +01004839 return false;
4840
4841 if (value >= 0)
4842 return value;
4843
Chris Wilson39df9192016-07-20 13:31:57 +01004844 /* Enable semaphores on SNB when IO remapping is off */
Chris Wilson80debff2017-05-25 13:16:12 +01004845 if (IS_GEN6(dev_priv) && intel_vtd_active())
Chris Wilson39df9192016-07-20 13:31:57 +01004846 return false;
Chris Wilson39df9192016-07-20 13:31:57 +01004847
4848 return true;
4849}
4850
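/**
 * i915_gem_init - initialise GEM for a device
 * @dev_priv: i915 device private
 *
 * Picks the submission backend (legacy ringbuffer vs execlists), sets up
 * userptr support, the global GTT, contexts and engines, and then runs the
 * first i915_gem_init_hw(). An -EIO from the hardware init is deliberately
 * swallowed by wedging the GPU so that the rest of the driver can still
 * load.
 */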
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004851int i915_gem_init(struct drm_i915_private *dev_priv)
Chris Wilson1070a422012-04-24 15:47:41 +01004852{
Chris Wilson1070a422012-04-24 15:47:41 +01004853 int ret;
4854
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004855 mutex_lock(&dev_priv->drm.struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004856
Matthew Auldda9fe3f32017-10-06 23:18:31 +01004857 /*
4858	 * We need to fall back to 4K pages since gvt gtt handling doesn't
4859	 * support huge page entries - we will need to check whether the
4860	 * hypervisor mm can support huge guest pages, or just do the emulation in gvt.
4861 */
4862 if (intel_vgpu_active(dev_priv))
4863 mkwrite_device_info(dev_priv)->page_sizes =
4864 I915_GTT_PAGE_SIZE_4K;
4865
Chris Wilson94312822017-05-03 10:39:18 +01004866 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
Chris Wilson57822dc2017-02-22 11:40:48 +00004867
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00004868 if (!i915_modparams.enable_execlists) {
Chris Wilson821ed7d2016-09-09 14:11:53 +01004869 dev_priv->gt.resume = intel_legacy_submission_resume;
Chris Wilson7e37f882016-08-02 22:50:21 +01004870 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
Oscar Mateo454afeb2014-07-24 17:04:22 +01004871 } else {
Chris Wilson821ed7d2016-09-09 14:11:53 +01004872 dev_priv->gt.resume = intel_lr_context_resume;
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004873 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
Oscar Mateoa83014d2014-07-24 17:04:21 +01004874 }
4875
Chris Wilson5e4f5182015-02-13 14:35:59 +00004876 /* This is just a security blanket to placate dragons.
4877 * On some systems, we very sporadically observe that the first TLBs
4878 * used by the CS may be stale, despite us poking the TLB reset. If
4879 * we hold the forcewake during initialisation these problems
4880 * just magically go away.
4881 */
4882 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4883
Chris Wilson8a2421b2017-06-16 15:05:22 +01004884 ret = i915_gem_init_userptr(dev_priv);
4885 if (ret)
4886 goto out_unlock;
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01004887
4888 ret = i915_gem_init_ggtt(dev_priv);
4889 if (ret)
4890 goto out_unlock;
Jesse Barnesd62b4892013-03-08 10:45:53 -08004891
Chris Wilson829a0af2017-06-20 12:05:45 +01004892 ret = i915_gem_contexts_init(dev_priv);
Jani Nikula7bcc3772014-12-05 14:17:42 +02004893 if (ret)
4894 goto out_unlock;
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004895
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004896 ret = intel_engines_init(dev_priv);
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004897 if (ret)
Jani Nikula7bcc3772014-12-05 14:17:42 +02004898 goto out_unlock;
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004899
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004900 ret = i915_gem_init_hw(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01004901 if (ret == -EIO) {
Chris Wilson7e21d642016-07-27 09:07:29 +01004902 /* Allow engine initialisation to fail by marking the GPU as
Chris Wilson60990322014-04-09 09:19:42 +01004903		 * wedged. But we only want to do this when the GPU is angry;
4904		 * for all other failures, such as an allocation failure, bail.
4905 */
4906 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
Chris Wilson821ed7d2016-09-09 14:11:53 +01004907 i915_gem_set_wedged(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01004908 ret = 0;
Chris Wilson1070a422012-04-24 15:47:41 +01004909 }
Jani Nikula7bcc3772014-12-05 14:17:42 +02004910
4911out_unlock:
Chris Wilson5e4f5182015-02-13 14:35:59 +00004912 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00004913 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilson1070a422012-04-24 15:47:41 +01004914
Chris Wilson60990322014-04-09 09:19:42 +01004915 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01004916}
4917
Chris Wilson24145512017-01-24 11:01:35 +00004918void i915_gem_init_mmio(struct drm_i915_private *i915)
4919{
4920 i915_gem_sanitize(i915);
4921}
4922
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004923void
Tvrtko Ursulincb15d9f2016-12-01 14:16:39 +00004924i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004925{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004926 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05304927 enum intel_engine_id id;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004928
Akash Goel3b3f1652016-10-13 22:44:48 +05304929 for_each_engine(engine, dev_priv, id)
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004930 dev_priv->gt.cleanup_engine(engine);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004931}
4932
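/*
 * Figure out how many fence registers this platform exposes (32, 16 or 8
 * depending on generation, or whatever the host advertises when running as
 * a vGPU guest), initialise the bookkeeping for each of them, write out the
 * (currently unused) fences and detect the bit-6 swizzling mode.
 */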
Eric Anholt673a3942008-07-30 12:06:12 -07004933void
Imre Deak40ae4e12016-03-16 14:54:03 +02004934i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4935{
Chris Wilson49ef5292016-08-18 17:17:00 +01004936 int i;
Imre Deak40ae4e12016-03-16 14:54:03 +02004937
4938 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4939 !IS_CHERRYVIEW(dev_priv))
4940 dev_priv->num_fence_regs = 32;
Jani Nikula73f67aa2016-12-07 22:48:09 +02004941 else if (INTEL_INFO(dev_priv)->gen >= 4 ||
4942 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
4943 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
Imre Deak40ae4e12016-03-16 14:54:03 +02004944 dev_priv->num_fence_regs = 16;
4945 else
4946 dev_priv->num_fence_regs = 8;
4947
Chris Wilsonc0336662016-05-06 15:40:21 +01004948 if (intel_vgpu_active(dev_priv))
Imre Deak40ae4e12016-03-16 14:54:03 +02004949 dev_priv->num_fence_regs =
4950 I915_READ(vgtif_reg(avail_rs.fence_num));
4951
4952 /* Initialize fence registers to zero */
Chris Wilson49ef5292016-08-18 17:17:00 +01004953 for (i = 0; i < dev_priv->num_fence_regs; i++) {
4954 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
4955
4956 fence->i915 = dev_priv;
4957 fence->id = i;
4958 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
4959 }
Tvrtko Ursulin4362f4f2016-11-16 08:55:33 +00004960 i915_gem_restore_fences(dev_priv);
Imre Deak40ae4e12016-03-16 14:54:03 +02004961
Tvrtko Ursulin4362f4f2016-11-16 08:55:33 +00004962 i915_gem_detect_bit_6_swizzle(dev_priv);
Imre Deak40ae4e12016-03-16 14:54:03 +02004963}
4964
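/*
 * One-time allocation of the slab caches and global state used by GEM:
 * the object/vma/lut/request/dependency/priority caches, the global
 * timeline, the bound/unbound/fence/userfault lists, the retire and idle
 * workers, and the private tmpfs mount used to back huge-page objects
 * (failure of the latter is not fatal, it merely disables huge pages).
 */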
Chris Wilson73cb9702016-10-28 13:58:46 +01004965int
Tvrtko Ursulincb15d9f2016-12-01 14:16:39 +00004966i915_gem_load_init(struct drm_i915_private *dev_priv)
Eric Anholt673a3942008-07-30 12:06:12 -07004967{
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004968 int err = -ENOMEM;
Chris Wilson42dcedd2012-11-15 11:32:30 +00004969
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004970 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
4971 if (!dev_priv->objects)
Chris Wilson73cb9702016-10-28 13:58:46 +01004972 goto err_out;
Chris Wilson73cb9702016-10-28 13:58:46 +01004973
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004974 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
4975 if (!dev_priv->vmas)
Chris Wilson73cb9702016-10-28 13:58:46 +01004976 goto err_objects;
Chris Wilson73cb9702016-10-28 13:58:46 +01004977
Chris Wilsond1b48c12017-08-16 09:52:08 +01004978 dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
4979 if (!dev_priv->luts)
4980 goto err_vmas;
4981
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004982 dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
4983 SLAB_HWCACHE_ALIGN |
4984 SLAB_RECLAIM_ACCOUNT |
Paul E. McKenney5f0d5a32017-01-18 02:53:44 -08004985 SLAB_TYPESAFE_BY_RCU);
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004986 if (!dev_priv->requests)
Chris Wilsond1b48c12017-08-16 09:52:08 +01004987 goto err_luts;
Chris Wilson73cb9702016-10-28 13:58:46 +01004988
Chris Wilson52e54202016-11-14 20:41:02 +00004989 dev_priv->dependencies = KMEM_CACHE(i915_dependency,
4990 SLAB_HWCACHE_ALIGN |
4991 SLAB_RECLAIM_ACCOUNT);
4992 if (!dev_priv->dependencies)
4993 goto err_requests;
4994
Chris Wilsonc5cf9a92017-05-17 13:10:04 +01004995 dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
4996 if (!dev_priv->priorities)
4997 goto err_dependencies;
4998
Chris Wilson73cb9702016-10-28 13:58:46 +01004999 mutex_lock(&dev_priv->drm.struct_mutex);
5000 INIT_LIST_HEAD(&dev_priv->gt.timelines);
Chris Wilsonbb894852016-11-14 20:40:57 +00005001 err = i915_gem_timeline_init__global(dev_priv);
Chris Wilson73cb9702016-10-28 13:58:46 +01005002 mutex_unlock(&dev_priv->drm.struct_mutex);
5003 if (err)
Chris Wilsonc5cf9a92017-05-17 13:10:04 +01005004 goto err_priorities;
Eric Anholt673a3942008-07-30 12:06:12 -07005005
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01005006 INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
5007 init_llist_head(&dev_priv->mm.free_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02005008 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
5009 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07005010 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson275f0392016-10-24 13:42:14 +01005011 INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
Chris Wilson67d97da2016-07-04 08:08:31 +01005012 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
Eric Anholt673a3942008-07-30 12:06:12 -07005013 i915_gem_retire_work_handler);
Chris Wilson67d97da2016-07-04 08:08:31 +01005014 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005015 i915_gem_idle_work_handler);
Chris Wilson1f15b762016-07-01 17:23:14 +01005016 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01005017 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01005018
Joonas Lahtinen6f633402016-09-01 14:58:21 +03005019 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
5020
Chris Wilsonb5add952016-08-04 16:32:36 +01005021 spin_lock_init(&dev_priv->fb_tracking.lock);
Chris Wilson73cb9702016-10-28 13:58:46 +01005022
Matthew Auld465c4032017-10-06 23:18:14 +01005023 err = i915_gemfs_init(dev_priv);
5024 if (err)
5025		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);
5026
Chris Wilson73cb9702016-10-28 13:58:46 +01005027 return 0;
5028
Chris Wilsonc5cf9a92017-05-17 13:10:04 +01005029err_priorities:
5030 kmem_cache_destroy(dev_priv->priorities);
Chris Wilson52e54202016-11-14 20:41:02 +00005031err_dependencies:
5032 kmem_cache_destroy(dev_priv->dependencies);
Chris Wilson73cb9702016-10-28 13:58:46 +01005033err_requests:
5034 kmem_cache_destroy(dev_priv->requests);
Chris Wilsond1b48c12017-08-16 09:52:08 +01005035err_luts:
5036 kmem_cache_destroy(dev_priv->luts);
Chris Wilson73cb9702016-10-28 13:58:46 +01005037err_vmas:
5038 kmem_cache_destroy(dev_priv->vmas);
5039err_objects:
5040 kmem_cache_destroy(dev_priv->objects);
5041err_out:
5042 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07005043}
Dave Airlie71acb5e2008-12-30 20:31:46 +10005044
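/*
 * Undo i915_gem_load_init(): drain any objects still pending free, tear
 * down the global timeline and destroy the slab caches. The rcu_barrier()
 * makes sure outstanding RCU-deferred frees from the request cache have
 * completed before its slab disappears.
 */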
Tvrtko Ursulincb15d9f2016-12-01 14:16:39 +00005045void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
Imre Deakd64aa092016-01-19 15:26:29 +02005046{
Chris Wilsonc4d4c1c2017-02-10 16:35:23 +00005047 i915_gem_drain_freed_objects(dev_priv);
Chris Wilson7d5d59e2016-11-01 08:48:41 +00005048 WARN_ON(!llist_empty(&dev_priv->mm.free_list));
Chris Wilsonc4d4c1c2017-02-10 16:35:23 +00005049 WARN_ON(dev_priv->mm.object_count);
Chris Wilson7d5d59e2016-11-01 08:48:41 +00005050
Matthew Auldea84aa72016-11-17 21:04:11 +00005051 mutex_lock(&dev_priv->drm.struct_mutex);
5052 i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
5053 WARN_ON(!list_empty(&dev_priv->gt.timelines));
5054 mutex_unlock(&dev_priv->drm.struct_mutex);
5055
Chris Wilsonc5cf9a92017-05-17 13:10:04 +01005056 kmem_cache_destroy(dev_priv->priorities);
Chris Wilson52e54202016-11-14 20:41:02 +00005057 kmem_cache_destroy(dev_priv->dependencies);
Imre Deakd64aa092016-01-19 15:26:29 +02005058 kmem_cache_destroy(dev_priv->requests);
Chris Wilsond1b48c12017-08-16 09:52:08 +01005059 kmem_cache_destroy(dev_priv->luts);
Imre Deakd64aa092016-01-19 15:26:29 +02005060 kmem_cache_destroy(dev_priv->vmas);
5061 kmem_cache_destroy(dev_priv->objects);
Chris Wilson0eafec62016-08-04 16:32:41 +01005062
5063 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
5064 rcu_barrier();
Matthew Auld465c4032017-10-06 23:18:14 +01005065
5066 i915_gemfs_fini(dev_priv);
Imre Deakd64aa092016-01-19 15:26:29 +02005067}
5068
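/*
 * Hibernation support. i915_gem_freeze() runs early and just purges
 * whatever userspace has marked as purgeable; i915_gem_freeze_late() below
 * runs right before the hibernation image is written and shrinks/flushes
 * the remaining objects into the CPU write domain so the image captures
 * their latest contents.
 */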
Chris Wilson6a800ea2016-09-21 14:51:07 +01005069int i915_gem_freeze(struct drm_i915_private *dev_priv)
5070{
Chris Wilsond0aa3012017-04-07 11:25:49 +01005071	/* Discard all purgeable objects; let userspace recover those as
5072 * required after resuming.
5073 */
Chris Wilson6a800ea2016-09-21 14:51:07 +01005074 i915_gem_shrink_all(dev_priv);
Chris Wilson6a800ea2016-09-21 14:51:07 +01005075
Chris Wilson6a800ea2016-09-21 14:51:07 +01005076 return 0;
5077}
5078
Chris Wilson461fb992016-05-14 07:26:33 +01005079int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
5080{
5081 struct drm_i915_gem_object *obj;
Chris Wilson7aab2d52016-09-09 20:02:18 +01005082 struct list_head *phases[] = {
5083 &dev_priv->mm.unbound_list,
5084 &dev_priv->mm.bound_list,
5085 NULL
5086 }, **p;
Chris Wilson461fb992016-05-14 07:26:33 +01005087
5088 /* Called just before we write the hibernation image.
5089 *
5090 * We need to update the domain tracking to reflect that the CPU
5091 * will be accessing all the pages to create and restore from the
5092 * hibernation, and so upon restoration those pages will be in the
5093 * CPU domain.
5094 *
5095 * To make sure the hibernation image contains the latest state,
5096 * we update that state just before writing out the image.
Chris Wilson7aab2d52016-09-09 20:02:18 +01005097 *
5098 * To try and reduce the hibernation image, we manually shrink
Chris Wilsond0aa3012017-04-07 11:25:49 +01005099 * the objects as well, see i915_gem_freeze()
Chris Wilson461fb992016-05-14 07:26:33 +01005100 */
5101
Chris Wilson912d5722017-09-06 16:19:30 -07005102 i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
Chris Wilson17b93c42017-04-07 11:25:50 +01005103 i915_gem_drain_freed_objects(dev_priv);
Chris Wilson461fb992016-05-14 07:26:33 +01005104
Chris Wilsond0aa3012017-04-07 11:25:49 +01005105 mutex_lock(&dev_priv->drm.struct_mutex);
Chris Wilson7aab2d52016-09-09 20:02:18 +01005106 for (p = phases; *p; p++) {
Chris Wilsone27ab732017-06-15 13:38:49 +01005107 list_for_each_entry(obj, *p, global_link)
5108 __start_cpu_write(obj);
Chris Wilson461fb992016-05-14 07:26:33 +01005109 }
Chris Wilson6a800ea2016-09-21 14:51:07 +01005110 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilson461fb992016-05-14 07:26:33 +01005111
5112 return 0;
5113}
5114
Chris Wilsonf787a5f2010-09-24 16:02:42 +01005115void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00005116{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01005117 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilson15f7bbc2016-07-26 12:01:52 +01005118 struct drm_i915_gem_request *request;
Eric Anholtb9624422009-06-03 07:27:35 +00005119
5120 /* Clean up our request list when the client is going away, so that
5121 * later retire_requests won't dereference our soon-to-be-gone
5122 * file_priv.
5123 */
Chris Wilson1c255952010-09-26 11:03:27 +01005124 spin_lock(&file_priv->mm.lock);
Chris Wilsonc8659ef2017-03-02 12:25:25 +00005125 list_for_each_entry(request, &file_priv->mm.request_list, client_link)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01005126 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01005127 spin_unlock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005128}
5129
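/*
 * Per-client setup on open(): allocate the drm_i915_file_private, set up
 * its request tracking and BSD engine selection, and create its context
 * state via i915_gem_context_open().
 */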
Chris Wilson829a0af2017-06-20 12:05:45 +01005130int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005131{
5132 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08005133 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005134
Chris Wilsonc4c29d72016-11-09 10:45:07 +00005135 DRM_DEBUG("\n");
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005136
5137 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5138 if (!file_priv)
5139 return -ENOMEM;
5140
5141 file->driver_priv = file_priv;
Chris Wilson829a0af2017-06-20 12:05:45 +01005142 file_priv->dev_priv = i915;
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02005143 file_priv->file = file;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005144
5145 spin_lock_init(&file_priv->mm.lock);
5146 INIT_LIST_HEAD(&file_priv->mm.request_list);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005147
Chris Wilsonc80ff162016-07-27 09:07:27 +01005148 file_priv->bsd_engine = -1;
Tvrtko Ursulinde1add32016-01-15 15:12:50 +00005149
Chris Wilson829a0af2017-06-20 12:05:45 +01005150 ret = i915_gem_context_open(i915, file);
Ben Widawskye422b882013-12-06 14:10:58 -08005151 if (ret)
5152 kfree(file_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005153
Ben Widawskye422b882013-12-06 14:10:58 -08005154 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01005155}
5156
Daniel Vetterb680c372014-09-19 18:27:27 +02005157/**
5158 * i915_gem_track_fb - update frontbuffer tracking
Geliang Tangd9072a32015-09-15 05:58:44 -07005159 * @old: current GEM buffer for the frontbuffer slots
5160 * @new: new GEM buffer for the frontbuffer slots
5161 * @frontbuffer_bits: bitmask of frontbuffer slots
Daniel Vetterb680c372014-09-19 18:27:27 +02005162 *
5163 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5164 * from @old and setting them in @new. Both @old and @new can be NULL.
5165 */
Daniel Vettera071fa02014-06-18 23:28:09 +02005166void i915_gem_track_fb(struct drm_i915_gem_object *old,
5167 struct drm_i915_gem_object *new,
5168 unsigned frontbuffer_bits)
5169{
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01005170	/* Control of individual bits within the mask is guarded by
5171 * the owning plane->mutex, i.e. we can never see concurrent
5172 * manipulation of individual bits. But since the bitfield as a whole
5173 * is updated using RMW, we need to use atomics in order to update
5174 * the bits.
5175 */
5176 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5177 sizeof(atomic_t) * BITS_PER_BYTE);
5178
Daniel Vettera071fa02014-06-18 23:28:09 +02005179 if (old) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01005180 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
5181 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02005182 }
5183
5184 if (new) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01005185 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
5186 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02005187 }
5188}
5189
Dave Gordonea702992015-07-09 19:29:02 +01005190/* Allocate a new GEM object and fill it with the supplied data */
5191struct drm_i915_gem_object *
Tvrtko Ursulin12d79d72016-12-01 14:16:37 +00005192i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
Dave Gordonea702992015-07-09 19:29:02 +01005193 const void *data, size_t size)
5194{
5195 struct drm_i915_gem_object *obj;
Chris Wilsonbe062fa2017-03-17 19:46:48 +00005196 struct file *file;
5197 size_t offset;
5198 int err;
Dave Gordonea702992015-07-09 19:29:02 +01005199
Tvrtko Ursulin12d79d72016-12-01 14:16:37 +00005200 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
Chris Wilsonfe3db792016-04-25 13:32:13 +01005201 if (IS_ERR(obj))
Dave Gordonea702992015-07-09 19:29:02 +01005202 return obj;
5203
Chris Wilsonce8ff092017-03-17 19:46:47 +00005204 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
Dave Gordonea702992015-07-09 19:29:02 +01005205
Chris Wilsonbe062fa2017-03-17 19:46:48 +00005206 file = obj->base.filp;
5207 offset = 0;
5208 do {
5209 unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
5210 struct page *page;
5211 void *pgdata, *vaddr;
Dave Gordonea702992015-07-09 19:29:02 +01005212
Chris Wilsonbe062fa2017-03-17 19:46:48 +00005213 err = pagecache_write_begin(file, file->f_mapping,
5214 offset, len, 0,
5215 &page, &pgdata);
5216 if (err < 0)
5217 goto fail;
Dave Gordonea702992015-07-09 19:29:02 +01005218
Chris Wilsonbe062fa2017-03-17 19:46:48 +00005219 vaddr = kmap(page);
5220 memcpy(vaddr, data, len);
5221 kunmap(page);
5222
5223 err = pagecache_write_end(file, file->f_mapping,
5224 offset, len, len,
5225 page, pgdata);
5226 if (err < 0)
5227 goto fail;
5228
5229 size -= len;
5230 data += len;
5231 offset += len;
5232 } while (size);
Dave Gordonea702992015-07-09 19:29:02 +01005233
5234 return obj;
5235
5236fail:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01005237 i915_gem_object_put(obj);
Chris Wilsonbe062fa2017-03-17 19:46:48 +00005238 return ERR_PTR(err);
Dave Gordonea702992015-07-09 19:29:02 +01005239}
Chris Wilson96d77632016-10-28 13:58:33 +01005240
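/*
 * i915_gem_object_get_sg() returns the scatterlist entry that covers page
 * @n of the object, caching each walk in a radixtree so that repeated and
 * backwards lookups stay cheap (see the amortisation comments in the body).
 * The caller must already hold a pin on the object's pages.
 *
 * The helpers that follow are thin wrappers around it; a hypothetical
 * caller, with the pages pinned, might do:
 *
 *	struct page *page = i915_gem_object_get_page(obj, n);
 *	dma_addr_t addr = i915_gem_object_get_dma_address(obj, n);
 */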
5241struct scatterlist *
5242i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
5243 unsigned int n,
5244 unsigned int *offset)
5245{
Chris Wilsona4f5ea62016-10-28 13:58:35 +01005246 struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
Chris Wilson96d77632016-10-28 13:58:33 +01005247 struct scatterlist *sg;
5248 unsigned int idx, count;
5249
5250 might_sleep();
5251 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01005252 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
Chris Wilson96d77632016-10-28 13:58:33 +01005253
5254 /* As we iterate forward through the sg, we record each entry in a
5255 * radixtree for quick repeated (backwards) lookups. If we have seen
5256 * this index previously, we will have an entry for it.
5257 *
5258 * Initial lookup is O(N), but this is amortized to O(1) for
5259 * sequential page access (where each new request is consecutive
5260 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
5261 * i.e. O(1) with a large constant!
5262 */
5263 if (n < READ_ONCE(iter->sg_idx))
5264 goto lookup;
5265
5266 mutex_lock(&iter->lock);
5267
5268	/* We prefer to reuse the last sg so that repeated lookups of this
5269 * (or the subsequent) sg are fast - comparing against the last
5270 * sg is faster than going through the radixtree.
5271 */
5272
5273 sg = iter->sg_pos;
5274 idx = iter->sg_idx;
5275 count = __sg_page_count(sg);
5276
5277 while (idx + count <= n) {
5278 unsigned long exception, i;
5279 int ret;
5280
5281 /* If we cannot allocate and insert this entry, or the
5282 * individual pages from this range, cancel updating the
5283 * sg_idx so that on this lookup we are forced to linearly
5284 * scan onwards, but on future lookups we will try the
5285 * insertion again (in which case we need to be careful of
5286 * the error return reporting that we have already inserted
5287 * this index).
5288 */
5289 ret = radix_tree_insert(&iter->radix, idx, sg);
5290 if (ret && ret != -EEXIST)
5291 goto scan;
5292
5293 exception =
5294 RADIX_TREE_EXCEPTIONAL_ENTRY |
5295 idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
5296 for (i = 1; i < count; i++) {
5297 ret = radix_tree_insert(&iter->radix, idx + i,
5298 (void *)exception);
5299 if (ret && ret != -EEXIST)
5300 goto scan;
5301 }
5302
5303 idx += count;
5304 sg = ____sg_next(sg);
5305 count = __sg_page_count(sg);
5306 }
5307
5308scan:
5309 iter->sg_pos = sg;
5310 iter->sg_idx = idx;
5311
5312 mutex_unlock(&iter->lock);
5313
5314 if (unlikely(n < idx)) /* insertion completed by another thread */
5315 goto lookup;
5316
5317 /* In case we failed to insert the entry into the radixtree, we need
5318 * to look beyond the current sg.
5319 */
5320 while (idx + count <= n) {
5321 idx += count;
5322 sg = ____sg_next(sg);
5323 count = __sg_page_count(sg);
5324 }
5325
5326 *offset = n - idx;
5327 return sg;
5328
5329lookup:
5330 rcu_read_lock();
5331
5332 sg = radix_tree_lookup(&iter->radix, n);
5333 GEM_BUG_ON(!sg);
5334
5335	/* If this index is in the middle of a multi-page sg entry,
5336 * the radixtree will contain an exceptional entry that points
5337 * to the start of that range. We will return the pointer to
5338 * the base page and the offset of this page within the
5339 * sg entry's range.
5340 */
5341 *offset = 0;
5342 if (unlikely(radix_tree_exception(sg))) {
5343 unsigned long base =
5344 (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
5345
5346 sg = radix_tree_lookup(&iter->radix, base);
5347 GEM_BUG_ON(!sg);
5348
5349 *offset = n - base;
5350 }
5351
5352 rcu_read_unlock();
5353
5354 return sg;
5355}
5356
5357struct page *
5358i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
5359{
5360 struct scatterlist *sg;
5361 unsigned int offset;
5362
5363 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
5364
5365 sg = i915_gem_object_get_sg(obj, n, &offset);
5366 return nth_page(sg_page(sg), offset);
5367}
5368
5369/* Like i915_gem_object_get_page(), but mark the returned page dirty */
5370struct page *
5371i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
5372 unsigned int n)
5373{
5374 struct page *page;
5375
5376 page = i915_gem_object_get_page(obj, n);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01005377 if (!obj->mm.dirty)
Chris Wilson96d77632016-10-28 13:58:33 +01005378 set_page_dirty(page);
5379
5380 return page;
5381}
5382
5383dma_addr_t
5384i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
5385 unsigned long n)
5386{
5387 struct scatterlist *sg;
5388 unsigned int offset;
5389
5390 sg = i915_gem_object_get_sg(obj, n, &offset);
5391 return sg_dma_address(sg) + (offset << PAGE_SHIFT);
5392}
Chris Wilson935a2f72017-02-13 17:15:13 +00005393
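/*
 * Convert a regular shmemfs-backed object into a "phys" object: unbind it,
 * swap its ops over to i915_gem_phys_ops and repopulate the backing store,
 * perma-pinning the new pages. The conversion is refused if the object is
 * currently mapped, quirked or no longer marked WILLNEED, and on error the
 * original ops and pages are restored.
 */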
Chris Wilson8eeb7902017-07-26 19:16:01 +01005394int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
5395{
5396 struct sg_table *pages;
5397 int err;
5398
5399 if (align > obj->base.size)
5400 return -EINVAL;
5401
5402 if (obj->ops == &i915_gem_phys_ops)
5403 return 0;
5404
5405 if (obj->ops != &i915_gem_object_ops)
5406 return -EINVAL;
5407
5408 err = i915_gem_object_unbind(obj);
5409 if (err)
5410 return err;
5411
5412 mutex_lock(&obj->mm.lock);
5413
5414 if (obj->mm.madv != I915_MADV_WILLNEED) {
5415 err = -EFAULT;
5416 goto err_unlock;
5417 }
5418
5419 if (obj->mm.quirked) {
5420 err = -EFAULT;
5421 goto err_unlock;
5422 }
5423
5424 if (obj->mm.mapping) {
5425 err = -EBUSY;
5426 goto err_unlock;
5427 }
5428
5429 pages = obj->mm.pages;
5430 obj->ops = &i915_gem_phys_ops;
5431
Chris Wilson8fb6a5d2017-07-26 19:16:02 +01005432 err = ____i915_gem_object_get_pages(obj);
Chris Wilson8eeb7902017-07-26 19:16:01 +01005433 if (err)
5434 goto err_xfer;
5435
5436 /* Perma-pin (until release) the physical set of pages */
5437 __i915_gem_object_pin_pages(obj);
5438
5439 if (!IS_ERR_OR_NULL(pages))
5440 i915_gem_object_ops.put_pages(obj, pages);
5441 mutex_unlock(&obj->mm.lock);
5442 return 0;
5443
5444err_xfer:
5445 obj->ops = &i915_gem_object_ops;
5446 obj->mm.pages = pages;
5447err_unlock:
5448 mutex_unlock(&obj->mm.lock);
5449 return err;
5450}
5451
Chris Wilson935a2f72017-02-13 17:15:13 +00005452#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5453#include "selftests/scatterlist.c"
Chris Wilson66d9cb52017-02-13 17:15:17 +00005454#include "selftests/mock_gem_device.c"
Chris Wilson44653982017-02-13 17:15:20 +00005455#include "selftests/huge_gem_object.c"
Matthew Auld40498662017-10-06 23:18:29 +01005456#include "selftests/huge_pages.c"
Chris Wilson8335fd62017-02-13 17:15:28 +00005457#include "selftests/i915_gem_object.c"
Chris Wilson17059452017-02-13 17:15:32 +00005458#include "selftests/i915_gem_coherency.c"
Chris Wilson935a2f72017-02-13 17:15:13 +00005459#endif