/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
						   size, 0, -1,
						   0, ggtt->mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	might_sleep();

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       I915_RESET_TIMEOUT);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}
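
/*
 * A sketch of the expected calling pattern for the helper above (callers
 * elsewhere in the driver that need struct_mutex follow this shape):
 *
 *	ret = i915_mutex_lock_interruptible(dev);
 *	if (ret)
 *		return ret;
 *	... access GEM state guarded by dev->struct_mutex ...
 *	mutex_unlock(&dev->struct_mutex);
 */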

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	drm_dma_handle_t *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return ERR_PTR(-EINVAL);

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     obj->base.size,
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return ERR_PTR(-ENOMEM);

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			st = ERR_CAST(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		st = ERR_PTR(-ENOMEM);
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		st = ERR_PTR(-ENOMEM);
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;
	return st;

err_phys:
	drm_pci_free(obj->base.dev, phys);
	return st;
}

static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		drm_clflush_sg(pages);

	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};
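
/*
 * Objects are switched over to the phys backend above by
 * i915_gem_object_attach_phys() further down: the get_pages hook snapshots
 * the shmem contents into a single contiguous drm_pci_alloc() buffer, and
 * put_pages writes any dirty data back to shmem before freeing that buffer.
 */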

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps)
{
	struct drm_i915_gem_request *rq;

	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
		else
			rps = NULL;
	}

	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

	if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&rq->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&rq->i915->rps.client_lock);
	}

	return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps);
			if (timeout <= 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (excl && timeout > 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);

	dma_fence_put(excl);

	return timeout;
}
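
/*
 * Summary of the wait helper above: with I915_WAIT_ALL every shared fence
 * in the reservation object is waited upon in turn before the exclusive
 * fence; otherwise only the exclusive fence is considered. The return value
 * is the remaining timeout (or a negative error), which the caller in
 * i915_gem_object_wait() converts into 0/-errno.
 */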

static void __fence_set_priority(struct dma_fence *fence, int prio)
{
	struct drm_i915_gem_request *rq;
	struct intel_engine_cs *engine;

	if (!dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;
	if (!engine->schedule)
		return;

	engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], prio);
	} else {
		__fence_set_priority(fence, prio);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      int prio)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], prio);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, prio);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps)
{
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps);
	return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	int ret;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	if (obj->mm.pages)
		return -EBUSY;

	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_pin_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(to_i915(dev));

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
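
/*
 * Both copy helpers above walk the buffer one cacheline (64 bytes) at a
 * time and XOR the GPU-side offset with 64, i.e. they swap the two halves
 * of each 128-byte block. This mirrors the bit-17 swizzling applied by the
 * memory controller on affected platforms and is only taken when
 * i915_gem_object_needs_bit17_swizzle() says so (see the pread/pwrite
 * paths below).
 */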

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
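
/*
 * Note on the needs_clflush bits set above: bit 0 asks the caller to
 * clflush partially covered cachelines before writing them (so stale CPU
 * cache contents are not mixed in), while bit 1 (CLFLUSH_AFTER, set via
 * cpu_write_needs_clflush() << 1) asks for a flush after the write. The
 * shmem pwrite path below consumes both.
 */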

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int offset, int length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
	else
		ret = __copy_to_user(user_data, vaddr + offset, length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data, vaddr + offset, length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}
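
/*
 * The read helper above first tries an atomic kmap of the WC aperture
 * mapping plus __copy_to_user_inatomic(); only if that faults does it fall
 * back to a sleeping io_mapping_map_wc() and copy_to_user(). ggtt_write()
 * further down uses the same pattern for the write direction.
 */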

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}
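
/*
 * Note the fallback ordering in the ioctl above: the shmem pread path is
 * tried first, and only an -EFAULT or -ENODEV result (e.g. an object
 * without struct pages, or a fault on the user buffer in the atomic copy)
 * makes it retry through the GTT mapping with i915_gem_gtt_pread().
 */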
1171
Keith Packard0839ccb2008-10-30 19:38:48 -07001172/* This is the fast write path which cannot handle
1173 * page faults in the source data
Linus Torvalds9b7530cc2008-10-20 14:16:43 -07001174 */
Linus Torvalds9b7530cc2008-10-20 14:16:43 -07001175
Chris Wilsonfe115622016-10-28 13:58:40 +01001176static inline bool
1177ggtt_write(struct io_mapping *mapping,
1178 loff_t base, int offset,
1179 char __user *user_data, int length)
Keith Packard0839ccb2008-10-30 19:38:48 -07001180{
Ben Widawsky4f0c7cf2012-04-16 14:07:47 -07001181 void *vaddr;
Keith Packard0839ccb2008-10-30 19:38:48 -07001182 unsigned long unwritten;
1183
Ben Widawsky4f0c7cf2012-04-16 14:07:47 -07001184 /* We can use the cpu mem copy function because this is X86. */
Chris Wilsonfe115622016-10-28 13:58:40 +01001185 vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
1186 unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
Keith Packard0839ccb2008-10-30 19:38:48 -07001187 user_data, length);
Chris Wilsonfe115622016-10-28 13:58:40 +01001188 io_mapping_unmap_atomic(vaddr);
1189 if (unwritten) {
1190 vaddr = (void __force *)
1191 io_mapping_map_wc(mapping, base, PAGE_SIZE);
1192 unwritten = copy_from_user(vaddr + offset, user_data, length);
1193 io_mapping_unmap(vaddr);
1194 }
Keith Packard0839ccb2008-10-30 19:38:48 -07001195
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001196 return unwritten;
1197}
1198
Eric Anholt3de09aa2009-03-09 09:42:23 -07001199/**
1200 * This is the fast pwrite path, where we copy the data directly from the
1201 * user into the GTT, uncached.
Chris Wilsonfe115622016-10-28 13:58:40 +01001202 * @obj: i915 GEM object
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001203 * @args: pwrite arguments structure
Eric Anholt3de09aa2009-03-09 09:42:23 -07001204 */
Eric Anholt673a3942008-07-30 12:06:12 -07001205static int
Chris Wilsonfe115622016-10-28 13:58:40 +01001206i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1207 const struct drm_i915_gem_pwrite *args)
Eric Anholt673a3942008-07-30 12:06:12 -07001208{
Chris Wilsonfe115622016-10-28 13:58:40 +01001209 struct drm_i915_private *i915 = to_i915(obj->base.dev);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301210 struct i915_ggtt *ggtt = &i915->ggtt;
1211 struct drm_mm_node node;
Chris Wilsonfe115622016-10-28 13:58:40 +01001212 struct i915_vma *vma;
1213 u64 remain, offset;
1214 void __user *user_data;
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301215 int ret;
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301216
Chris Wilsonfe115622016-10-28 13:58:40 +01001217 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1218 if (ret)
1219 return ret;
Daniel Vetter935aaa62012-03-25 19:47:35 +02001220
Chris Wilson9c870d02016-10-24 13:42:15 +01001221 intel_runtime_pm_get(i915);
Chris Wilson058d88c2016-08-15 10:49:06 +01001222 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
Chris Wilsonde895082016-08-04 16:32:34 +01001223 PIN_MAPPABLE | PIN_NONBLOCK);
Chris Wilson18034582016-08-18 17:16:45 +01001224 if (!IS_ERR(vma)) {
1225 node.start = i915_ggtt_offset(vma);
1226 node.allocated = false;
Chris Wilson49ef5292016-08-18 17:17:00 +01001227 ret = i915_vma_put_fence(vma);
Chris Wilson18034582016-08-18 17:16:45 +01001228 if (ret) {
1229 i915_vma_unpin(vma);
1230 vma = ERR_PTR(ret);
1231 }
1232 }
Chris Wilson058d88c2016-08-15 10:49:06 +01001233 if (IS_ERR(vma)) {
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001234 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301235 if (ret)
Chris Wilsonfe115622016-10-28 13:58:40 +01001236 goto out_unlock;
1237 GEM_BUG_ON(!node.allocated);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301238 }
Daniel Vetter935aaa62012-03-25 19:47:35 +02001239
1240 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1241 if (ret)
1242 goto out_unpin;
1243
Chris Wilsonfe115622016-10-28 13:58:40 +01001244 mutex_unlock(&i915->drm.struct_mutex);
1245
Chris Wilsonb19482d2016-08-18 17:16:43 +01001246 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
Paulo Zanoni063e4e62015-02-13 17:23:45 -02001247
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301248 user_data = u64_to_user_ptr(args->data_ptr);
1249 offset = args->offset;
1250 remain = args->size;
1251 while (remain) {
Eric Anholt673a3942008-07-30 12:06:12 -07001252 /* Operation in this page
1253 *
Keith Packard0839ccb2008-10-30 19:38:48 -07001254 * page_base = page offset within aperture
1255 * page_offset = offset within page
1256 * page_length = bytes to copy for this page
Eric Anholt673a3942008-07-30 12:06:12 -07001257 */
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301258 u32 page_base = node.start;
Chris Wilsonbb6dc8d2016-10-28 13:58:39 +01001259 unsigned int page_offset = offset_in_page(offset);
1260 unsigned int page_length = PAGE_SIZE - page_offset;
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301261 page_length = remain < page_length ? remain : page_length;
1262 if (node.allocated) {
1263 wmb(); /* flush the write before we modify the GGTT */
1264 ggtt->base.insert_page(&ggtt->base,
1265 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1266 node.start, I915_CACHE_NONE, 0);
1267 wmb(); /* flush modifications to the GGTT (insert_page) */
1268 } else {
1269 page_base += offset & PAGE_MASK;
1270 }
Keith Packard0839ccb2008-10-30 19:38:48 -07001271 /* If we get a fault while copying data, then (presumably) our
Eric Anholt3de09aa2009-03-09 09:42:23 -07001272 * source page isn't available. Return the error and we'll
1273 * retry in the slow path.
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301274 * If the object is non-shmem backed, we retry again with the
1275 * path that handles page fault.
Keith Packard0839ccb2008-10-30 19:38:48 -07001276 */
Chris Wilsonfe115622016-10-28 13:58:40 +01001277 if (ggtt_write(&ggtt->mappable, page_base, page_offset,
1278 user_data, page_length)) {
1279 ret = -EFAULT;
1280 break;
Daniel Vetter935aaa62012-03-25 19:47:35 +02001281 }
Eric Anholt673a3942008-07-30 12:06:12 -07001282
Keith Packard0839ccb2008-10-30 19:38:48 -07001283 remain -= page_length;
1284 user_data += page_length;
1285 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -07001286 }
Chris Wilsonb19482d2016-08-18 17:16:43 +01001287 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
Chris Wilsonfe115622016-10-28 13:58:40 +01001288
1289 mutex_lock(&i915->drm.struct_mutex);
Daniel Vetter935aaa62012-03-25 19:47:35 +02001290out_unpin:
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301291 if (node.allocated) {
1292 wmb();
1293 ggtt->base.clear_range(&ggtt->base,
Michał Winiarski4fb84d92016-10-13 14:02:40 +02001294 node.start, node.size);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301295 remove_mappable_node(&node);
1296 } else {
Chris Wilson058d88c2016-08-15 10:49:06 +01001297 i915_vma_unpin(vma);
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301298 }
Chris Wilsonfe115622016-10-28 13:58:40 +01001299out_unlock:
Chris Wilson9c870d02016-10-24 13:42:15 +01001300 intel_runtime_pm_put(i915);
Chris Wilsonfe115622016-10-28 13:58:40 +01001301 mutex_unlock(&i915->drm.struct_mutex);
Eric Anholt3de09aa2009-03-09 09:42:23 -07001302 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001303}
1304
Eric Anholt673a3942008-07-30 12:06:12 -07001305static int
Chris Wilsonfe115622016-10-28 13:58:40 +01001306shmem_pwrite_slow(struct page *page, int offset, int length,
Daniel Vetterd174bd62012-03-25 19:47:40 +02001307 char __user *user_data,
1308 bool page_do_bit17_swizzling,
1309 bool needs_clflush_before,
1310 bool needs_clflush_after)
Eric Anholt673a3942008-07-30 12:06:12 -07001311{
Daniel Vetterd174bd62012-03-25 19:47:40 +02001312 char *vaddr;
1313 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001314
Daniel Vetterd174bd62012-03-25 19:47:40 +02001315 vaddr = kmap(page);
Daniel Vettere7e58eb2012-03-25 19:47:43 +02001316 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
Chris Wilsonfe115622016-10-28 13:58:40 +01001317 shmem_clflush_swizzled_range(vaddr + offset, length,
Daniel Vetter23c18c72012-03-25 19:47:42 +02001318 page_do_bit17_swizzling);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001319 if (page_do_bit17_swizzling)
Chris Wilsonfe115622016-10-28 13:58:40 +01001320 ret = __copy_from_user_swizzled(vaddr, offset, user_data,
1321 length);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001322 else
Chris Wilsonfe115622016-10-28 13:58:40 +01001323 ret = __copy_from_user(vaddr + offset, user_data, length);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001324 if (needs_clflush_after)
Chris Wilsonfe115622016-10-28 13:58:40 +01001325 shmem_clflush_swizzled_range(vaddr + offset, length,
Daniel Vetter23c18c72012-03-25 19:47:42 +02001326 page_do_bit17_swizzling);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001327 kunmap(page);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001328
Chris Wilson755d2212012-09-04 21:02:55 +01001329 return ret ? -EFAULT : 0;
Eric Anholt40123c12009-03-09 13:42:30 -07001330}
1331
Chris Wilsonfe115622016-10-28 13:58:40 +01001332/* Per-page copy function for the shmem pwrite fastpath.
1333 * Flushes invalid cachelines before writing to the target if
1334 * needs_clflush_before is set and flushes out any written cachelines after
1335	 * writing if needs_clflush_after is set.
1336 */
Eric Anholt40123c12009-03-09 13:42:30 -07001337static int
Chris Wilsonfe115622016-10-28 13:58:40 +01001338shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1339 bool page_do_bit17_swizzling,
1340 bool needs_clflush_before,
1341 bool needs_clflush_after)
Eric Anholt40123c12009-03-09 13:42:30 -07001342{
Chris Wilsonfe115622016-10-28 13:58:40 +01001343 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001344
Chris Wilsonfe115622016-10-28 13:58:40 +01001345 ret = -ENODEV;
1346 if (!page_do_bit17_swizzling) {
1347 char *vaddr = kmap_atomic(page);
1348
1349 if (needs_clflush_before)
1350 drm_clflush_virt_range(vaddr + offset, len);
1351 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
1352 if (needs_clflush_after)
1353 drm_clflush_virt_range(vaddr + offset, len);
1354
1355 kunmap_atomic(vaddr);
1356 }
1357 if (ret == 0)
1358 return ret;
1359
1360 return shmem_pwrite_slow(page, offset, len, user_data,
1361 page_do_bit17_swizzling,
1362 needs_clflush_before,
1363 needs_clflush_after);
1364}
1365
1366static int
1367i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1368 const struct drm_i915_gem_pwrite *args)
1369{
1370 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1371 void __user *user_data;
1372 u64 remain;
1373 unsigned int obj_do_bit17_swizzling;
1374 unsigned int partial_cacheline_write;
1375 unsigned int needs_clflush;
1376 unsigned int offset, idx;
1377 int ret;
1378
1379 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
Chris Wilson43394c72016-08-18 17:16:47 +01001380 if (ret)
1381 return ret;
Eric Anholt40123c12009-03-09 13:42:30 -07001382
Chris Wilsonfe115622016-10-28 13:58:40 +01001383 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1384 mutex_unlock(&i915->drm.struct_mutex);
1385 if (ret)
1386 return ret;
1387
1388 obj_do_bit17_swizzling = 0;
1389 if (i915_gem_object_needs_bit17_swizzle(obj))
1390 obj_do_bit17_swizzling = BIT(17);
1391
1392 /* If we don't overwrite a cacheline completely we need to be
1393 * careful to have up-to-date data by first clflushing. Don't
1394	 * overcomplicate things and flush the entire cacheline.
1395 */
1396 partial_cacheline_write = 0;
1397 if (needs_clflush & CLFLUSH_BEFORE)
1398 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1399
Chris Wilson43394c72016-08-18 17:16:47 +01001400 user_data = u64_to_user_ptr(args->data_ptr);
Chris Wilson43394c72016-08-18 17:16:47 +01001401 remain = args->size;
Chris Wilsonfe115622016-10-28 13:58:40 +01001402 offset = offset_in_page(args->offset);
1403 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1404 struct page *page = i915_gem_object_get_page(obj, idx);
1405 int length;
Eric Anholt40123c12009-03-09 13:42:30 -07001406
Chris Wilsonfe115622016-10-28 13:58:40 +01001407 length = remain;
1408 if (offset + length > PAGE_SIZE)
1409 length = PAGE_SIZE - offset;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001410
Chris Wilsonfe115622016-10-28 13:58:40 +01001411 ret = shmem_pwrite(page, offset, length, user_data,
1412 page_to_phys(page) & obj_do_bit17_swizzling,
1413 (offset | length) & partial_cacheline_write,
1414 needs_clflush & CLFLUSH_AFTER);
1415 if (ret)
Chris Wilson9da3da62012-06-01 15:20:22 +01001416 break;
1417
Chris Wilsonfe115622016-10-28 13:58:40 +01001418 remain -= length;
1419 user_data += length;
1420 offset = 0;
Eric Anholt40123c12009-03-09 13:42:30 -07001421 }
1422
Rodrigo Vivide152b62015-07-07 16:28:51 -07001423 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
Chris Wilsonfe115622016-10-28 13:58:40 +01001424 i915_gem_obj_finish_shmem_access(obj);
Eric Anholt40123c12009-03-09 13:42:30 -07001425 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001426}
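/*
 * A minimal sketch (not part of the driver) of the partial-cacheline test
 * used by i915_gem_shmem_pwrite() above, assuming a 64-byte clflush size;
 * the helper name is illustrative only.
 */
#if 0
static bool write_spans_partial_cachelines(unsigned int offset,
					   unsigned int length,
					   unsigned int clflush_size)
{
	unsigned int mask = clflush_size - 1; /* e.g. 64 - 1 == 63 */

	/*
	 * Non-zero unless the write both starts on a cacheline boundary and
	 * covers a whole number of cachelines, i.e. whenever some cacheline
	 * is only partially overwritten and must be clflushed first.
	 */
	return ((offset | length) & mask) != 0;
}
#endif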
1427
1428/**
1429 * Writes data to the object referenced by handle.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001430 * @dev: drm device
1431 * @data: ioctl data blob
1432 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001433 *
1434 * On error, the contents of the buffer that were to be modified are undefined.
1435 */
1436int
1437i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001438 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001439{
1440 struct drm_i915_gem_pwrite *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001441 struct drm_i915_gem_object *obj;
Chris Wilson51311d02010-11-17 09:10:42 +00001442 int ret;
1443
1444 if (args->size == 0)
1445 return 0;
1446
1447 if (!access_ok(VERIFY_READ,
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001448 u64_to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +00001449 args->size))
1450 return -EFAULT;
1451
Chris Wilson03ac0642016-07-20 13:31:51 +01001452 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001453 if (!obj)
1454 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001455
Chris Wilson7dcd2492010-09-26 20:21:44 +01001456 /* Bounds check destination. */
Chris Wilson05394f32010-11-08 19:18:58 +00001457 if (args->offset > obj->base.size ||
1458 args->size > obj->base.size - args->offset) {
Chris Wilsonce9d4192010-09-26 20:50:05 +01001459 ret = -EINVAL;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001460 goto err;
Chris Wilsonce9d4192010-09-26 20:50:05 +01001461 }
1462
Chris Wilsondb53a302011-02-03 11:57:46 +00001463 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1464
Chris Wilsone95433c2016-10-28 13:58:27 +01001465 ret = i915_gem_object_wait(obj,
1466 I915_WAIT_INTERRUPTIBLE |
1467 I915_WAIT_ALL,
1468 MAX_SCHEDULE_TIMEOUT,
1469 to_rps_client(file));
Chris Wilson258a5ed2016-08-05 10:14:16 +01001470 if (ret)
1471 goto err;
1472
Chris Wilsonfe115622016-10-28 13:58:40 +01001473 ret = i915_gem_object_pin_pages(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001474 if (ret)
Chris Wilsonfe115622016-10-28 13:58:40 +01001475 goto err;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001476
Daniel Vetter935aaa62012-03-25 19:47:35 +02001477 ret = -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -07001478 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1479 * it would end up going through the fenced access, and we'll get
1480 * different detiling behavior between reading and writing.
1481 * pread/pwrite currently are reading and writing from the CPU
1482 * perspective, requiring manual detiling by the client.
1483 */
Chris Wilson6eae0052016-06-20 15:05:52 +01001484 if (!i915_gem_object_has_struct_page(obj) ||
Chris Wilson9c870d02016-10-24 13:42:15 +01001485 cpu_write_needs_clflush(obj))
Daniel Vetter935aaa62012-03-25 19:47:35 +02001486 /* Note that the gtt paths might fail with non-page-backed user
1487 * pointers (e.g. gtt mappings when moving data between
Chris Wilson9c870d02016-10-24 13:42:15 +01001488		 * textures). Fall back to the shmem path in that case.
1489 */
Chris Wilsonfe115622016-10-28 13:58:40 +01001490 ret = i915_gem_gtt_pwrite_fast(obj, args);
Eric Anholt673a3942008-07-30 12:06:12 -07001491
Chris Wilsond1054ee2016-07-16 18:42:36 +01001492 if (ret == -EFAULT || ret == -ENOSPC) {
Chris Wilson6a2c4232014-11-04 04:51:40 -08001493 if (obj->phys_handle)
1494 ret = i915_gem_phys_pwrite(obj, args, file);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301495 else
Chris Wilsonfe115622016-10-28 13:58:40 +01001496 ret = i915_gem_shmem_pwrite(obj, args);
Chris Wilson6a2c4232014-11-04 04:51:40 -08001497 }
Daniel Vetter5c0480f2011-12-14 13:57:30 +01001498
Chris Wilsonfe115622016-10-28 13:58:40 +01001499 i915_gem_object_unpin_pages(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001500err:
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001501 i915_gem_object_put(obj);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001502 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001503}
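/*
 * For reference, a hedged userspace-side sketch of driving the pwrite ioctl
 * handled above; the helper name and error handling are illustrative and not
 * part of any library.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_gem_pwrite(int drm_fd, uint32_t handle, uint64_t offset,
			      const void *data, uint64_t size)
{
	struct drm_i915_gem_pwrite pwrite = {
		.handle = handle,
		.offset = offset,
		.size = size,
		.data_ptr = (uintptr_t)data,
	};

	/* The kernel picks the GTT, shmem or phys backend internally. */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}
#endif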
1504
Chris Wilsond243ad82016-08-18 17:16:44 +01001505static inline enum fb_op_origin
Chris Wilsonaeecc962016-06-17 14:46:39 -03001506write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1507{
Chris Wilson50349242016-08-18 17:17:04 +01001508 return (domain == I915_GEM_DOMAIN_GTT ?
1509 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
Chris Wilsonaeecc962016-06-17 14:46:39 -03001510}
1511
Chris Wilson40e62d52016-10-28 13:58:41 +01001512static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1513{
1514 struct drm_i915_private *i915;
1515 struct list_head *list;
1516 struct i915_vma *vma;
1517
1518 list_for_each_entry(vma, &obj->vma_list, obj_link) {
1519 if (!i915_vma_is_ggtt(vma))
1520 continue;
1521
1522 if (i915_vma_is_active(vma))
1523 continue;
1524
1525 if (!drm_mm_node_allocated(&vma->node))
1526 continue;
1527
1528 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1529 }
1530
1531 i915 = to_i915(obj->base.dev);
1532 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
Joonas Lahtinen56cea322016-11-02 12:16:04 +02001533 list_move_tail(&obj->global_link, list);
Chris Wilson40e62d52016-10-28 13:58:41 +01001534}
1535
Eric Anholt673a3942008-07-30 12:06:12 -07001536/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001537 * Called when user space prepares to use an object with the CPU, either
1538 * through the mmap ioctl's mapping or a GTT mapping.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001539 * @dev: drm device
1540 * @data: ioctl data blob
1541 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001542 */
1543int
1544i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001545 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001546{
1547 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001548 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001549 uint32_t read_domains = args->read_domains;
1550 uint32_t write_domain = args->write_domain;
Chris Wilson40e62d52016-10-28 13:58:41 +01001551 int err;
Eric Anholt673a3942008-07-30 12:06:12 -07001552
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001553 /* Only handle setting domains to types used by the CPU. */
Chris Wilsonb8f90962016-08-05 10:14:07 +01001554 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001555 return -EINVAL;
1556
1557 /* Having something in the write domain implies it's in the read
1558 * domain, and only that read domain. Enforce that in the request.
1559 */
1560 if (write_domain != 0 && read_domains != write_domain)
1561 return -EINVAL;
1562
Chris Wilson03ac0642016-07-20 13:31:51 +01001563 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001564 if (!obj)
1565 return -ENOENT;
Jesse Barnes652c3932009-08-17 13:31:43 -07001566
Chris Wilson3236f572012-08-24 09:35:09 +01001567 /* Try to flush the object off the GPU without holding the lock.
1568 * We will repeat the flush holding the lock in the normal manner
1569 * to catch cases where we are gazumped.
1570 */
Chris Wilson40e62d52016-10-28 13:58:41 +01001571 err = i915_gem_object_wait(obj,
Chris Wilsone95433c2016-10-28 13:58:27 +01001572 I915_WAIT_INTERRUPTIBLE |
1573 (write_domain ? I915_WAIT_ALL : 0),
1574 MAX_SCHEDULE_TIMEOUT,
1575 to_rps_client(file));
Chris Wilson40e62d52016-10-28 13:58:41 +01001576 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001577 goto out;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001578
Chris Wilson40e62d52016-10-28 13:58:41 +01001579 /* Flush and acquire obj->pages so that we are coherent through
1580 * direct access in memory with previous cached writes through
1581 * shmemfs and that our cache domain tracking remains valid.
1582 * For example, if the obj->filp was moved to swap without us
1583 * being notified and releasing the pages, we would mistakenly
1584 * continue to assume that the obj remained out of the CPU cached
1585 * domain.
1586 */
1587 err = i915_gem_object_pin_pages(obj);
1588 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001589 goto out;
Chris Wilson40e62d52016-10-28 13:58:41 +01001590
1591 err = i915_mutex_lock_interruptible(dev);
1592 if (err)
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001593 goto out_unpin;
Chris Wilson3236f572012-08-24 09:35:09 +01001594
Chris Wilson43566de2015-01-02 16:29:29 +05301595 if (read_domains & I915_GEM_DOMAIN_GTT)
Chris Wilson40e62d52016-10-28 13:58:41 +01001596 err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Chris Wilson43566de2015-01-02 16:29:29 +05301597 else
Chris Wilson40e62d52016-10-28 13:58:41 +01001598 err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1599
1600 /* And bump the LRU for this access */
1601 i915_gem_object_bump_inactive_ggtt(obj);
1602
1603 mutex_unlock(&dev->struct_mutex);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001604
Daniel Vetter031b6982015-06-26 19:35:16 +02001605 if (write_domain != 0)
Chris Wilsonaeecc962016-06-17 14:46:39 -03001606 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
Daniel Vetter031b6982015-06-26 19:35:16 +02001607
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001608out_unpin:
Chris Wilson40e62d52016-10-28 13:58:41 +01001609 i915_gem_object_unpin_pages(obj);
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001610out:
1611 i915_gem_object_put(obj);
Chris Wilson40e62d52016-10-28 13:58:41 +01001612 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07001613}
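/*
 * A hedged userspace-side sketch of the set-domain ioctl handled above,
 * showing the rule that a non-zero write domain must equal the single read
 * domain; the helper name is illustrative only.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_set_gtt_domain(int drm_fd, uint32_t handle, bool write)
{
	struct drm_i915_gem_set_domain set_domain = {
		.handle = handle,
		.read_domains = I915_GEM_DOMAIN_GTT,
		/* Writing implies reading from the same (and only that) domain. */
		.write_domain = write ? I915_GEM_DOMAIN_GTT : 0,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
}
#endif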
1614
1615/**
1616 * Called when user space has done writes to this buffer
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001617 * @dev: drm device
1618 * @data: ioctl data blob
1619 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001620 */
1621int
1622i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001623 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001624{
1625 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001626 struct drm_i915_gem_object *obj;
Chris Wilsonc21724c2016-08-05 10:14:19 +01001627 int err = 0;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001628
Chris Wilson03ac0642016-07-20 13:31:51 +01001629 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonc21724c2016-08-05 10:14:19 +01001630 if (!obj)
1631 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001632
Eric Anholt673a3942008-07-30 12:06:12 -07001633 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilsonc21724c2016-08-05 10:14:19 +01001634 if (READ_ONCE(obj->pin_display)) {
1635 err = i915_mutex_lock_interruptible(dev);
1636 if (!err) {
1637 i915_gem_object_flush_cpu_write_domain(obj);
1638 mutex_unlock(&dev->struct_mutex);
1639 }
1640 }
Eric Anholte47c68e2008-11-14 13:35:19 -08001641
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001642 i915_gem_object_put(obj);
Chris Wilsonc21724c2016-08-05 10:14:19 +01001643 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07001644}
1645
1646/**
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001647 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1648 * it is mapped to.
1649 * @dev: drm device
1650 * @data: ioctl data blob
1651 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001652 *
1653 * While the mapping holds a reference on the contents of the object, it doesn't
1654 * imply a ref on the object itself.
Daniel Vetter34367382014-10-16 12:28:18 +02001655 *
1656 * IMPORTANT:
1657 *
1658 * DRM driver writers who look at this function as an example for how to do GEM
1659 * mmap support, please don't implement mmap support like here. The modern way
1660 * to implement DRM mmap support is with an mmap offset ioctl (like
1661 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1662 * That way debug tooling like valgrind will understand what's going on; hiding
1663 * the mmap call in a driver-private ioctl will break that. The i915 driver only
1664 * does cpu mmaps this way because we didn't know better.
Eric Anholt673a3942008-07-30 12:06:12 -07001665 */
1666int
1667i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001668 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001669{
1670 struct drm_i915_gem_mmap *args = data;
Chris Wilson03ac0642016-07-20 13:31:51 +01001671 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001672 unsigned long addr;
1673
Akash Goel1816f922015-01-02 16:29:30 +05301674 if (args->flags & ~(I915_MMAP_WC))
1675 return -EINVAL;
1676
Borislav Petkov568a58e2016-03-29 17:42:01 +02001677 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
Akash Goel1816f922015-01-02 16:29:30 +05301678 return -ENODEV;
1679
Chris Wilson03ac0642016-07-20 13:31:51 +01001680 obj = i915_gem_object_lookup(file, args->handle);
1681 if (!obj)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001682 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001683
Daniel Vetter1286ff72012-05-10 15:25:09 +02001684 /* prime objects have no backing filp to GEM mmap
1685 * pages from.
1686 */
Chris Wilson03ac0642016-07-20 13:31:51 +01001687 if (!obj->base.filp) {
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001688 i915_gem_object_put(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001689 return -EINVAL;
1690 }
1691
Chris Wilson03ac0642016-07-20 13:31:51 +01001692 addr = vm_mmap(obj->base.filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001693 PROT_READ | PROT_WRITE, MAP_SHARED,
1694 args->offset);
Akash Goel1816f922015-01-02 16:29:30 +05301695 if (args->flags & I915_MMAP_WC) {
1696 struct mm_struct *mm = current->mm;
1697 struct vm_area_struct *vma;
1698
Michal Hocko80a89a52016-05-23 16:26:11 -07001699 if (down_write_killable(&mm->mmap_sem)) {
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001700 i915_gem_object_put(obj);
Michal Hocko80a89a52016-05-23 16:26:11 -07001701 return -EINTR;
1702 }
Akash Goel1816f922015-01-02 16:29:30 +05301703 vma = find_vma(mm, addr);
1704 if (vma)
1705 vma->vm_page_prot =
1706 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1707 else
1708 addr = -ENOMEM;
1709 up_write(&mm->mmap_sem);
Chris Wilsonaeecc962016-06-17 14:46:39 -03001710
1711 /* This may race, but that's ok, it only gets set */
Chris Wilson50349242016-08-18 17:17:04 +01001712 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
Akash Goel1816f922015-01-02 16:29:30 +05301713 }
Chris Wilsonf0cd5182016-10-28 13:58:43 +01001714 i915_gem_object_put(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001715 if (IS_ERR((void *)addr))
1716 return addr;
1717
1718 args->addr_ptr = (uint64_t) addr;
1719
1720 return 0;
1721}
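/*
 * A hedged userspace-side sketch of the CPU mmap ioctl handled above,
 * including the optional write-combining flag mentioned in the code; the
 * helper name is illustrative only.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *example_cpu_mmap(int drm_fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap mmap_arg = {
		.handle = handle,
		.offset = 0,
		.size = size,
		.flags = I915_MMAP_WC,	/* write-combining; needs PAT support */
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
		return NULL;

	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}
#endif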
1722
Chris Wilson03af84f2016-08-18 17:17:01 +01001723static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1724{
1725 u64 size;
1726
1727 size = i915_gem_object_get_stride(obj);
1728 size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
1729
1730 return size >> PAGE_SHIFT;
1731}
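/*
 * Worked example (stride value assumed): with a 4096-byte stride, a Y-tiled
 * object spans 4096 * 32 = 128KiB per tile row, i.e. 32 pages, while an
 * X-tiled one spans 4096 * 8 = 32KiB, i.e. 8 pages.
 */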
1732
Jesse Barnesde151cf2008-11-12 10:03:55 -08001733/**
Chris Wilson4cc69072016-08-25 19:05:19 +01001734 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1735 *
1736 * A history of the GTT mmap interface:
1737 *
1738 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1739 * aligned and suitable for fencing, and still fit into the available
1740 * mappable space left by the pinned display objects. A classic problem
1741 * we called the page-fault-of-doom where we would ping-pong between
1742 * two objects that could not fit inside the GTT and so the memcpy
1743 * would page one object in at the expense of the other between every
1744 * single byte.
1745 *
1746 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1747 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1748 * object is too large for the available space (or simply too large
1749 * for the mappable aperture!), a view is created instead and faulted
1750 * into userspace. (This view is aligned and sized appropriately for
1751 * fenced access.)
1752 *
1753 * Restrictions:
1754 *
1755 * * snoopable objects cannot be accessed via the GTT. It can cause machine
1756 * hangs on some architectures, corruption on others. An attempt to service
1757 * a GTT page fault from a snoopable object will generate a SIGBUS.
1758 *
1759 * * the object must be able to fit into RAM (physical memory, though not
1760 * limited to the mappable aperture).
1761 *
1762 *
1763 * Caveats:
1764 *
1765 * * a new GTT page fault will synchronize rendering from the GPU and flush
1766 * all data to system memory. Subsequent access will not be synchronized.
1767 *
1768 * * all mappings are revoked on runtime device suspend.
1769 *
1770 * * there are only 8, 16 or 32 fence registers to share between all users
1771 * (older machines require a fence register for display and blitter access
1772 * as well). Contention of the fence registers will cause the previous users
1773 * to be unmapped and any new access will generate new page faults.
1774 *
1775 * * running out of memory while servicing a fault may generate a SIGBUS,
1776 * rather than the expected SIGSEGV.
1777 */
1778int i915_gem_mmap_gtt_version(void)
1779{
1780 return 1;
1781}
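/*
 * A hedged userspace-side sketch of querying the feature level reported by
 * i915_gem_mmap_gtt_version() above via GETPARAM; the helper name is
 * illustrative only.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_query_gtt_mmap_version(int drm_fd)
{
	int version = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_MMAP_GTT_VERSION,
		.value = &version,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return version; /* 1 on kernels with partial-view support */
}
#endif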
1782
1783/**
Jesse Barnesde151cf2008-11-12 10:03:55 -08001784 * i915_gem_fault - fault a page into the GTT
Chris Wilson058d88c2016-08-15 10:49:06 +01001785 * @area: CPU VMA in question
Geliang Tangd9072a32015-09-15 05:58:44 -07001786 * @vmf: fault info
Jesse Barnesde151cf2008-11-12 10:03:55 -08001787 *
1788 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1789 * from userspace. The fault handler takes care of binding the object to
1790 * the GTT (if needed), allocating and programming a fence register (again,
1791 * only if needed based on whether the old reg is still valid or the object
1792 * is tiled) and inserting a new PTE into the faulting process.
1793 *
1794 * Note that the faulting process may involve evicting existing objects
1795 * from the GTT and/or fence registers to make room. So performance may
1796 * suffer if the GTT working set is large or there are few fence registers
1797 * left.
Chris Wilson4cc69072016-08-25 19:05:19 +01001798 *
1799 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1800 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
Jesse Barnesde151cf2008-11-12 10:03:55 -08001801 */
Chris Wilson058d88c2016-08-15 10:49:06 +01001802int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001803{
Chris Wilson03af84f2016-08-18 17:17:01 +01001804#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
Chris Wilson058d88c2016-08-15 10:49:06 +01001805 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
Chris Wilson05394f32010-11-08 19:18:58 +00001806 struct drm_device *dev = obj->base.dev;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001807 struct drm_i915_private *dev_priv = to_i915(dev);
1808 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001809 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Chris Wilson058d88c2016-08-15 10:49:06 +01001810 struct i915_vma *vma;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001811 pgoff_t page_offset;
Chris Wilson82118872016-08-18 17:17:05 +01001812 unsigned int flags;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001813 int ret;
Paulo Zanonif65c9162013-11-27 18:20:34 -02001814
Jesse Barnesde151cf2008-11-12 10:03:55 -08001815 /* We don't use vmf->pgoff since that has the fake offset */
Jan Kara1a29d852016-12-14 15:07:01 -08001816 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001817
Chris Wilsondb53a302011-02-03 11:57:46 +00001818 trace_i915_gem_object_fault(obj, page_offset, true, write);
1819
Chris Wilson6e4930f2014-02-07 18:37:06 -02001820 /* Try to flush the object off the GPU first without holding the lock.
Chris Wilsonb8f90962016-08-05 10:14:07 +01001821 * Upon acquiring the lock, we will perform our sanity checks and then
Chris Wilson6e4930f2014-02-07 18:37:06 -02001822 * repeat the flush holding the lock in the normal manner to catch cases
1823 * where we are gazumped.
1824 */
Chris Wilsone95433c2016-10-28 13:58:27 +01001825 ret = i915_gem_object_wait(obj,
1826 I915_WAIT_INTERRUPTIBLE,
1827 MAX_SCHEDULE_TIMEOUT,
1828 NULL);
Chris Wilson6e4930f2014-02-07 18:37:06 -02001829 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001830 goto err;
1831
Chris Wilson40e62d52016-10-28 13:58:41 +01001832 ret = i915_gem_object_pin_pages(obj);
1833 if (ret)
1834 goto err;
1835
Chris Wilsonb8f90962016-08-05 10:14:07 +01001836 intel_runtime_pm_get(dev_priv);
1837
1838 ret = i915_mutex_lock_interruptible(dev);
1839 if (ret)
1840 goto err_rpm;
Chris Wilson6e4930f2014-02-07 18:37:06 -02001841
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001842 /* Access to snoopable pages through the GTT is incoherent. */
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00001843 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
Chris Wilsonddeff6e2014-05-28 16:16:41 +01001844 ret = -EFAULT;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001845 goto err_unlock;
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001846 }
1847
Chris Wilson82118872016-08-18 17:17:05 +01001848	/* If the object is smaller than a couple of partial vmas, it is
1849	 * not worth creating only a single partial vma - we may as well
1850	 * clear enough space for the full object.
1851 */
1852 flags = PIN_MAPPABLE;
1853 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1854 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1855
Chris Wilsona61007a2016-08-18 17:17:02 +01001856 /* Now pin it into the GTT as needed */
Chris Wilson82118872016-08-18 17:17:05 +01001857 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
Chris Wilsona61007a2016-08-18 17:17:02 +01001858 if (IS_ERR(vma)) {
1859 struct i915_ggtt_view view;
Chris Wilson03af84f2016-08-18 17:17:01 +01001860 unsigned int chunk_size;
1861
Chris Wilsona61007a2016-08-18 17:17:02 +01001862 /* Use a partial view if it is bigger than available space */
Chris Wilson03af84f2016-08-18 17:17:01 +01001863 chunk_size = MIN_CHUNK_PAGES;
1864 if (i915_gem_object_is_tiled(obj))
Chris Wilson0ef723c2016-11-07 10:54:43 +00001865 chunk_size = roundup(chunk_size, tile_row_pages(obj));
Joonas Lahtinene7ded2d2015-05-08 14:37:39 +03001866
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03001867 memset(&view, 0, sizeof(view));
1868 view.type = I915_GGTT_VIEW_PARTIAL;
1869 view.params.partial.offset = rounddown(page_offset, chunk_size);
1870 view.params.partial.size =
Chris Wilsona61007a2016-08-18 17:17:02 +01001871 min_t(unsigned int, chunk_size,
Chris Wilson908b1232016-10-11 10:06:56 +01001872 vma_pages(area) - view.params.partial.offset);
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03001873
Chris Wilsonaa136d92016-08-18 17:17:03 +01001874 /* If the partial covers the entire object, just create a
1875 * normal VMA.
1876 */
1877 if (chunk_size >= obj->base.size >> PAGE_SHIFT)
1878 view.type = I915_GGTT_VIEW_NORMAL;
1879
Chris Wilson50349242016-08-18 17:17:04 +01001880 /* Userspace is now writing through an untracked VMA, abandon
1881 * all hope that the hardware is able to track future writes.
1882 */
1883 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1884
Chris Wilsona61007a2016-08-18 17:17:02 +01001885 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1886 }
Chris Wilson058d88c2016-08-15 10:49:06 +01001887 if (IS_ERR(vma)) {
1888 ret = PTR_ERR(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001889 goto err_unlock;
Chris Wilson058d88c2016-08-15 10:49:06 +01001890 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001891
Chris Wilsonc9839302012-11-20 10:45:17 +00001892 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1893 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001894 goto err_unpin;
Chris Wilsonc9839302012-11-20 10:45:17 +00001895
Chris Wilson49ef5292016-08-18 17:17:00 +01001896 ret = i915_vma_get_fence(vma);
Chris Wilsonc9839302012-11-20 10:45:17 +00001897 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001898 goto err_unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001899
Chris Wilson275f0392016-10-24 13:42:14 +01001900 /* Mark as being mmapped into userspace for later revocation */
Chris Wilson9c870d02016-10-24 13:42:15 +01001901 assert_rpm_wakelock_held(dev_priv);
Chris Wilson275f0392016-10-24 13:42:14 +01001902 if (list_empty(&obj->userfault_link))
1903 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
Chris Wilson275f0392016-10-24 13:42:14 +01001904
Chris Wilsonb90b91d2014-06-10 12:14:40 +01001905 /* Finally, remap it using the new GTT offset */
Chris Wilsonc58305a2016-08-19 16:54:28 +01001906 ret = remap_io_mapping(area,
1907 area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
1908 (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1909 min_t(u64, vma->size, area->vm_end - area->vm_start),
1910 &ggtt->mappable);
Chris Wilsona61007a2016-08-18 17:17:02 +01001911
Chris Wilsonb8f90962016-08-05 10:14:07 +01001912err_unpin:
Chris Wilson058d88c2016-08-15 10:49:06 +01001913 __i915_vma_unpin(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001914err_unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001915 mutex_unlock(&dev->struct_mutex);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001916err_rpm:
1917 intel_runtime_pm_put(dev_priv);
Chris Wilson40e62d52016-10-28 13:58:41 +01001918 i915_gem_object_unpin_pages(obj);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001919err:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001920 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001921 case -EIO:
Daniel Vetter2232f032014-09-04 09:36:18 +02001922 /*
1923 * We eat errors when the gpu is terminally wedged to avoid
1924 * userspace unduly crashing (gl has no provisions for mmaps to
1925 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1926 * and so needs to be reported.
1927 */
1928 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
Paulo Zanonif65c9162013-11-27 18:20:34 -02001929 ret = VM_FAULT_SIGBUS;
1930 break;
1931 }
Chris Wilson045e7692010-11-07 09:18:22 +00001932 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001933 /*
1934 * EAGAIN means the gpu is hung and we'll wait for the error
1935 * handler to reset everything when re-faulting in
1936 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001937 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001938 case 0:
1939 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001940 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001941 case -EBUSY:
1942 /*
1943 * EBUSY is ok: this just means that another thread
1944 * already did the job.
1945 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001946 ret = VM_FAULT_NOPAGE;
1947 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001948 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001949 ret = VM_FAULT_OOM;
1950 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001951 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00001952 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001953 ret = VM_FAULT_SIGBUS;
1954 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001955 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001956 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02001957 ret = VM_FAULT_SIGBUS;
1958 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001959 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02001960 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001961}
1962
1963/**
Chris Wilson901782b2009-07-10 08:18:50 +01001964 * i915_gem_release_mmap - remove physical page mappings
1965 * @obj: obj in question
1966 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001967 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001968 * relinquish ownership of the pages back to the system.
1969 *
1970 * It is vital that we remove the page mapping if we have mapped a tiled
1971 * object through the GTT and then lose the fence register due to
1972 * resource pressure. Similarly if the object has been moved out of the
1973 * aperture, then pages mapped into userspace must be revoked. Removing the
1974 * mapping will then trigger a page fault on the next user access, allowing
1975 * fixup by i915_gem_fault().
1976 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001977void
Chris Wilson05394f32010-11-08 19:18:58 +00001978i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001979{
Chris Wilson275f0392016-10-24 13:42:14 +01001980 struct drm_i915_private *i915 = to_i915(obj->base.dev);
Chris Wilson275f0392016-10-24 13:42:14 +01001981
Chris Wilson349f2cc2016-04-13 17:35:12 +01001982 /* Serialisation between user GTT access and our code depends upon
1983 * revoking the CPU's PTE whilst the mutex is held. The next user
1984 * pagefault then has to wait until we release the mutex.
Chris Wilson9c870d02016-10-24 13:42:15 +01001985 *
1986	 * Note that RPM complicates this somewhat by adding an additional
1987 * requirement that operations to the GGTT be made holding the RPM
1988 * wakeref.
Chris Wilson349f2cc2016-04-13 17:35:12 +01001989 */
Chris Wilson275f0392016-10-24 13:42:14 +01001990 lockdep_assert_held(&i915->drm.struct_mutex);
Chris Wilson9c870d02016-10-24 13:42:15 +01001991 intel_runtime_pm_get(i915);
Chris Wilson349f2cc2016-04-13 17:35:12 +01001992
Chris Wilson3594a3e2016-10-24 13:42:16 +01001993 if (list_empty(&obj->userfault_link))
Chris Wilson9c870d02016-10-24 13:42:15 +01001994 goto out;
Chris Wilson901782b2009-07-10 08:18:50 +01001995
Chris Wilson3594a3e2016-10-24 13:42:16 +01001996 list_del_init(&obj->userfault_link);
David Herrmann6796cb12014-01-03 14:24:19 +01001997 drm_vma_node_unmap(&obj->base.vma_node,
1998 obj->base.dev->anon_inode->i_mapping);
Chris Wilson349f2cc2016-04-13 17:35:12 +01001999
2000	/* Ensure that the CPU's PTEs are revoked and there are no outstanding
2001 * memory transactions from userspace before we return. The TLB
2002 * flushing implied above by changing the PTE above *should* be
2003 * sufficient, an extra barrier here just provides us with a bit
2004 * of paranoid documentation about our requirement to serialise
2005 * memory writes before touching registers / GSM.
2006 */
2007 wmb();
Chris Wilson9c870d02016-10-24 13:42:15 +01002008
2009out:
2010 intel_runtime_pm_put(i915);
Chris Wilson901782b2009-07-10 08:18:50 +01002011}
2012
Chris Wilson7c108fd2016-10-24 13:42:18 +01002013void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
Chris Wilsoneedd10f2014-06-16 08:57:44 +01002014{
Chris Wilson3594a3e2016-10-24 13:42:16 +01002015 struct drm_i915_gem_object *obj, *on;
Chris Wilson7c108fd2016-10-24 13:42:18 +01002016 int i;
Chris Wilsoneedd10f2014-06-16 08:57:44 +01002017
Chris Wilson3594a3e2016-10-24 13:42:16 +01002018 /*
2019 * Only called during RPM suspend. All users of the userfault_list
2020 * must be holding an RPM wakeref to ensure that this can not
2021 * run concurrently with themselves (and use the struct_mutex for
2022 * protection between themselves).
2023 */
2024
2025 list_for_each_entry_safe(obj, on,
2026 &dev_priv->mm.userfault_list, userfault_link) {
Chris Wilson275f0392016-10-24 13:42:14 +01002027 list_del_init(&obj->userfault_link);
Chris Wilson275f0392016-10-24 13:42:14 +01002028 drm_vma_node_unmap(&obj->base.vma_node,
2029 obj->base.dev->anon_inode->i_mapping);
Chris Wilson275f0392016-10-24 13:42:14 +01002030 }
Chris Wilson7c108fd2016-10-24 13:42:18 +01002031
2032 /* The fence will be lost when the device powers down. If any were
2033 * in use by hardware (i.e. they are pinned), we should not be powering
2034 * down! All other fences will be reacquired by the user upon waking.
2035 */
2036 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2037 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2038
2039 if (WARN_ON(reg->pin_count))
2040 continue;
2041
2042 if (!reg->vma)
2043 continue;
2044
2045 GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
2046 reg->dirty = true;
2047 }
Chris Wilsoneedd10f2014-06-16 08:57:44 +01002048}
2049
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002050/**
2051 * i915_gem_get_ggtt_size - return required global GTT size for an object
Chris Wilsona9f14812016-08-04 16:32:28 +01002052 * @dev_priv: i915 device
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002053 * @size: object size
2054 * @tiling_mode: tiling mode
2055 *
2056 * Return the required global GTT size for an object, taking into account
2057 * potential fence register mapping.
2058 */
Chris Wilsona9f14812016-08-04 16:32:28 +01002059u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
2060 u64 size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00002061{
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002062 u64 ggtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002063
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002064 GEM_BUG_ON(size == 0);
2065
Chris Wilsona9f14812016-08-04 16:32:28 +01002066 if (INTEL_GEN(dev_priv) >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07002067 tiling_mode == I915_TILING_NONE)
2068 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002069
2070 /* Previous chips need a power-of-two fence region when tiling */
Chris Wilsona9f14812016-08-04 16:32:28 +01002071 if (IS_GEN3(dev_priv))
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002072 ggtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002073 else
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002074 ggtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002075
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002076 while (ggtt_size < size)
2077 ggtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002078
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002079 return ggtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00002080}
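/*
 * Worked example (values assumed, not taken from the code above): on gen3 a
 * 1300KiB tiled object starts from the 1MiB minimum fence region and doubles
 * until it fits, i.e. 1MiB -> 2MiB of GGTT reserved; on gen4+ or for untiled
 * objects the object size is returned unchanged.
 */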
2081
Jesse Barnesde151cf2008-11-12 10:03:55 -08002082/**
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002083 * i915_gem_get_ggtt_alignment - return required global GTT alignment
Chris Wilsona9f14812016-08-04 16:32:28 +01002084 * @dev_priv: i915 device
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002085 * @size: object size
2086 * @tiling_mode: tiling mode
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002087 * @fenced: is fenced alignment required or not
Jesse Barnesde151cf2008-11-12 10:03:55 -08002088 *
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002089 * Return the required global GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01002090 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002091 */
Chris Wilsona9f14812016-08-04 16:32:28 +01002092u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002093 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002094{
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002095 GEM_BUG_ON(size == 0);
2096
Jesse Barnesde151cf2008-11-12 10:03:55 -08002097 /*
2098 * Minimum alignment is 4k (GTT page size), but might be greater
2099 * if a fence register is needed for the object.
2100 */
Chris Wilsona9f14812016-08-04 16:32:28 +01002101 if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07002102 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002103 return 4096;
2104
2105 /*
2106 * Previous chips need to be aligned to the size of the smallest
2107 * fence register that can contain the object.
2108 */
Chris Wilsona9f14812016-08-04 16:32:28 +01002109 return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002110}
2111
Chris Wilsond8cb5082012-08-11 15:41:03 +01002112static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2113{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002114 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002115 int err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002116
Chris Wilsonf3f61842016-08-05 10:14:14 +01002117 err = drm_gem_create_mmap_offset(&obj->base);
2118 if (!err)
2119 return 0;
Daniel Vetterda494d72012-12-20 15:11:16 +01002120
Chris Wilsonf3f61842016-08-05 10:14:14 +01002121 /* We can idle the GPU locklessly to flush stale objects, but in order
2122 * to claim that space for ourselves, we need to take the big
2123 * struct_mutex to free the requests+objects and allocate our slot.
Chris Wilsond8cb5082012-08-11 15:41:03 +01002124 */
Chris Wilsonea746f32016-09-09 14:11:49 +01002125 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002126 if (err)
2127 return err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002128
Chris Wilsonf3f61842016-08-05 10:14:14 +01002129 err = i915_mutex_lock_interruptible(&dev_priv->drm);
2130 if (!err) {
2131 i915_gem_retire_requests(dev_priv);
2132 err = drm_gem_create_mmap_offset(&obj->base);
2133 mutex_unlock(&dev_priv->drm.struct_mutex);
2134 }
Daniel Vetterda494d72012-12-20 15:11:16 +01002135
Chris Wilsonf3f61842016-08-05 10:14:14 +01002136 return err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002137}
2138
2139static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2140{
Chris Wilsond8cb5082012-08-11 15:41:03 +01002141 drm_gem_free_mmap_offset(&obj->base);
2142}
2143
Dave Airlieda6b51d2014-12-24 13:11:17 +10002144int
Dave Airlieff72145b2011-02-07 12:16:14 +10002145i915_gem_mmap_gtt(struct drm_file *file,
2146 struct drm_device *dev,
Dave Airlieda6b51d2014-12-24 13:11:17 +10002147 uint32_t handle,
Dave Airlieff72145b2011-02-07 12:16:14 +10002148 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002149{
Chris Wilson05394f32010-11-08 19:18:58 +00002150 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002151 int ret;
2152
Chris Wilson03ac0642016-07-20 13:31:51 +01002153 obj = i915_gem_object_lookup(file, handle);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002154 if (!obj)
2155 return -ENOENT;
Chris Wilsonab182822009-09-22 18:46:17 +01002156
Chris Wilsond8cb5082012-08-11 15:41:03 +01002157 ret = i915_gem_object_create_mmap_offset(obj);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002158 if (ret == 0)
2159 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002160
Chris Wilsonf0cd5182016-10-28 13:58:43 +01002161 i915_gem_object_put(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002162 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002163}
2164
Dave Airlieff72145b2011-02-07 12:16:14 +10002165/**
2166 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2167 * @dev: DRM device
2168 * @data: GTT mapping ioctl data
2169 * @file: GEM object info
2170 *
2171 * Simply returns the fake offset to userspace so it can mmap it.
2172 * The mmap call will end up in drm_gem_mmap(), which will set things
2173 * up so we can get faults in the handler above.
2174 *
2175 * The fault handler will take care of binding the object into the GTT
2176 * (since it may have been evicted to make room for something), allocating
2177 * a fence register, and mapping the appropriate aperture address into
2178 * userspace.
2179 */
2180int
2181i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2182 struct drm_file *file)
2183{
2184 struct drm_i915_gem_mmap_gtt *args = data;
2185
Dave Airlieda6b51d2014-12-24 13:11:17 +10002186 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
Dave Airlieff72145b2011-02-07 12:16:14 +10002187}
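/*
 * A hedged userspace-side sketch of the two-step GTT mmap flow described
 * above: fetch the fake offset via the ioctl, then mmap() the DRM fd so that
 * faults land in i915_gem_fault(); the helper name is illustrative only.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *example_gtt_mmap(int drm_fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = handle };
	void *ptr;

	/* Ask the driver for the fake offset backing this object. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg))
		return NULL;

	/* Faulting accesses through this mapping end up in i915_gem_fault(). */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, mmap_arg.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}
#endif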
2188
Daniel Vetter225067e2012-08-20 10:23:20 +02002189/* Immediately discard the backing storage */
2190static void
2191i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01002192{
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002193 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02002194
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002195 if (obj->base.filp == NULL)
2196 return;
2197
Daniel Vetter225067e2012-08-20 10:23:20 +02002198 /* Our goal here is to return as much of the memory as
2199 * is possible back to the system as we are called from OOM.
2200 * To do this we must instruct the shmfs to drop all of its
2201 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01002202 */
Chris Wilson55372522014-03-25 13:23:06 +00002203 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002204 obj->mm.madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01002205}
Chris Wilsone5281cc2010-10-28 13:45:36 +01002206
Chris Wilson55372522014-03-25 13:23:06 +00002207/* Try to discard unwanted pages */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002208void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
Daniel Vetter225067e2012-08-20 10:23:20 +02002209{
Chris Wilson55372522014-03-25 13:23:06 +00002210 struct address_space *mapping;
2211
Chris Wilson1233e2d2016-10-28 13:58:37 +01002212 lockdep_assert_held(&obj->mm.lock);
2213 GEM_BUG_ON(obj->mm.pages);
2214
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002215 switch (obj->mm.madv) {
Chris Wilson55372522014-03-25 13:23:06 +00002216 case I915_MADV_DONTNEED:
2217 i915_gem_object_truncate(obj);
2218 case __I915_MADV_PURGED:
2219 return;
2220 }
2221
2222 if (obj->base.filp == NULL)
2223 return;
2224
Al Viro93c76a32015-12-04 23:45:44 -05002225	mapping = obj->base.filp->f_mapping;
Chris Wilson55372522014-03-25 13:23:06 +00002226 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002227}
2228
Chris Wilson5cdf5882010-09-27 15:51:07 +01002229static void
Chris Wilson03ac84f2016-10-28 13:58:36 +01002230i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2231 struct sg_table *pages)
Eric Anholt673a3942008-07-30 12:06:12 -07002232{
Dave Gordon85d12252016-05-20 11:54:06 +01002233 struct sgt_iter sgt_iter;
2234 struct page *page;
Daniel Vetter1286ff72012-05-10 15:25:09 +02002235
Chris Wilsonc3f923b2016-12-23 14:57:57 +00002236 __i915_gem_object_release_shmem(obj, pages, true);
Eric Anholt856fa192009-03-19 14:10:50 -07002237
Chris Wilson03ac84f2016-10-28 13:58:36 +01002238 i915_gem_gtt_finish_pages(obj, pages);
Imre Deake2273302015-07-09 12:59:05 +03002239
Daniel Vetter6dacfd22011-09-12 21:30:02 +02002240 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002241 i915_gem_object_save_bit_17_swizzle(obj, pages);
Eric Anholt280b7132009-03-12 16:56:27 -07002242
Chris Wilson03ac84f2016-10-28 13:58:36 +01002243 for_each_sgt_page(page, sgt_iter, pages) {
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002244 if (obj->mm.dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01002245 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002246
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002247 if (obj->mm.madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01002248 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002249
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002250 put_page(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002251 }
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002252 obj->mm.dirty = false;
Eric Anholt673a3942008-07-30 12:06:12 -07002253
Chris Wilson03ac84f2016-10-28 13:58:36 +01002254 sg_free_table(pages);
2255 kfree(pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01002256}
2257
Chris Wilson96d77632016-10-28 13:58:33 +01002258static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2259{
2260 struct radix_tree_iter iter;
2261 void **slot;
2262
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002263 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2264 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
Chris Wilson96d77632016-10-28 13:58:33 +01002265}
2266
Chris Wilson548625e2016-11-01 12:11:34 +00002267void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2268 enum i915_mm_subclass subclass)
Chris Wilson37e680a2012-06-07 15:38:42 +01002269{
Chris Wilson03ac84f2016-10-28 13:58:36 +01002270 struct sg_table *pages;
Chris Wilson37e680a2012-06-07 15:38:42 +01002271
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002272 if (i915_gem_object_has_pinned_pages(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002273 return;
Chris Wilsona5570172012-09-04 21:02:54 +01002274
Chris Wilson15717de2016-08-04 07:52:26 +01002275 GEM_BUG_ON(obj->bind_count);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002276 if (!READ_ONCE(obj->mm.pages))
2277 return;
2278
2279 /* May be called by shrinker from within get_pages() (on another bo) */
Chris Wilson548625e2016-11-01 12:11:34 +00002280 mutex_lock_nested(&obj->mm.lock, subclass);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002281 if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2282 goto unlock;
Ben Widawsky3e123022013-07-31 17:00:04 -07002283
Chris Wilsona2165e32012-12-03 11:49:00 +00002284 /* ->put_pages might need to allocate memory for the bit17 swizzle
2285 * array, hence protect them from being reaped by removing them from gtt
2286 * lists early. */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002287 pages = fetch_and_zero(&obj->mm.pages);
2288 GEM_BUG_ON(!pages);
Chris Wilsona2165e32012-12-03 11:49:00 +00002289
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002290 if (obj->mm.mapping) {
Chris Wilson4b30cb22016-08-18 17:16:42 +01002291 void *ptr;
2292
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002293 ptr = ptr_mask_bits(obj->mm.mapping);
Chris Wilson4b30cb22016-08-18 17:16:42 +01002294 if (is_vmalloc_addr(ptr))
2295 vunmap(ptr);
Chris Wilsonfb8621d2016-04-08 12:11:14 +01002296 else
Chris Wilson4b30cb22016-08-18 17:16:42 +01002297 kunmap(kmap_to_page(ptr));
2298
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002299 obj->mm.mapping = NULL;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002300 }
2301
Chris Wilson96d77632016-10-28 13:58:33 +01002302 __i915_gem_object_reset_page_iter(obj);
2303
Chris Wilson03ac84f2016-10-28 13:58:36 +01002304 obj->ops->put_pages(obj, pages);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002305unlock:
2306 mutex_unlock(&obj->mm.lock);
Chris Wilson6c085a72012-08-20 11:40:46 +02002307}
2308
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002309static void i915_sg_trim(struct sg_table *orig_st)
2310{
2311 struct sg_table new_st;
2312 struct scatterlist *sg, *new_sg;
2313 unsigned int i;
2314
2315 if (orig_st->nents == orig_st->orig_nents)
2316 return;
2317
Chris Wilson64d14612016-12-23 14:57:58 +00002318 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002319 return;
2320
2321 new_sg = new_st.sgl;
2322 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2323 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2324 /* called before being DMA mapped, no need to copy sg->dma_* */
2325 new_sg = sg_next(new_sg);
2326 }
2327
2328 sg_free_table(orig_st);
2329
2330 *orig_st = new_st;
2331}
2332
Chris Wilson03ac84f2016-10-28 13:58:36 +01002333static struct sg_table *
Chris Wilson6c085a72012-08-20 11:40:46 +02002334i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002335{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002336 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsonabb0dea2016-12-19 12:43:45 +00002337 const unsigned long page_count = obj->base.size / PAGE_SIZE;
2338 unsigned long i;
Eric Anholt673a3942008-07-30 12:06:12 -07002339 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01002340 struct sg_table *st;
2341 struct scatterlist *sg;
Dave Gordon85d12252016-05-20 11:54:06 +01002342 struct sgt_iter sgt_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07002343 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02002344 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson4ff340f02016-10-18 13:02:50 +01002345 unsigned int max_segment;
Imre Deake2273302015-07-09 12:59:05 +03002346 int ret;
Chris Wilson6c085a72012-08-20 11:40:46 +02002347 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07002348
Chris Wilson6c085a72012-08-20 11:40:46 +02002349 /* Assert that the object is not currently in any GPU domain. As it
2350 * wasn't in the GTT, there shouldn't be any way it could have been in
2351 * a GPU cache
2352 */
Chris Wilson03ac84f2016-10-28 13:58:36 +01002353 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2354 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Chris Wilson6c085a72012-08-20 11:40:46 +02002355
Konrad Rzeszutek Wilk7453c542016-12-20 10:02:02 -05002356 max_segment = swiotlb_max_segment();
Chris Wilson871dfbd2016-10-11 09:20:21 +01002357 if (!max_segment)
Chris Wilson4ff340f02016-10-18 13:02:50 +01002358 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
Chris Wilson871dfbd2016-10-11 09:20:21 +01002359
Chris Wilson9da3da62012-06-01 15:20:22 +01002360 st = kmalloc(sizeof(*st), GFP_KERNEL);
2361 if (st == NULL)
Chris Wilson03ac84f2016-10-28 13:58:36 +01002362 return ERR_PTR(-ENOMEM);
Eric Anholt673a3942008-07-30 12:06:12 -07002363
Chris Wilsonabb0dea2016-12-19 12:43:45 +00002364rebuild_st:
Chris Wilson9da3da62012-06-01 15:20:22 +01002365 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01002366 kfree(st);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002367 return ERR_PTR(-ENOMEM);
Chris Wilson9da3da62012-06-01 15:20:22 +01002368 }
2369
2370 /* Get the list of pages out of our struct file. They'll be pinned
2371 * at this point until we release them.
2372 *
2373 * Fail silently without starting the shrinker
2374 */
Al Viro93c76a32015-12-04 23:45:44 -05002375 mapping = obj->base.filp->f_mapping;
Michal Hockoc62d2552015-11-06 16:28:49 -08002376 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
Mel Gormand0164ad2015-11-06 16:28:21 -08002377 gfp |= __GFP_NORETRY | __GFP_NOWARN;
Imre Deak90797e62013-02-18 19:28:03 +02002378 sg = st->sgl;
2379 st->nents = 0;
2380 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02002381 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2382 if (IS_ERR(page)) {
Chris Wilson21ab4e72014-09-09 11:16:08 +01002383 i915_gem_shrink(dev_priv,
2384 page_count,
2385 I915_SHRINK_BOUND |
2386 I915_SHRINK_UNBOUND |
2387 I915_SHRINK_PURGEABLE);
Chris Wilson6c085a72012-08-20 11:40:46 +02002388 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2389 }
2390 if (IS_ERR(page)) {
2391 /* We've tried hard to allocate the memory by reaping
2392 * our own buffer, now let the real VM do its job and
2393 * go down in flames if truly OOM.
2394 */
David Herrmannf461d1b2014-05-25 14:34:10 +02002395 page = shmem_read_mapping_page(mapping, i);
Imre Deake2273302015-07-09 12:59:05 +03002396 if (IS_ERR(page)) {
2397 ret = PTR_ERR(page);
Chris Wilsonb17993b2016-11-14 11:29:30 +00002398 goto err_sg;
Imre Deake2273302015-07-09 12:59:05 +03002399 }
Chris Wilson6c085a72012-08-20 11:40:46 +02002400 }
Chris Wilson871dfbd2016-10-11 09:20:21 +01002401 if (!i ||
2402 sg->length >= max_segment ||
2403 page_to_pfn(page) != last_pfn + 1) {
Imre Deak90797e62013-02-18 19:28:03 +02002404 if (i)
2405 sg = sg_next(sg);
2406 st->nents++;
2407 sg_set_page(sg, page, PAGE_SIZE, 0);
2408 } else {
2409 sg->length += PAGE_SIZE;
2410 }
2411 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03002412
2413 /* Check that the i965g/gm workaround works. */
2414 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07002415 }
Chris Wilson871dfbd2016-10-11 09:20:21 +01002416 if (sg) /* loop terminated early; short sg table */
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002417 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01002418
Tvrtko Ursulin0c40ce12016-11-09 15:13:43 +00002419 /* Trim unused sg entries to avoid wasting memory. */
2420 i915_sg_trim(st);
2421
Chris Wilson03ac84f2016-10-28 13:58:36 +01002422 ret = i915_gem_gtt_prepare_pages(obj, st);
Chris Wilsonabb0dea2016-12-19 12:43:45 +00002423 if (ret) {
2424 /* DMA remapping failed? One possible cause is that
2425		 * it could not reserve enough large entries; asking
2426		 * for PAGE_SIZE chunks instead may help.
2427 */
2428 if (max_segment > PAGE_SIZE) {
2429 for_each_sgt_page(page, sgt_iter, st)
2430 put_page(page);
2431 sg_free_table(st);
2432
2433 max_segment = PAGE_SIZE;
2434 goto rebuild_st;
2435 } else {
2436 dev_warn(&dev_priv->drm.pdev->dev,
2437 "Failed to DMA remap %lu pages\n",
2438 page_count);
2439 goto err_pages;
2440 }
2441 }
Imre Deake2273302015-07-09 12:59:05 +03002442
Eric Anholt673a3942008-07-30 12:06:12 -07002443 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilson03ac84f2016-10-28 13:58:36 +01002444 i915_gem_object_do_bit_17_swizzle(obj, st);
Eric Anholt673a3942008-07-30 12:06:12 -07002445
Chris Wilson03ac84f2016-10-28 13:58:36 +01002446 return st;
Eric Anholt673a3942008-07-30 12:06:12 -07002447
Chris Wilsonb17993b2016-11-14 11:29:30 +00002448err_sg:
Imre Deak90797e62013-02-18 19:28:03 +02002449 sg_mark_end(sg);
Chris Wilsonb17993b2016-11-14 11:29:30 +00002450err_pages:
Dave Gordon85d12252016-05-20 11:54:06 +01002451 for_each_sgt_page(page, sgt_iter, st)
2452 put_page(page);
Chris Wilson9da3da62012-06-01 15:20:22 +01002453 sg_free_table(st);
2454 kfree(st);
Chris Wilson0820baf2014-03-25 13:23:03 +00002455
2456 /* shmemfs first checks if there is enough memory to allocate the page
2457	 * and reports ENOSPC if there is not, along with the usual
2458 * ENOMEM for a genuine allocation failure.
2459 *
2460 * We use ENOSPC in our driver to mean that we have run out of aperture
2461 * space and so want to translate the error from shmemfs back to our
2462 * usual understanding of ENOMEM.
2463 */
Imre Deake2273302015-07-09 12:59:05 +03002464 if (ret == -ENOSPC)
2465 ret = -ENOMEM;
2466
Chris Wilson03ac84f2016-10-28 13:58:36 +01002467 return ERR_PTR(ret);
2468}
2469
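/* Publish a freshly allocated sg_table as the object's backing store and
 * reset the page-lookup cursor. Must be called with obj->mm.lock held; the
 * swizzling quirk below additionally pins the pages so they cannot be
 * released while bit-17 swizzling is in effect.
 */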
2470void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2471 struct sg_table *pages)
2472{
Chris Wilson1233e2d2016-10-28 13:58:37 +01002473 lockdep_assert_held(&obj->mm.lock);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002474
2475 obj->mm.get_page.sg_pos = pages->sgl;
2476 obj->mm.get_page.sg_idx = 0;
2477
2478 obj->mm.pages = pages;
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002479
2480 if (i915_gem_object_is_tiled(obj) &&
2481 to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2482 GEM_BUG_ON(obj->mm.quirked);
2483 __i915_gem_object_pin_pages(obj);
2484 obj->mm.quirked = true;
2485 }
Chris Wilson03ac84f2016-10-28 13:58:36 +01002486}
2487
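/* Allocate the backing storage through obj->ops->get_pages() and install it.
 * Objects marked purgeable (madv != I915_MADV_WILLNEED) are rejected with
 * -EFAULT before any allocation is attempted.
 */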
2488static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2489{
2490 struct sg_table *pages;
2491
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002492 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2493
Chris Wilson03ac84f2016-10-28 13:58:36 +01002494 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2495 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2496 return -EFAULT;
2497 }
2498
2499 pages = obj->ops->get_pages(obj);
2500 if (unlikely(IS_ERR(pages)))
2501 return PTR_ERR(pages);
2502
2503 __i915_gem_object_set_pages(obj, pages);
2504 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002505}
2506
Chris Wilson37e680a2012-06-07 15:38:42 +01002507/* Ensure that the associated pages are gathered from the backing storage
Chris Wilson1233e2d2016-10-28 13:58:37 +01002508 * and pinned into our object. i915_gem_object_pin_pages() may be called
Chris Wilson37e680a2012-06-07 15:38:42 +01002509 * multiple times before they are released by a single call to
Chris Wilson1233e2d2016-10-28 13:58:37 +01002510 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
Chris Wilson37e680a2012-06-07 15:38:42 +01002511 * either as a result of memory pressure (reaping pages under the shrinker)
2512 * or as the object is itself released.
2513 */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002514int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
Chris Wilson37e680a2012-06-07 15:38:42 +01002515{
Chris Wilson03ac84f2016-10-28 13:58:36 +01002516 int err;
Chris Wilson37e680a2012-06-07 15:38:42 +01002517
Chris Wilson1233e2d2016-10-28 13:58:37 +01002518 err = mutex_lock_interruptible(&obj->mm.lock);
2519 if (err)
2520 return err;
Chris Wilson4c7d62c2016-10-28 13:58:32 +01002521
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002522 if (unlikely(!obj->mm.pages)) {
2523 err = ____i915_gem_object_get_pages(obj);
2524 if (err)
2525 goto unlock;
2526
2527 smp_mb__before_atomic();
Chris Wilson1233e2d2016-10-28 13:58:37 +01002528 }
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002529 atomic_inc(&obj->mm.pages_pin_count);
Chris Wilson43e28f02013-01-08 10:53:09 +00002530
Chris Wilson1233e2d2016-10-28 13:58:37 +01002531unlock:
2532 mutex_unlock(&obj->mm.lock);
Chris Wilson03ac84f2016-10-28 13:58:36 +01002533 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07002534}
2535
Dave Gordondd6034c2016-05-20 11:54:04 +01002536/* The 'mapping' part of i915_gem_object_pin_map() below */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002537static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2538 enum i915_map_type type)
Dave Gordondd6034c2016-05-20 11:54:04 +01002539{
2540 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002541 struct sg_table *sgt = obj->mm.pages;
Dave Gordon85d12252016-05-20 11:54:06 +01002542 struct sgt_iter sgt_iter;
2543 struct page *page;
Dave Gordonb338fa42016-05-20 11:54:05 +01002544 struct page *stack_pages[32];
2545 struct page **pages = stack_pages;
Dave Gordondd6034c2016-05-20 11:54:04 +01002546 unsigned long i = 0;
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002547 pgprot_t pgprot;
Dave Gordondd6034c2016-05-20 11:54:04 +01002548 void *addr;
2549
2550 /* A single page can always be kmapped */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002551 if (n_pages == 1 && type == I915_MAP_WB)
Dave Gordondd6034c2016-05-20 11:54:04 +01002552 return kmap(sg_page(sgt->sgl));
2553
Dave Gordonb338fa42016-05-20 11:54:05 +01002554 if (n_pages > ARRAY_SIZE(stack_pages)) {
2555 /* Too big for stack -- allocate temporary array instead */
2556 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2557 if (!pages)
2558 return NULL;
2559 }
Dave Gordondd6034c2016-05-20 11:54:04 +01002560
Dave Gordon85d12252016-05-20 11:54:06 +01002561 for_each_sgt_page(page, sgt_iter, sgt)
2562 pages[i++] = page;
Dave Gordondd6034c2016-05-20 11:54:04 +01002563
2564 /* Check that we have the expected number of pages */
2565 GEM_BUG_ON(i != n_pages);
2566
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002567 switch (type) {
2568 case I915_MAP_WB:
2569 pgprot = PAGE_KERNEL;
2570 break;
2571 case I915_MAP_WC:
2572 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2573 break;
2574 }
2575 addr = vmap(pages, n_pages, 0, pgprot);
Dave Gordondd6034c2016-05-20 11:54:04 +01002576
Dave Gordonb338fa42016-05-20 11:54:05 +01002577 if (pages != stack_pages)
2578 drm_free_large(pages);
Dave Gordondd6034c2016-05-20 11:54:04 +01002579
2580 return addr;
2581}
2582
2583/* get, pin, and map the pages of the object into kernel space */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002584void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2585 enum i915_map_type type)
Chris Wilson0a798eb2016-04-08 12:11:11 +01002586{
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002587 enum i915_map_type has_type;
2588 bool pinned;
2589 void *ptr;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002590 int ret;
2591
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002592 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
Chris Wilson0a798eb2016-04-08 12:11:11 +01002593
Chris Wilson1233e2d2016-10-28 13:58:37 +01002594 ret = mutex_lock_interruptible(&obj->mm.lock);
Chris Wilson0a798eb2016-04-08 12:11:11 +01002595 if (ret)
2596 return ERR_PTR(ret);
2597
Chris Wilson1233e2d2016-10-28 13:58:37 +01002598 pinned = true;
2599 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002600 if (unlikely(!obj->mm.pages)) {
2601 ret = ____i915_gem_object_get_pages(obj);
2602 if (ret)
2603 goto err_unlock;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002604
Chris Wilson2c3a3f42016-11-04 10:30:01 +00002605 smp_mb__before_atomic();
2606 }
2607 atomic_inc(&obj->mm.pages_pin_count);
Chris Wilson1233e2d2016-10-28 13:58:37 +01002608 pinned = false;
2609 }
2610 GEM_BUG_ON(!obj->mm.pages);
Chris Wilson0a798eb2016-04-08 12:11:11 +01002611
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002612 ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002613 if (ptr && has_type != type) {
2614 if (pinned) {
2615 ret = -EBUSY;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002616 goto err_unpin;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002617 }
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002618
2619 if (is_vmalloc_addr(ptr))
2620 vunmap(ptr);
2621 else
2622 kunmap(kmap_to_page(ptr));
2623
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002624 ptr = obj->mm.mapping = NULL;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002625 }
2626
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002627 if (!ptr) {
2628 ptr = i915_gem_object_map(obj, type);
2629 if (!ptr) {
2630 ret = -ENOMEM;
Chris Wilson1233e2d2016-10-28 13:58:37 +01002631 goto err_unpin;
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002632 }
2633
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002634 obj->mm.mapping = ptr_pack_bits(ptr, type);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002635 }
2636
Chris Wilson1233e2d2016-10-28 13:58:37 +01002637out_unlock:
2638 mutex_unlock(&obj->mm.lock);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002639 return ptr;
2640
Chris Wilson1233e2d2016-10-28 13:58:37 +01002641err_unpin:
2642 atomic_dec(&obj->mm.pages_pin_count);
2643err_unlock:
2644 ptr = ERR_PTR(ret);
2645 goto out_unlock;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002646}
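/* Illustrative usage sketch (not taken from any caller): pin and map the
 * object, access it through the returned pointer, then drop the pin. The
 * identifiers data and length below are placeholders.
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, length);
 *	i915_gem_object_unpin_map(obj);
 *
 * The mapping is cached in obj->mm.mapping and reused by later callers until
 * the pages themselves are released.
 */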
2647
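/* A context is considered banned once it has either been explicitly marked
 * as such, or has been found guilty of a hang within its configured ban
 * period.
 */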
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002648static bool i915_context_is_banned(const struct i915_gem_context *ctx)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002649{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002650 unsigned long elapsed;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002651
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002652 if (ctx->hang_stats.banned)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002653 return true;
2654
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002655 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
Chris Wilson676fa572014-12-24 08:13:39 -08002656 if (ctx->hang_stats.ban_period_seconds &&
2657 elapsed <= ctx->hang_stats.ban_period_seconds) {
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002658 DRM_DEBUG("context hanging too fast, banning!\n");
2659 return true;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002660 }
2661
2662 return false;
2663}
2664
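/* Record the verdict for a context after a hang: a guilty context gains an
 * active-batch strike (and may become banned), an innocent one merely counts
 * another pending batch.
 */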
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002665static void i915_set_reset_status(struct i915_gem_context *ctx,
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002666 const bool guilty)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002667{
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002668 struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002669
2670 if (guilty) {
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002671 hs->banned = i915_context_is_banned(ctx);
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002672 hs->batch_active++;
2673 hs->guilty_ts = get_seconds();
2674 } else {
2675 hs->batch_pending++;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002676 }
2677}
2678
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002679struct drm_i915_gem_request *
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002680i915_gem_find_active_request(struct intel_engine_cs *engine)
Chris Wilson9375e442010-09-19 12:21:28 +01002681{
Chris Wilson4db080f2013-12-04 11:37:09 +00002682 struct drm_i915_gem_request *request;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002683
Chris Wilsonf69a02c2016-07-01 17:23:16 +01002684 /* We are called by the error capture and reset at a random
2685 * point in time. In particular, note that neither is crucially
2686 * ordered with an interrupt. After a hang, the GPU is dead and we
2687 * assume that no more writes can happen (we waited long enough for
2688	 * all writes that were in flight to be flushed) - adding an
2689 * extra delay for a recent interrupt is pointless. Hence, we do
2690 * not need an engine->irq_seqno_barrier() before the seqno reads.
2691 */
Chris Wilson73cb9702016-10-28 13:58:46 +01002692 list_for_each_entry(request, &engine->timeline->requests, link) {
Chris Wilson80b204b2016-10-28 13:58:58 +01002693 if (__i915_gem_request_completed(request))
Chris Wilson4db080f2013-12-04 11:37:09 +00002694 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002695
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002696 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00002697 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002698
2699 return NULL;
2700}
2701
Chris Wilson821ed7d2016-09-09 14:11:53 +01002702static void reset_request(struct drm_i915_gem_request *request)
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002703{
Chris Wilson821ed7d2016-09-09 14:11:53 +01002704 void *vaddr = request->ring->vaddr;
2705 u32 head;
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002706
Chris Wilson821ed7d2016-09-09 14:11:53 +01002707 /* As this request likely depends on state from the lost
2708	 * context, clear out all the user operations, leaving the
2709 * breadcrumb at the end (so we get the fence notifications).
2710 */
2711 head = request->head;
2712 if (request->postfix < head) {
2713 memset(vaddr + head, 0, request->ring->size - head);
2714 head = 0;
2715 }
2716 memset(vaddr + head, 0, request->postfix - head);
Chris Wilson4db080f2013-12-04 11:37:09 +00002717}
2718
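/* Per-engine reset bookkeeping: find the request that was executing when the
 * hang occurred, judge whether its context caused it, rewind the CS to
 * restart from the guilty request's breadcrumb and scrub any queued requests
 * belonging to the same (non-default) context.
 */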
Chris Wilson821ed7d2016-09-09 14:11:53 +01002719static void i915_gem_reset_engine(struct intel_engine_cs *engine)
Chris Wilson4db080f2013-12-04 11:37:09 +00002720{
Chris Wilsondcff85c2016-08-05 10:14:11 +01002721 struct drm_i915_gem_request *request;
Chris Wilson821ed7d2016-09-09 14:11:53 +01002722 struct i915_gem_context *incomplete_ctx;
Chris Wilson80b204b2016-10-28 13:58:58 +01002723 struct intel_timeline *timeline;
Chris Wilson2471eb52016-12-23 14:58:04 +00002724 unsigned long flags;
Chris Wilson821ed7d2016-09-09 14:11:53 +01002725 bool ring_hung;
Chris Wilson608c1a52015-09-03 13:01:40 +01002726
Chris Wilson821ed7d2016-09-09 14:11:53 +01002727 if (engine->irq_seqno_barrier)
2728 engine->irq_seqno_barrier(engine);
2729
2730 request = i915_gem_find_active_request(engine);
2731 if (!request)
2732 return;
2733
2734 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
Chris Wilson77c60702016-10-04 21:11:29 +01002735 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
2736 ring_hung = false;
2737
Chris Wilson821ed7d2016-09-09 14:11:53 +01002738 i915_set_reset_status(request->ctx, ring_hung);
2739 if (!ring_hung)
2740 return;
2741
2742 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
Chris Wilson65e47602016-10-28 13:58:49 +01002743 engine->name, request->global_seqno);
Chris Wilson821ed7d2016-09-09 14:11:53 +01002744
2745 /* Setup the CS to resume from the breadcrumb of the hung request */
2746 engine->reset_hw(engine, request);
2747
2748 /* Users of the default context do not rely on logical state
2749 * preserved between batches. They have to emit full state on
2750 * every batch and so it is safe to execute queued requests following
2751 * the hang.
2752 *
2753 * Other contexts preserve state, now corrupt. We want to skip all
2754 * queued requests that reference the corrupt context.
2755 */
2756 incomplete_ctx = request->ctx;
2757 if (i915_gem_context_is_default(incomplete_ctx))
2758 return;
2759
Chris Wilson2471eb52016-12-23 14:58:04 +00002760 timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
2761
2762 spin_lock_irqsave(&engine->timeline->lock, flags);
2763 spin_lock(&timeline->lock);
2764
Chris Wilson73cb9702016-10-28 13:58:46 +01002765 list_for_each_entry_continue(request, &engine->timeline->requests, link)
Chris Wilson821ed7d2016-09-09 14:11:53 +01002766 if (request->ctx == incomplete_ctx)
2767 reset_request(request);
Chris Wilson80b204b2016-10-28 13:58:58 +01002768
Chris Wilson80b204b2016-10-28 13:58:58 +01002769 list_for_each_entry(request, &timeline->requests, link)
2770 reset_request(request);
Chris Wilson2471eb52016-12-23 14:58:04 +00002771
2772 spin_unlock(&timeline->lock);
2773 spin_unlock_irqrestore(&engine->timeline->lock, flags);
Chris Wilson821ed7d2016-09-09 14:11:53 +01002774}
2775
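/* Global post-reset fixup: retire what has already completed, let every
 * engine repair its own state, restore the fence registers and, if the GT
 * was awake, re-enable the powersaving features disturbed by the reset.
 */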
2776void i915_gem_reset(struct drm_i915_private *dev_priv)
2777{
2778 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302779 enum intel_engine_id id;
Chris Wilson821ed7d2016-09-09 14:11:53 +01002780
Chris Wilson4c7d62c2016-10-28 13:58:32 +01002781 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2782
Chris Wilson821ed7d2016-09-09 14:11:53 +01002783 i915_gem_retire_requests(dev_priv);
2784
Akash Goel3b3f1652016-10-13 22:44:48 +05302785 for_each_engine(engine, dev_priv, id)
Chris Wilson821ed7d2016-09-09 14:11:53 +01002786 i915_gem_reset_engine(engine);
2787
Tvrtko Ursulin4362f4f2016-11-16 08:55:33 +00002788 i915_gem_restore_fences(dev_priv);
Chris Wilsonf2a91d12016-09-21 14:51:06 +01002789
2790 if (dev_priv->gt.awake) {
2791 intel_sanitize_gt_powersave(dev_priv);
2792 intel_enable_gt_powersave(dev_priv);
2793 if (INTEL_GEN(dev_priv) >= 6)
2794 gen6_rps_busy(dev_priv);
2795 }
Chris Wilson821ed7d2016-09-09 14:11:53 +01002796}
2797
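/* Submission path used once the GPU is wedged: requests can no longer be
 * executed, so they are completed immediately by advancing the global seqno
 * to theirs.
 */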
2798static void nop_submit_request(struct drm_i915_gem_request *request)
2799{
Chris Wilsonce1135c2016-11-22 14:41:20 +00002800 i915_gem_request_submit(request);
2801 intel_engine_init_global_seqno(request->engine, request->global_seqno);
Chris Wilson821ed7d2016-09-09 14:11:53 +01002802}
2803
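/* Quiesce an engine for the wedged state: route new submissions to the nop
 * path, mark everything already submitted as complete and drop the requests
 * held in the execlist ports and queue.
 */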
2804static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
2805{
2806 engine->submit_request = nop_submit_request;
Chris Wilson70c2a242016-09-09 14:11:46 +01002807
Chris Wilsonc4b09302016-07-20 09:21:10 +01002808 /* Mark all pending requests as complete so that any concurrent
2809 * (lockless) lookup doesn't try and wait upon the request as we
2810 * reset it.
2811 */
Chris Wilson73cb9702016-10-28 13:58:46 +01002812 intel_engine_init_global_seqno(engine,
Chris Wilsoncb399ea2016-11-01 10:03:16 +00002813 intel_engine_last_submit(engine));
Chris Wilsonc4b09302016-07-20 09:21:10 +01002814
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002815 /*
Oscar Mateodcb4c122014-11-13 10:28:10 +00002816	 * Clear up the execlists queue before freeing the requests, as those
2817 * are the ones that keep the context and ringbuffer backing objects
2818 * pinned in place.
2819 */
Oscar Mateodcb4c122014-11-13 10:28:10 +00002820
Tomas Elf7de1691a2015-10-19 16:32:32 +01002821 if (i915.enable_execlists) {
Chris Wilson663f71e2016-11-14 20:41:00 +00002822 unsigned long flags;
2823
2824 spin_lock_irqsave(&engine->timeline->lock, flags);
2825
Chris Wilson70c2a242016-09-09 14:11:46 +01002826 i915_gem_request_put(engine->execlist_port[0].request);
2827 i915_gem_request_put(engine->execlist_port[1].request);
2828 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
Chris Wilson20311bd2016-11-14 20:41:03 +00002829 engine->execlist_queue = RB_ROOT;
2830 engine->execlist_first = NULL;
Chris Wilson663f71e2016-11-14 20:41:00 +00002831
2832 spin_unlock_irqrestore(&engine->timeline->lock, flags);
Oscar Mateodcb4c122014-11-13 10:28:10 +00002833 }
Eric Anholt673a3942008-07-30 12:06:12 -07002834}
2835
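/* Declare the device terminally wedged: no further GPU work will be
 * executed, every engine is switched to the nop submission path and the
 * idle worker is kicked so that the GT is parked.
 */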
Chris Wilson821ed7d2016-09-09 14:11:53 +01002836void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
Eric Anholt673a3942008-07-30 12:06:12 -07002837{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002838 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302839 enum intel_engine_id id;
Eric Anholt673a3942008-07-30 12:06:12 -07002840
Chris Wilson821ed7d2016-09-09 14:11:53 +01002841 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2842 set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
Chris Wilson4db080f2013-12-04 11:37:09 +00002843
Chris Wilson821ed7d2016-09-09 14:11:53 +01002844 i915_gem_context_lost(dev_priv);
Akash Goel3b3f1652016-10-13 22:44:48 +05302845 for_each_engine(engine, dev_priv, id)
Chris Wilson821ed7d2016-09-09 14:11:53 +01002846 i915_gem_cleanup_engine(engine);
Chris Wilsonb913b332016-07-13 09:10:31 +01002847 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
Chris Wilsondfaae392010-09-22 10:31:52 +01002848
Chris Wilson821ed7d2016-09-09 14:11:53 +01002849 i915_gem_retire_requests(dev_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07002850}
2851
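/* Periodic housekeeping: retire completed requests whenever the struct_mutex
 * is available and keep both this worker and the hangcheck scheduled for as
 * long as the GT remains awake.
 */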
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002852static void
Eric Anholt673a3942008-07-30 12:06:12 -07002853i915_gem_retire_work_handler(struct work_struct *work)
2854{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002855 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01002856 container_of(work, typeof(*dev_priv), gt.retire_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01002857 struct drm_device *dev = &dev_priv->drm;
Eric Anholt673a3942008-07-30 12:06:12 -07002858
Chris Wilson891b48c2010-09-29 12:26:37 +01002859 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002860 if (mutex_trylock(&dev->struct_mutex)) {
Chris Wilson67d97da2016-07-04 08:08:31 +01002861 i915_gem_retire_requests(dev_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002862 mutex_unlock(&dev->struct_mutex);
2863 }
Chris Wilson67d97da2016-07-04 08:08:31 +01002864
2865 /* Keep the retire handler running until we are finally idle.
2866 * We do not need to do this test under locking as in the worst-case
2867 * we queue the retire worker once too often.
2868 */
Chris Wilsonc9615612016-07-09 10:12:06 +01002869 if (READ_ONCE(dev_priv->gt.awake)) {
2870 i915_queue_hangcheck(dev_priv);
Chris Wilson67d97da2016-07-04 08:08:31 +01002871 queue_delayed_work(dev_priv->wq,
2872 &dev_priv->gt.retire_work,
Chris Wilsonbcb45082012-10-05 17:02:57 +01002873 round_jiffies_up_relative(HZ));
Chris Wilsonc9615612016-07-09 10:12:06 +01002874 }
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002875}
Chris Wilson891b48c2010-09-29 12:26:37 +01002876
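/* Runs a while after the last request was submitted: if the GPU really has
 * gone idle, flush the batch pools, drop the GT wakeref and let RPS fall
 * back to its idle frequencies.
 */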
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002877static void
2878i915_gem_idle_work_handler(struct work_struct *work)
2879{
2880 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01002881 container_of(work, typeof(*dev_priv), gt.idle_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01002882 struct drm_device *dev = &dev_priv->drm;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002883 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302884 enum intel_engine_id id;
Chris Wilson67d97da2016-07-04 08:08:31 +01002885 bool rearm_hangcheck;
2886
2887 if (!READ_ONCE(dev_priv->gt.awake))
2888 return;
2889
Imre Deak0cb56702016-11-07 11:20:04 +02002890 /*
2891	 * Wait for the last execlists context to complete, but bail out in case a
2892 * new request is submitted.
2893 */
2894 wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
2895 intel_execlists_idle(dev_priv), 10);
2896
Chris Wilson28176ef2016-10-28 13:58:56 +01002897 if (READ_ONCE(dev_priv->gt.active_requests))
Chris Wilson67d97da2016-07-04 08:08:31 +01002898 return;
2899
2900 rearm_hangcheck =
2901 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2902
2903 if (!mutex_trylock(&dev->struct_mutex)) {
2904 /* Currently busy, come back later */
2905 mod_delayed_work(dev_priv->wq,
2906 &dev_priv->gt.idle_work,
2907 msecs_to_jiffies(50));
2908 goto out_rearm;
2909 }
2910
Imre Deak93c97dc2016-11-07 11:20:03 +02002911 /*
2912	 * A new request was retired after this work handler started; extend the
2913	 * active period until the next instance of the work.
2914 */
2915 if (work_pending(work))
2916 goto out_unlock;
2917
Chris Wilson28176ef2016-10-28 13:58:56 +01002918 if (dev_priv->gt.active_requests)
Chris Wilson67d97da2016-07-04 08:08:31 +01002919 goto out_unlock;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002920
Imre Deak0cb56702016-11-07 11:20:04 +02002921 if (wait_for(intel_execlists_idle(dev_priv), 10))
2922 DRM_ERROR("Timeout waiting for engines to idle\n");
2923
Akash Goel3b3f1652016-10-13 22:44:48 +05302924 for_each_engine(engine, dev_priv, id)
Chris Wilson67d97da2016-07-04 08:08:31 +01002925 i915_gem_batch_pool_fini(&engine->batch_pool);
Zou Nan hai852835f2010-05-21 09:08:56 +08002926
Chris Wilson67d97da2016-07-04 08:08:31 +01002927 GEM_BUG_ON(!dev_priv->gt.awake);
2928 dev_priv->gt.awake = false;
2929 rearm_hangcheck = false;
Daniel Vetter30ecad72015-12-09 09:29:36 +01002930
Chris Wilson67d97da2016-07-04 08:08:31 +01002931 if (INTEL_GEN(dev_priv) >= 6)
2932 gen6_rps_idle(dev_priv);
2933 intel_runtime_pm_put(dev_priv);
2934out_unlock:
2935 mutex_unlock(&dev->struct_mutex);
Chris Wilson35c94182015-04-07 16:20:37 +01002936
Chris Wilson67d97da2016-07-04 08:08:31 +01002937out_rearm:
2938 if (rearm_hangcheck) {
2939 GEM_BUG_ON(!dev_priv->gt.awake);
2940 i915_queue_hangcheck(dev_priv);
Chris Wilson35c94182015-04-07 16:20:37 +01002941 }
Eric Anholt673a3942008-07-30 12:06:12 -07002942}
2943
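/* Called when userspace closes a GEM handle: detach any per-file (ppGTT)
 * VMA and, if the object is still busy on the GPU, take an extra reference
 * owned by the active-tracking machinery so the final free is deferred
 * until the object is idle.
 */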
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002944void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2945{
2946 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2947 struct drm_i915_file_private *fpriv = file->driver_priv;
2948 struct i915_vma *vma, *vn;
2949
2950 mutex_lock(&obj->base.dev->struct_mutex);
2951 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2952 if (vma->vm->file == fpriv)
2953 i915_vma_close(vma);
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01002954
2955 if (i915_gem_object_is_active(obj) &&
2956 !i915_gem_object_has_active_reference(obj)) {
2957 i915_gem_object_set_active_reference(obj);
2958 i915_gem_object_get(obj);
2959 }
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002960 mutex_unlock(&obj->base.dev->struct_mutex);
2961}
2962
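/* Translate the ioctl's signed nanosecond timeout into jiffies for the wait:
 * negative values mean wait indefinitely, zero means poll without waiting.
 */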
Chris Wilsone95433c2016-10-28 13:58:27 +01002963static unsigned long to_wait_timeout(s64 timeout_ns)
2964{
2965 if (timeout_ns < 0)
2966 return MAX_SCHEDULE_TIMEOUT;
2967
2968 if (timeout_ns == 0)
2969 return 0;
2970
2971 return nsecs_to_jiffies_timeout(timeout_ns);
2972}
2973
Ben Widawsky5816d642012-04-11 11:18:19 -07002974/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002975 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002976 * @dev: drm device pointer
2977 * @data: ioctl data blob
2978 * @file: drm file pointer
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002979 *
2980 * Returns 0 if successful, else an error is returned with the remaining time in
2981 * the timeout parameter.
2982 * -ETIME: object is still busy after timeout
2983 * -ERESTARTSYS: signal interrupted the wait
2984 * -ENOENT: object doesn't exist
2985 * Also possible, but rare:
2986 * -EAGAIN: GPU wedged
2987 * -ENOMEM: damn
2988 * -ENODEV: Internal IRQ fail
2989 * -E?: The add request failed
2990 *
2991 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2992 * non-zero timeout parameter the wait ioctl will wait for the given number of
2993 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2994 * without holding struct_mutex the object may become re-busied before this
2995 * function completes. A similar but shorter race condition exists in the busy
2996 * ioctl.
2997 */
2998int
2999i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3000{
3001 struct drm_i915_gem_wait *args = data;
3002 struct drm_i915_gem_object *obj;
Chris Wilsone95433c2016-10-28 13:58:27 +01003003 ktime_t start;
3004 long ret;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003005
Daniel Vetter11b5d512014-09-29 15:31:26 +02003006 if (args->flags != 0)
3007 return -EINVAL;
3008
Chris Wilson03ac0642016-07-20 13:31:51 +01003009 obj = i915_gem_object_lookup(file, args->bo_handle);
Chris Wilson033d5492016-08-05 10:14:17 +01003010 if (!obj)
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003011 return -ENOENT;
Chris Wilson033d5492016-08-05 10:14:17 +01003012
Chris Wilsone95433c2016-10-28 13:58:27 +01003013 start = ktime_get();
3014
3015 ret = i915_gem_object_wait(obj,
3016 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
3017 to_wait_timeout(args->timeout_ns),
3018 to_rps_client(file));
3019
3020 if (args->timeout_ns > 0) {
3021 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3022 if (args->timeout_ns < 0)
3023 args->timeout_ns = 0;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003024 }
3025
Chris Wilsonf0cd5182016-10-28 13:58:43 +01003026 i915_gem_object_put(obj);
John Harrisonff865882014-11-24 18:49:28 +00003027 return ret;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07003028}
3029
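/* Wait upon the last request submitted to each engine of the timeline, using
 * the caller's wait flags (interruptible, locked, etc.).
 */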
Chris Wilson73cb9702016-10-28 13:58:46 +01003030static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003031{
Chris Wilson73cb9702016-10-28 13:58:46 +01003032 int ret, i;
3033
3034 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3035 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
3036 if (ret)
3037 return ret;
3038 }
3039
3040 return 0;
3041}
3042
3043int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3044{
Dave Gordonb4ac5af2016-03-24 11:20:38 +00003045 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003046
Chris Wilson9caa34a2016-11-11 14:58:08 +00003047 if (flags & I915_WAIT_LOCKED) {
3048 struct i915_gem_timeline *tl;
3049
3050 lockdep_assert_held(&i915->drm.struct_mutex);
3051
3052 list_for_each_entry(tl, &i915->gt.timelines, link) {
3053 ret = wait_for_timeline(tl, flags);
3054 if (ret)
3055 return ret;
3056 }
3057 } else {
3058 ret = wait_for_timeline(&i915->gt.global_timeline, flags);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003059 if (ret)
3060 return ret;
3061 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003062
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01003063 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01003064}
3065
Chris Wilsond0da48c2016-11-06 12:59:59 +00003066void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3067 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003068{
Eric Anholt673a3942008-07-30 12:06:12 -07003069 /* If we don't have a page list set up, then we're not pinned
3070 * to GPU, and we can ignore the cache flush because it'll happen
3071 * again at bind time.
3072 */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003073 if (!obj->mm.pages)
Chris Wilsond0da48c2016-11-06 12:59:59 +00003074 return;
Eric Anholt673a3942008-07-30 12:06:12 -07003075
Imre Deak769ce462013-02-13 21:56:05 +02003076 /*
3077 * Stolen memory is always coherent with the GPU as it is explicitly
3078 * marked as wc by the system, or the system is cache-coherent.
3079 */
Chris Wilson6a2c4232014-11-04 04:51:40 -08003080 if (obj->stolen || obj->phys_handle)
Chris Wilsond0da48c2016-11-06 12:59:59 +00003081 return;
Imre Deak769ce462013-02-13 21:56:05 +02003082
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003083 /* If the GPU is snooping the contents of the CPU cache,
3084 * we do not need to manually clear the CPU cache lines. However,
3085 * the caches are only snooped when the render cache is
3086 * flushed/invalidated. As we always have to emit invalidations
3087 * and flushes when moving into and out of the RENDER domain, correct
3088 * snooping behaviour occurs naturally as the result of our domain
3089 * tracking.
3090 */
Chris Wilson0f719792015-01-13 13:32:52 +00003091 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3092 obj->cache_dirty = true;
Chris Wilsond0da48c2016-11-06 12:59:59 +00003093 return;
Chris Wilson0f719792015-01-13 13:32:52 +00003094 }
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003095
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003096 trace_i915_gem_object_clflush(obj);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003097 drm_clflush_sg(obj->mm.pages);
Chris Wilson0f719792015-01-13 13:32:52 +00003098 obj->cache_dirty = false;
Eric Anholte47c68e2008-11-14 13:35:19 -08003099}
3100
3101/** Flushes the GTT write domain for the object if it's dirty. */
3102static void
Chris Wilson05394f32010-11-08 19:18:58 +00003103i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003104{
Chris Wilson3b5724d2016-08-18 17:16:49 +01003105 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003106
Chris Wilson05394f32010-11-08 19:18:58 +00003107 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003108 return;
3109
Chris Wilson63256ec2011-01-04 18:42:07 +00003110 /* No actual flushing is required for the GTT write domain. Writes
Chris Wilson3b5724d2016-08-18 17:16:49 +01003111 * to it "immediately" go to main memory as far as we know, so there's
Eric Anholte47c68e2008-11-14 13:35:19 -08003112 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003113 *
3114 * However, we do have to enforce the order so that all writes through
3115 * the GTT land before any writes to the device, such as updates to
3116 * the GATT itself.
Chris Wilson3b5724d2016-08-18 17:16:49 +01003117 *
3118 * We also have to wait a bit for the writes to land from the GTT.
3119 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
3120 * timing. This issue has only been observed when switching quickly
3121 * between GTT writes and CPU reads from inside the kernel on recent hw,
3122 * and it appears to only affect discrete GTT blocks (i.e. on LLC
3123 * system agents we cannot reproduce this behaviour).
Eric Anholte47c68e2008-11-14 13:35:19 -08003124 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003125 wmb();
Chris Wilson3b5724d2016-08-18 17:16:49 +01003126 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
Akash Goel3b3f1652016-10-13 22:44:48 +05303127 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
Chris Wilson63256ec2011-01-04 18:42:07 +00003128
Chris Wilsond243ad82016-08-18 17:16:44 +01003129 intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
Daniel Vetterf99d7062014-06-19 16:01:59 +02003130
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003131 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003132 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003133 obj->base.read_domains,
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003134 I915_GEM_DOMAIN_GTT);
Eric Anholte47c68e2008-11-14 13:35:19 -08003135}
3136
3137/** Flushes the CPU write domain for the object if it's dirty. */
3138static void
Daniel Vettere62b59e2015-01-21 14:53:48 +01003139i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003140{
Chris Wilson05394f32010-11-08 19:18:58 +00003141 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003142 return;
3143
Chris Wilsond0da48c2016-11-06 12:59:59 +00003144 i915_gem_clflush_object(obj, obj->pin_display);
Rodrigo Vivide152b62015-07-07 16:28:51 -07003145 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
Daniel Vetterf99d7062014-06-19 16:01:59 +02003146
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003147 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003148 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003149 obj->base.read_domains,
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003150 I915_GEM_DOMAIN_CPU);
Eric Anholte47c68e2008-11-14 13:35:19 -08003151}
3152
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003153/**
3154 * Moves a single object to the GTT read, and possibly write, domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003155 * @obj: object to act on
3156 * @write: ask for write access or read only
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003157 *
3158 * This function returns when the move is complete, including waiting on
3159 * flushes to occur.
3160 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003161int
Chris Wilson20217462010-11-23 15:26:33 +00003162i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003163{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003164 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003165 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003166
Chris Wilsone95433c2016-10-28 13:58:27 +01003167 lockdep_assert_held(&obj->base.dev->struct_mutex);
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003168
Chris Wilsone95433c2016-10-28 13:58:27 +01003169 ret = i915_gem_object_wait(obj,
3170 I915_WAIT_INTERRUPTIBLE |
3171 I915_WAIT_LOCKED |
3172 (write ? I915_WAIT_ALL : 0),
3173 MAX_SCHEDULE_TIMEOUT,
3174 NULL);
Chris Wilson88241782011-01-07 17:09:48 +00003175 if (ret)
3176 return ret;
3177
Chris Wilsonc13d87e2016-07-20 09:21:15 +01003178 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3179 return 0;
3180
Chris Wilson43566de2015-01-02 16:29:29 +05303181 /* Flush and acquire obj->pages so that we are coherent through
3182 * direct access in memory with previous cached writes through
3183 * shmemfs and that our cache domain tracking remains valid.
3184 * For example, if the obj->filp was moved to swap without us
3185 * being notified and releasing the pages, we would mistakenly
3186 * continue to assume that the obj remained out of the CPU cached
3187 * domain.
3188 */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003189 ret = i915_gem_object_pin_pages(obj);
Chris Wilson43566de2015-01-02 16:29:29 +05303190 if (ret)
3191 return ret;
3192
Daniel Vettere62b59e2015-01-21 14:53:48 +01003193 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003194
Chris Wilsond0a57782012-10-09 19:24:37 +01003195 /* Serialise direct access to this object with the barriers for
3196 * coherent writes from the GPU, by effectively invalidating the
3197 * GTT domain upon first access.
3198 */
3199 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3200 mb();
3201
Chris Wilson05394f32010-11-08 19:18:58 +00003202 old_write_domain = obj->base.write_domain;
3203 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003204
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003205 /* It should now be out of any other write domains, and we can update
3206 * the domain values for our changes.
3207 */
Chris Wilson40e62d52016-10-28 13:58:41 +01003208 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilson05394f32010-11-08 19:18:58 +00003209 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003210 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003211 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3212 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003213 obj->mm.dirty = true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003214 }
3215
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003216 trace_i915_gem_object_change_domain(obj,
3217 old_read_domains,
3218 old_write_domain);
3219
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003220 i915_gem_object_unpin_pages(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003221 return 0;
3222}
3223
Chris Wilsonef55f922015-10-09 14:11:27 +01003224/**
3225 * Changes the cache-level of an object across all VMA.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003226 * @obj: object to act on
3227 * @cache_level: new cache level to set for the object
Chris Wilsonef55f922015-10-09 14:11:27 +01003228 *
3229 * After this function returns, the object will be in the new cache-level
3230 * across all GTT and the contents of the backing storage will be coherent,
3231 * with respect to the new cache-level. In order to keep the backing storage
3232 * coherent for all users, we only allow a single cache level to be set
3233 * globally on the object and prevent it from being changed whilst the
3234 * hardware is reading from the object. That is if the object is currently
3235 * on the scanout it will be set to uncached (or equivalent display
3236 * cache coherency) and all non-MOCS GPU access will also be uncached so
3237 * that all direct access to the scanout remains coherent.
3238 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003239int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3240 enum i915_cache_level cache_level)
3241{
Chris Wilsonaa653a62016-08-04 07:52:27 +01003242 struct i915_vma *vma;
Chris Wilsona6a7cc42016-11-18 21:17:46 +00003243 int ret;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003244
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003245 lockdep_assert_held(&obj->base.dev->struct_mutex);
3246
Chris Wilsone4ffd172011-04-04 09:44:39 +01003247 if (obj->cache_level == cache_level)
Chris Wilsona6a7cc42016-11-18 21:17:46 +00003248 return 0;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003249
Chris Wilsonef55f922015-10-09 14:11:27 +01003250 /* Inspect the list of currently bound VMA and unbind any that would
3251 * be invalid given the new cache-level. This is principally to
3252 * catch the issue of the CS prefetch crossing page boundaries and
3253 * reading an invalid PTE on older architectures.
3254 */
Chris Wilsonaa653a62016-08-04 07:52:27 +01003255restart:
3256 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003257 if (!drm_mm_node_allocated(&vma->node))
3258 continue;
3259
Chris Wilson20dfbde2016-08-04 16:32:30 +01003260 if (i915_vma_is_pinned(vma)) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003261 DRM_DEBUG("can not change the cache level of pinned objects\n");
3262 return -EBUSY;
3263 }
3264
Chris Wilsonaa653a62016-08-04 07:52:27 +01003265 if (i915_gem_valid_gtt_space(vma, cache_level))
3266 continue;
3267
3268 ret = i915_vma_unbind(vma);
3269 if (ret)
3270 return ret;
3271
3272 /* As unbinding may affect other elements in the
3273 * obj->vma_list (due to side-effects from retiring
3274 * an active vma), play safe and restart the iterator.
3275 */
3276 goto restart;
Chris Wilson42d6ab42012-07-26 11:49:32 +01003277 }
3278
Chris Wilsonef55f922015-10-09 14:11:27 +01003279 /* We can reuse the existing drm_mm nodes but need to change the
3280 * cache-level on the PTE. We could simply unbind them all and
3281 * rebind with the correct cache-level on next use. However since
3282	 * we already have a valid slot, dma mapping, pages etc, we may as well
3283 * rewrite the PTE in the belief that doing so tramples upon less
3284 * state and so involves less work.
3285 */
Chris Wilson15717de2016-08-04 07:52:26 +01003286 if (obj->bind_count) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003287 /* Before we change the PTE, the GPU must not be accessing it.
3288 * If we wait upon the object, we know that all the bound
3289 * VMA are no longer active.
3290 */
Chris Wilsone95433c2016-10-28 13:58:27 +01003291 ret = i915_gem_object_wait(obj,
3292 I915_WAIT_INTERRUPTIBLE |
3293 I915_WAIT_LOCKED |
3294 I915_WAIT_ALL,
3295 MAX_SCHEDULE_TIMEOUT,
3296 NULL);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003297 if (ret)
3298 return ret;
3299
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00003300 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3301 cache_level != I915_CACHE_NONE) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003302 /* Access to snoopable pages through the GTT is
3303 * incoherent and on some machines causes a hard
3304			 * lockup. Relinquish the CPU mmapping to force
3305 * userspace to refault in the pages and we can
3306 * then double check if the GTT mapping is still
3307 * valid for that pointer access.
3308 */
3309 i915_gem_release_mmap(obj);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003310
Chris Wilsonef55f922015-10-09 14:11:27 +01003311 /* As we no longer need a fence for GTT access,
3312 * we can relinquish it now (and so prevent having
3313 * to steal a fence from someone else on the next
3314 * fence request). Note GPU activity would have
3315 * dropped the fence as all snoopable access is
3316 * supposed to be linear.
3317 */
Chris Wilson49ef5292016-08-18 17:17:00 +01003318 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3319 ret = i915_vma_put_fence(vma);
3320 if (ret)
3321 return ret;
3322 }
Chris Wilsonef55f922015-10-09 14:11:27 +01003323 } else {
3324 /* We either have incoherent backing store and
3325 * so no GTT access or the architecture is fully
3326 * coherent. In such cases, existing GTT mmaps
3327 * ignore the cache bit in the PTE and we can
3328 * rewrite it without confusing the GPU or having
3329 * to force userspace to fault back in its mmaps.
3330 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003331 }
3332
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003333 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003334 if (!drm_mm_node_allocated(&vma->node))
3335 continue;
3336
3337 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3338 if (ret)
3339 return ret;
3340 }
Chris Wilsone4ffd172011-04-04 09:44:39 +01003341 }
3342
Chris Wilsona6a7cc42016-11-18 21:17:46 +00003343 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
3344 cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3345 obj->cache_dirty = true;
3346
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003347 list_for_each_entry(vma, &obj->vma_list, obj_link)
Chris Wilson2c225692013-08-09 12:26:45 +01003348 vma->node.color = cache_level;
3349 obj->cache_level = cache_level;
3350
Chris Wilsone4ffd172011-04-04 09:44:39 +01003351 return 0;
3352}
3353
Ben Widawsky199adf42012-09-21 17:01:20 -07003354int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3355 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003356{
Ben Widawsky199adf42012-09-21 17:01:20 -07003357 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003358 struct drm_i915_gem_object *obj;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003359 int err = 0;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003360
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003361 rcu_read_lock();
3362 obj = i915_gem_object_lookup_rcu(file, args->handle);
3363 if (!obj) {
3364 err = -ENOENT;
3365 goto out;
3366 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003367
Chris Wilson651d7942013-08-08 14:41:10 +01003368 switch (obj->cache_level) {
3369 case I915_CACHE_LLC:
3370 case I915_CACHE_L3_LLC:
3371 args->caching = I915_CACHING_CACHED;
3372 break;
3373
Chris Wilson4257d3b2013-08-08 14:41:11 +01003374 case I915_CACHE_WT:
3375 args->caching = I915_CACHING_DISPLAY;
3376 break;
3377
Chris Wilson651d7942013-08-08 14:41:10 +01003378 default:
3379 args->caching = I915_CACHING_NONE;
3380 break;
3381 }
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003382out:
3383 rcu_read_unlock();
3384 return err;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003385}
3386
Ben Widawsky199adf42012-09-21 17:01:20 -07003387int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3388 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003389{
Chris Wilson9c870d02016-10-24 13:42:15 +01003390 struct drm_i915_private *i915 = to_i915(dev);
Ben Widawsky199adf42012-09-21 17:01:20 -07003391 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003392 struct drm_i915_gem_object *obj;
3393 enum i915_cache_level level;
3394 int ret;
3395
Ben Widawsky199adf42012-09-21 17:01:20 -07003396 switch (args->caching) {
3397 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003398 level = I915_CACHE_NONE;
3399 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003400 case I915_CACHING_CACHED:
Imre Deake5756c12015-08-14 18:43:30 +03003401 /*
3402 * Due to a HW issue on BXT A stepping, GPU stores via a
3403 * snooped mapping may leave stale data in a corresponding CPU
3404 * cacheline, whereas normally such cachelines would get
3405 * invalidated.
3406 */
Chris Wilson9c870d02016-10-24 13:42:15 +01003407 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
Imre Deake5756c12015-08-14 18:43:30 +03003408 return -ENODEV;
3409
Chris Wilsone6994ae2012-07-10 10:27:08 +01003410 level = I915_CACHE_LLC;
3411 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003412 case I915_CACHING_DISPLAY:
Chris Wilson9c870d02016-10-24 13:42:15 +01003413 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003414 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003415 default:
3416 return -EINVAL;
3417 }
3418
Ben Widawsky3bc29132012-09-26 16:15:20 -07003419 ret = i915_mutex_lock_interruptible(dev);
3420 if (ret)
Chris Wilson9c870d02016-10-24 13:42:15 +01003421 return ret;
Ben Widawsky3bc29132012-09-26 16:15:20 -07003422
Chris Wilson03ac0642016-07-20 13:31:51 +01003423 obj = i915_gem_object_lookup(file, args->handle);
3424 if (!obj) {
Chris Wilsone6994ae2012-07-10 10:27:08 +01003425 ret = -ENOENT;
3426 goto unlock;
3427 }
3428
3429 ret = i915_gem_object_set_cache_level(obj, level);
Chris Wilsonf8c417c2016-07-20 13:31:53 +01003430 i915_gem_object_put(obj);
Chris Wilsone6994ae2012-07-10 10:27:08 +01003431unlock:
3432 mutex_unlock(&dev->struct_mutex);
3433 return ret;
3434}
3435
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003436/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003437 * Prepare buffer for display plane (scanout, cursors, etc).
3438 * Can be called from an uninterruptible phase (modesetting) and allows
3439 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003440 */
Chris Wilson058d88c2016-08-15 10:49:06 +01003441struct i915_vma *
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003442i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3443 u32 alignment,
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003444 const struct i915_ggtt_view *view)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003445{
Chris Wilson058d88c2016-08-15 10:49:06 +01003446 struct i915_vma *vma;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003447 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003448 int ret;
3449
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003450 lockdep_assert_held(&obj->base.dev->struct_mutex);
3451
Chris Wilsoncc98b412013-08-09 12:25:09 +01003452 /* Mark the pin_display early so that we account for the
3453 * display coherency whilst setting up the cache domains.
3454 */
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003455 obj->pin_display++;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003456
Eric Anholta7ef0642011-03-29 16:59:54 -07003457 /* The display engine is not coherent with the LLC cache on gen6. As
3458 * a result, we make sure that the pinning that is about to occur is
3459 * done with uncached PTEs. This is lowest common denominator for all
3460 * chipsets.
3461 *
3462 * However for gen6+, we could do better by using the GFDT bit instead
3463 * of uncaching, which would allow us to flush all the LLC-cached data
3464 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3465 */
Chris Wilson651d7942013-08-08 14:41:10 +01003466 ret = i915_gem_object_set_cache_level(obj,
Tvrtko Ursulin86527442016-10-13 11:03:00 +01003467 HAS_WT(to_i915(obj->base.dev)) ?
3468 I915_CACHE_WT : I915_CACHE_NONE);
Chris Wilson058d88c2016-08-15 10:49:06 +01003469 if (ret) {
3470 vma = ERR_PTR(ret);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003471 goto err_unpin_display;
Chris Wilson058d88c2016-08-15 10:49:06 +01003472 }
Eric Anholta7ef0642011-03-29 16:59:54 -07003473
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003474 /* As the user may map the buffer once pinned in the display plane
3475 * (e.g. libkms for the bootup splash), we have to ensure that we
Chris Wilson2efb8132016-08-18 17:17:06 +01003476 * always use map_and_fenceable for all scanout buffers. However,
3477 * it may simply be too big to fit into mappable, in which case
3478 * put it anyway and hope that userspace can cope (but always first
3479 * try to preserve the existing ABI).
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003480 */
Chris Wilson2efb8132016-08-18 17:17:06 +01003481 vma = ERR_PTR(-ENOSPC);
3482 if (view->type == I915_GGTT_VIEW_NORMAL)
3483 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3484 PIN_MAPPABLE | PIN_NONBLOCK);
Chris Wilson767a2222016-11-07 11:01:28 +00003485 if (IS_ERR(vma)) {
3486 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3487 unsigned int flags;
3488
3489 /* Valleyview is definitely limited to scanning out the first
3490		 * 512MiB. Let's presume this behaviour was inherited from the
3491 * g4x display engine and that all earlier gen are similarly
3492 * limited. Testing suggests that it is a little more
3493 * complicated than this. For example, Cherryview appears quite
3494 * happy to scanout from anywhere within its global aperture.
3495 */
3496 flags = 0;
3497 if (HAS_GMCH_DISPLAY(i915))
3498 flags = PIN_MAPPABLE;
3499 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3500 }
Chris Wilson058d88c2016-08-15 10:49:06 +01003501 if (IS_ERR(vma))
Chris Wilsoncc98b412013-08-09 12:25:09 +01003502 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003503
Chris Wilsond8923dc2016-08-18 17:17:07 +01003504 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3505
Chris Wilsona6a7cc42016-11-18 21:17:46 +00003506 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
3507 if (obj->cache_dirty) {
3508 i915_gem_clflush_object(obj, true);
3509 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
3510 }
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003511
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003512 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003513 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003514
3515 /* It should now be out of any other write domains, and we can update
3516 * the domain values for our changes.
3517 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003518 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003519 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003520
3521 trace_i915_gem_object_change_domain(obj,
3522 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003523 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003524
Chris Wilson058d88c2016-08-15 10:49:06 +01003525 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003526
3527err_unpin_display:
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003528 obj->pin_display--;
Chris Wilson058d88c2016-08-15 10:49:06 +01003529 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003530}
3531
3532void
Chris Wilson058d88c2016-08-15 10:49:06 +01003533i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003534{
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003535 lockdep_assert_held(&vma->vm->dev->struct_mutex);
3536
Chris Wilson058d88c2016-08-15 10:49:06 +01003537 if (WARN_ON(vma->obj->pin_display == 0))
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003538 return;
3539
Chris Wilsond8923dc2016-08-18 17:17:07 +01003540 if (--vma->obj->pin_display == 0)
3541 vma->display_alignment = 0;
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003542
Chris Wilson383d5822016-08-18 17:17:08 +01003543 /* Bump the LRU to try and avoid premature eviction whilst flipping */
3544 if (!i915_vma_is_active(vma))
3545 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3546
Chris Wilson058d88c2016-08-15 10:49:06 +01003547 i915_vma_unpin(vma);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003548}
3549
Eric Anholte47c68e2008-11-14 13:35:19 -08003550/**
3551 * Moves a single object to the CPU read, and possibly write, domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003552 * @obj: object to act on
3553 * @write: requesting write or read-only access
Eric Anholte47c68e2008-11-14 13:35:19 -08003554 *
3555 * This function returns when the move is complete, including waiting on
3556 * flushes to occur.
3557 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003558int
Chris Wilson919926a2010-11-12 13:42:53 +00003559i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003560{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003561 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003562 int ret;
3563
Chris Wilsone95433c2016-10-28 13:58:27 +01003564 lockdep_assert_held(&obj->base.dev->struct_mutex);
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003565
Chris Wilsone95433c2016-10-28 13:58:27 +01003566 ret = i915_gem_object_wait(obj,
3567 I915_WAIT_INTERRUPTIBLE |
3568 I915_WAIT_LOCKED |
3569 (write ? I915_WAIT_ALL : 0),
3570 MAX_SCHEDULE_TIMEOUT,
3571 NULL);
Chris Wilson88241782011-01-07 17:09:48 +00003572 if (ret)
3573 return ret;
3574
Chris Wilsonc13d87e2016-07-20 09:21:15 +01003575 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3576 return 0;
3577
Eric Anholte47c68e2008-11-14 13:35:19 -08003578 i915_gem_object_flush_gtt_write_domain(obj);
3579
Chris Wilson05394f32010-11-08 19:18:58 +00003580 old_write_domain = obj->base.write_domain;
3581 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003582
Eric Anholte47c68e2008-11-14 13:35:19 -08003583 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003584 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003585 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003586
Chris Wilson05394f32010-11-08 19:18:58 +00003587 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003588 }
3589
3590 /* It should now be out of any other write domains, and we can update
3591 * the domain values for our changes.
3592 */
Chris Wilson40e62d52016-10-28 13:58:41 +01003593 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003594
3595 /* If we're writing through the CPU, then the GPU read domains will
3596 * need to be invalidated at next use.
3597 */
3598 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003599 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3600 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003601 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003602
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003603 trace_i915_gem_object_change_domain(obj,
3604 old_read_domains,
3605 old_write_domain);
3606
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003607 return 0;
3608}
3609
Eric Anholt673a3942008-07-30 12:06:12 -07003610/* Throttle our rendering by waiting until the ring has completed our requests
3611 * emitted over 20 msec ago.
3612 *
Eric Anholtb9624422009-06-03 07:27:35 +00003613 * Note that if we were to use the current jiffies each time around the loop,
3614 * we wouldn't escape the function with any frames outstanding if the time to
3615 * render a frame was over 20ms.
3616 *
Eric Anholt673a3942008-07-30 12:06:12 -07003617 * This should get us reasonable parallelism between CPU and GPU but also
3618 * relatively low latency when blocking on a particular request to finish.
3619 */
3620static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003621i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003622{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003623 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003624 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilsond0bc54f2015-05-21 21:01:48 +01003625 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
John Harrison54fb2412014-11-24 18:49:27 +00003626 struct drm_i915_gem_request *request, *target = NULL;
Chris Wilsone95433c2016-10-28 13:58:27 +01003627 long ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003628
Chris Wilsonf4457ae2016-04-13 17:35:08 +01003629 /* ABI: return -EIO if already wedged */
3630 if (i915_terminally_wedged(&dev_priv->gpu_error))
3631 return -EIO;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003632
Chris Wilson1c255952010-09-26 11:03:27 +01003633 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003634 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003635 if (time_after_eq(request->emitted_jiffies, recent_enough))
3636 break;
3637
John Harrisonfcfa423c2015-05-29 17:44:12 +01003638 /*
3639	 * Note that the request might not have been submitted yet,
3640	 * in which case emitted_jiffies will be zero.
3641 */
3642 if (!request->emitted_jiffies)
3643 continue;
3644
John Harrison54fb2412014-11-24 18:49:27 +00003645 target = request;
Eric Anholtb9624422009-06-03 07:27:35 +00003646 }
John Harrisonff865882014-11-24 18:49:28 +00003647 if (target)
Chris Wilsone8a261e2016-07-20 13:31:49 +01003648 i915_gem_request_get(target);
Chris Wilson1c255952010-09-26 11:03:27 +01003649 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003650
John Harrison54fb2412014-11-24 18:49:27 +00003651 if (target == NULL)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003652 return 0;
3653
Chris Wilsone95433c2016-10-28 13:58:27 +01003654 ret = i915_wait_request(target,
3655 I915_WAIT_INTERRUPTIBLE,
3656 MAX_SCHEDULE_TIMEOUT);
Chris Wilsone8a261e2016-07-20 13:31:49 +01003657 i915_gem_request_put(target);
John Harrisonff865882014-11-24 18:49:28 +00003658
Chris Wilsone95433c2016-10-28 13:58:27 +01003659 return ret < 0 ? ret : 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003660}
3661
Chris Wilson058d88c2016-08-15 10:49:06 +01003662struct i915_vma *
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003663i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3664 const struct i915_ggtt_view *view,
Chris Wilson91b2db62016-08-04 16:32:23 +01003665 u64 size,
Chris Wilson2ffffd02016-08-04 16:32:22 +01003666 u64 alignment,
3667 u64 flags)
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003668{
Chris Wilsonad16d2e2016-10-13 09:55:04 +01003669 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3670 struct i915_address_space *vm = &dev_priv->ggtt.base;
Chris Wilson59bfa122016-08-04 16:32:31 +01003671 struct i915_vma *vma;
3672 int ret;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03003673
Chris Wilson4c7d62c2016-10-28 13:58:32 +01003674 lockdep_assert_held(&obj->base.dev->struct_mutex);
3675
Chris Wilson058d88c2016-08-15 10:49:06 +01003676 vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
Chris Wilson59bfa122016-08-04 16:32:31 +01003677 if (IS_ERR(vma))
Chris Wilson058d88c2016-08-15 10:49:06 +01003678 return vma;
Chris Wilson59bfa122016-08-04 16:32:31 +01003679
3680 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3681 if (flags & PIN_NONBLOCK &&
3682 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
Chris Wilson058d88c2016-08-15 10:49:06 +01003683 return ERR_PTR(-ENOSPC);
Chris Wilson59bfa122016-08-04 16:32:31 +01003684
Chris Wilsonad16d2e2016-10-13 09:55:04 +01003685 if (flags & PIN_MAPPABLE) {
3686 u32 fence_size;
3687
3688 fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
3689 i915_gem_object_get_tiling(obj));
3690 /* If the required space is larger than the available
3691	 * aperture, we will not be able to find a slot for the
3692 * object and unbinding the object now will be in
3693 * vain. Worse, doing so may cause us to ping-pong
3694 * the object in and out of the Global GTT and
3695 * waste a lot of cycles under the mutex.
3696 */
3697 if (fence_size > dev_priv->ggtt.mappable_end)
3698 return ERR_PTR(-E2BIG);
3699
3700 /* If NONBLOCK is set the caller is optimistically
3701 * trying to cache the full object within the mappable
3702 * aperture, and *must* have a fallback in place for
3703 * situations where we cannot bind the object. We
3704 * can be a little more lax here and use the fallback
3705 * more often to avoid costly migrations of ourselves
3706 * and other objects within the aperture.
3707 *
3708 * Half-the-aperture is used as a simple heuristic.
3709	 * More interesting would be to search for a free
3710 * block prior to making the commitment to unbind.
3711 * That caters for the self-harm case, and with a
3712 * little more heuristics (e.g. NOFAULT, NOEVICT)
3713 * we could try to minimise harm to others.
3714 */
3715 if (flags & PIN_NONBLOCK &&
3716 fence_size > dev_priv->ggtt.mappable_end / 2)
3717 return ERR_PTR(-ENOSPC);
3718 }
3719
Chris Wilson59bfa122016-08-04 16:32:31 +01003720 WARN(i915_vma_is_pinned(vma),
3721 "bo is already pinned in ggtt with incorrect alignment:"
Chris Wilson05a20d02016-08-18 17:16:55 +01003722 " offset=%08x, req.alignment=%llx,"
3723 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3724 i915_ggtt_offset(vma), alignment,
Chris Wilson59bfa122016-08-04 16:32:31 +01003725 !!(flags & PIN_MAPPABLE),
Chris Wilson05a20d02016-08-18 17:16:55 +01003726 i915_vma_is_map_and_fenceable(vma));
Chris Wilson59bfa122016-08-04 16:32:31 +01003727 ret = i915_vma_unbind(vma);
3728 if (ret)
Chris Wilson058d88c2016-08-15 10:49:06 +01003729 return ERR_PTR(ret);
Chris Wilson59bfa122016-08-04 16:32:31 +01003730 }
3731
Chris Wilson058d88c2016-08-15 10:49:06 +01003732 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3733 if (ret)
3734 return ERR_PTR(ret);
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003735
Chris Wilson058d88c2016-08-15 10:49:06 +01003736 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003737}
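/*
 * In short: the helper above looks up (or creates) the object's GGTT VMA,
 * unbinds it first if the existing placement cannot satisfy the requested
 * size/alignment/flags (subject to the PIN_NONBLOCK/PIN_MAPPABLE heuristics
 * above), and finally pins it with PIN_GLOBAL so the binding is kept until
 * the matching i915_vma_unpin().
 */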
3738
Chris Wilsonedf6b762016-08-09 09:23:33 +01003739static __always_inline unsigned int __busy_read_flag(unsigned int id)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003740{
3741 /* Note that we could alias engines in the execbuf API, but
3742 * that would be very unwise as it prevents userspace from
3743	 * exercising fine control over engine selection. Ahem.
3744 *
3745 * This should be something like EXEC_MAX_ENGINE instead of
3746 * I915_NUM_ENGINES.
3747 */
3748 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3749 return 0x10000 << id;
3750}
3751
3752static __always_inline unsigned int __busy_write_id(unsigned int id)
3753{
Chris Wilson70cb4722016-08-09 18:08:25 +01003754 /* The uABI guarantees an active writer is also amongst the read
3755 * engines. This would be true if we accessed the activity tracking
3756 * under the lock, but as we perform the lookup of the object and
3757 * its activity locklessly we can not guarantee that the last_write
3758 * being active implies that we have set the same engine flag from
3759 * last_read - hence we always set both read and write busy for
3760 * last_write.
3761 */
3762 return id | __busy_read_flag(id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003763}
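/*
 * Illustrative note (not used by the driver itself): with the helpers above,
 * the low 16 bits of the busy flags report the exec_id of the last writer,
 * and bit (16 + exec_id) is set for every engine with an outstanding read.
 * Assuming the render engine reports exec_id 1 (I915_EXEC_RENDER), then
 *	__busy_read_flag(1) == 0x00020000
 *	__busy_write_id(1)  == 0x00020001
 * i.e. a value of 0x00020001 means "last written by engine 1, which also
 * still counts as reading it".
 */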
3764
Chris Wilsonedf6b762016-08-09 09:23:33 +01003765static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01003766__busy_set_if_active(const struct dma_fence *fence,
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003767 unsigned int (*flag)(unsigned int id))
3768{
Chris Wilsond07f0e52016-10-28 13:58:44 +01003769 struct drm_i915_gem_request *rq;
Chris Wilson12555012016-08-16 09:50:40 +01003770
Chris Wilsond07f0e52016-10-28 13:58:44 +01003771 /* We have to check the current hw status of the fence as the uABI
3772 * guarantees forward progress. We could rely on the idle worker
3773 * to eventually flush us, but to minimise latency just ask the
3774 * hardware.
3775 *
3776 * Note we only report on the status of native fences.
3777 */
3778 if (!dma_fence_is_i915(fence))
Chris Wilson12555012016-08-16 09:50:40 +01003779 return 0;
3780
Chris Wilsond07f0e52016-10-28 13:58:44 +01003781 /* opencode to_request() in order to avoid const warnings */
3782 rq = container_of(fence, struct drm_i915_gem_request, fence);
3783 if (i915_gem_request_completed(rq))
3784 return 0;
3785
3786 return flag(rq->engine->exec_id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003787}
3788
Chris Wilsonedf6b762016-08-09 09:23:33 +01003789static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01003790busy_check_reader(const struct dma_fence *fence)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003791{
Chris Wilsond07f0e52016-10-28 13:58:44 +01003792 return __busy_set_if_active(fence, __busy_read_flag);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003793}
3794
Chris Wilsonedf6b762016-08-09 09:23:33 +01003795static __always_inline unsigned int
Chris Wilsond07f0e52016-10-28 13:58:44 +01003796busy_check_writer(const struct dma_fence *fence)
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003797{
Chris Wilsond07f0e52016-10-28 13:58:44 +01003798 if (!fence)
3799 return 0;
3800
3801 return __busy_set_if_active(fence, __busy_write_id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003802}
3803
Eric Anholt673a3942008-07-30 12:06:12 -07003804int
Eric Anholt673a3942008-07-30 12:06:12 -07003805i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003806 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003807{
3808 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003809 struct drm_i915_gem_object *obj;
Chris Wilsond07f0e52016-10-28 13:58:44 +01003810 struct reservation_object_list *list;
3811 unsigned int seq;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003812 int err;
Eric Anholt673a3942008-07-30 12:06:12 -07003813
Chris Wilsond07f0e52016-10-28 13:58:44 +01003814 err = -ENOENT;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003815 rcu_read_lock();
3816 obj = i915_gem_object_lookup_rcu(file, args->handle);
Chris Wilsond07f0e52016-10-28 13:58:44 +01003817 if (!obj)
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003818 goto out;
Chris Wilsond07f0e52016-10-28 13:58:44 +01003819
3820 /* A discrepancy here is that we do not report the status of
3821 * non-i915 fences, i.e. even though we may report the object as idle,
3822 * a call to set-domain may still stall waiting for foreign rendering.
3823 * This also means that wait-ioctl may report an object as busy,
3824 * where busy-ioctl considers it idle.
3825 *
3826 * We trade the ability to warn of foreign fences to report on which
3827 * i915 engines are active for the object.
3828 *
3829 * Alternatively, we can trade that extra information on read/write
3830 * activity with
3831 * args->busy =
3832 * !reservation_object_test_signaled_rcu(obj->resv, true);
3833 * to report the overall busyness. This is what the wait-ioctl does.
3834 *
3835 */
3836retry:
3837 seq = raw_read_seqcount(&obj->resv->seq);
3838
3839 /* Translate the exclusive fence to the READ *and* WRITE engine */
3840 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
3841
3842 /* Translate shared fences to READ set of engines */
3843 list = rcu_dereference(obj->resv->fence);
3844 if (list) {
3845 unsigned int shared_count = list->shared_count, i;
3846
3847 for (i = 0; i < shared_count; ++i) {
3848 struct dma_fence *fence =
3849 rcu_dereference(list->shared[i]);
3850
3851 args->busy |= busy_check_reader(fence);
3852 }
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003853 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003854
Chris Wilsond07f0e52016-10-28 13:58:44 +01003855 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
3856 goto retry;
Chris Wilson426960b2016-01-15 16:51:46 +00003857
Chris Wilsond07f0e52016-10-28 13:58:44 +01003858 err = 0;
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003859out:
3860 rcu_read_unlock();
3861 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07003862}
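/*
 * Hedged userspace sketch (fd/handle are placeholders, not driver code),
 * decoding the flags assembled above via the standard libdrm wrapper:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		unsigned int write_id = busy.busy & 0xffff;
 *		unsigned int read_mask = busy.busy >> 16;
 *		...
 *	}
 *
 * busy.busy == 0 means idle (modulo foreign fences, see above); write_id is
 * the exec_id of the last writer (0 if none) and read_mask has one bit set
 * per engine still reading the object.
 */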
3863
3864int
3865i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3866 struct drm_file *file_priv)
3867{
Akshay Joshi0206e352011-08-16 15:34:10 -04003868 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003869}
3870
Chris Wilson3ef94da2009-09-14 16:50:29 +01003871int
3872i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3873 struct drm_file *file_priv)
3874{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003875 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson3ef94da2009-09-14 16:50:29 +01003876 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003877 struct drm_i915_gem_object *obj;
Chris Wilson1233e2d2016-10-28 13:58:37 +01003878 int err;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003879
3880 switch (args->madv) {
3881 case I915_MADV_DONTNEED:
3882 case I915_MADV_WILLNEED:
3883 break;
3884 default:
3885 return -EINVAL;
3886 }
3887
Chris Wilson03ac0642016-07-20 13:31:51 +01003888 obj = i915_gem_object_lookup(file_priv, args->handle);
Chris Wilson1233e2d2016-10-28 13:58:37 +01003889 if (!obj)
3890 return -ENOENT;
3891
3892 err = mutex_lock_interruptible(&obj->mm.lock);
3893 if (err)
3894 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003895
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003896 if (obj->mm.pages &&
Chris Wilson3e510a82016-08-05 10:14:23 +01003897 i915_gem_object_is_tiled(obj) &&
Daniel Vetter656bfa32014-11-20 09:26:30 +01003898 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
Chris Wilsonbc0629a2016-11-01 10:03:17 +00003899 if (obj->mm.madv == I915_MADV_WILLNEED) {
3900 GEM_BUG_ON(!obj->mm.quirked);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003901 __i915_gem_object_unpin_pages(obj);
Chris Wilsonbc0629a2016-11-01 10:03:17 +00003902 obj->mm.quirked = false;
3903 }
3904 if (args->madv == I915_MADV_WILLNEED) {
Chris Wilson2c3a3f42016-11-04 10:30:01 +00003905 GEM_BUG_ON(obj->mm.quirked);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003906 __i915_gem_object_pin_pages(obj);
Chris Wilsonbc0629a2016-11-01 10:03:17 +00003907 obj->mm.quirked = true;
3908 }
Daniel Vetter656bfa32014-11-20 09:26:30 +01003909 }
3910
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003911 if (obj->mm.madv != __I915_MADV_PURGED)
3912 obj->mm.madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003913
Chris Wilson6c085a72012-08-20 11:40:46 +02003914 /* if the object is no longer attached, discard its backing storage */
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003915 if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003916 i915_gem_object_truncate(obj);
3917
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003918 args->retained = obj->mm.madv != __I915_MADV_PURGED;
Chris Wilson1233e2d2016-10-28 13:58:37 +01003919 mutex_unlock(&obj->mm.lock);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003920
Chris Wilson1233e2d2016-10-28 13:58:37 +01003921out:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01003922 i915_gem_object_put(obj);
Chris Wilson1233e2d2016-10-28 13:58:37 +01003923 return err;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003924}
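/*
 * Hedged userspace sketch (fd/handle and reupload_contents() are
 * placeholders, not driver code): a client typically marks an idle cached
 * buffer purgeable and later checks whether the kernel discarded it before
 * reusing it:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reupload_contents(handle);
 *
 * args->retained assigned above is what ends up in madv.retained.
 */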
3925
Chris Wilson5b8c8ae2016-11-16 19:07:04 +00003926static void
3927frontbuffer_retire(struct i915_gem_active *active,
3928 struct drm_i915_gem_request *request)
3929{
3930 struct drm_i915_gem_object *obj =
3931 container_of(active, typeof(*obj), frontbuffer_write);
3932
3933 intel_fb_obj_flush(obj, true, ORIGIN_CS);
3934}
3935
Chris Wilson37e680a2012-06-07 15:38:42 +01003936void i915_gem_object_init(struct drm_i915_gem_object *obj,
3937 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01003938{
Chris Wilson1233e2d2016-10-28 13:58:37 +01003939 mutex_init(&obj->mm.lock);
3940
Joonas Lahtinen56cea322016-11-02 12:16:04 +02003941 INIT_LIST_HEAD(&obj->global_link);
Chris Wilson275f0392016-10-24 13:42:14 +01003942 INIT_LIST_HEAD(&obj->userfault_link);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02003943 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07003944 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson8d9d5742015-04-07 16:20:38 +01003945 INIT_LIST_HEAD(&obj->batch_pool_link);
Chris Wilson0327d6b2012-08-11 15:41:06 +01003946
Chris Wilson37e680a2012-06-07 15:38:42 +01003947 obj->ops = ops;
3948
Chris Wilsond07f0e52016-10-28 13:58:44 +01003949 reservation_object_init(&obj->__builtin_resv);
3950 obj->resv = &obj->__builtin_resv;
3951
Chris Wilson50349242016-08-18 17:17:04 +01003952 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
Chris Wilson5b8c8ae2016-11-16 19:07:04 +00003953 init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01003954
3955 obj->mm.madv = I915_MADV_WILLNEED;
3956 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
3957 mutex_init(&obj->mm.get_page.lock);
Chris Wilson0327d6b2012-08-11 15:41:06 +01003958
Dave Gordonf19ec8c2016-07-04 11:34:37 +01003959 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
Chris Wilson0327d6b2012-08-11 15:41:06 +01003960}
3961
Chris Wilson37e680a2012-06-07 15:38:42 +01003962static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
Tvrtko Ursulin3599a912016-11-01 14:44:10 +00003963 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
3964 I915_GEM_OBJECT_IS_SHRINKABLE,
Chris Wilson37e680a2012-06-07 15:38:42 +01003965 .get_pages = i915_gem_object_get_pages_gtt,
3966 .put_pages = i915_gem_object_put_pages_gtt,
3967};
3968
Chris Wilsonb4bcbe22016-10-18 13:02:49 +01003969/* Note we don't consider signbits :| */
3970#define overflows_type(x, T) \
3971 (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
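/*
 * Worked example: overflows_type(1ULL << 32, u32) evaluates to
 * sizeof(u64) > sizeof(u32) && ((1ULL << 32) >> 32), i.e. non-zero, so the
 * overflow is caught; overflows_type(0xffffffffULL, u32) shifts down to
 * zero and reports no overflow.
 */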
3972
3973struct drm_i915_gem_object *
3974i915_gem_object_create(struct drm_device *dev, u64 size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00003975{
Ville Syrjäläa26e5232016-10-31 22:37:19 +02003976 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00003977 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07003978 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01003979 gfp_t mask;
Chris Wilsonfe3db792016-04-25 13:32:13 +01003980 int ret;
Daniel Vetterc397b902010-04-09 19:05:07 +00003981
Chris Wilsonb4bcbe22016-10-18 13:02:49 +01003982 /* There is a prevalence of the assumption that we fit the object's
3983 * page count inside a 32bit _signed_ variable. Let's document this and
3984 * catch if we ever need to fix it. In the meantime, if you do spot
3985 * such a local variable, please consider fixing!
3986 */
3987 if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
3988 return ERR_PTR(-E2BIG);
3989
3990 if (overflows_type(size, obj->base.size))
3991 return ERR_PTR(-E2BIG);
3992
Chris Wilson42dcedd2012-11-15 11:32:30 +00003993 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00003994 if (obj == NULL)
Chris Wilsonfe3db792016-04-25 13:32:13 +01003995 return ERR_PTR(-ENOMEM);
Daniel Vetterc397b902010-04-09 19:05:07 +00003996
Chris Wilsonfe3db792016-04-25 13:32:13 +01003997 ret = drm_gem_object_init(dev, &obj->base, size);
3998 if (ret)
3999 goto fail;
Daniel Vetterc397b902010-04-09 19:05:07 +00004000
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004001 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
Ville Syrjäläa26e5232016-10-31 22:37:19 +02004002 if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004003 /* 965gm cannot relocate objects above 4GiB. */
4004 mask &= ~__GFP_HIGHMEM;
4005 mask |= __GFP_DMA32;
4006 }
4007
Al Viro93c76a32015-12-04 23:45:44 -05004008 mapping = obj->base.filp->f_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004009 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004010
Chris Wilson37e680a2012-06-07 15:38:42 +01004011 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004012
Daniel Vetterc397b902010-04-09 19:05:07 +00004013 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4014 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4015
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00004016 if (HAS_LLC(dev_priv)) {
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004017 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004018 * cache) for about a 10% performance improvement
4019 * compared to uncached. Graphics requests other than
4020 * display scanout are coherent with the CPU in
4021 * accessing this cache. This means in this mode we
4022 * don't need to clflush on the CPU side, and on the
4023 * GPU side we only need to flush internal caches to
4024 * get data visible to the CPU.
4025 *
4026 * However, we maintain the display planes as UC, and so
4027 * need to rebind when first used as such.
4028 */
4029 obj->cache_level = I915_CACHE_LLC;
4030 } else
4031 obj->cache_level = I915_CACHE_NONE;
4032
Daniel Vetterd861e332013-07-24 23:25:03 +02004033 trace_i915_gem_object_create(obj);
4034
Chris Wilson05394f32010-11-08 19:18:58 +00004035 return obj;
Chris Wilsonfe3db792016-04-25 13:32:13 +01004036
4037fail:
4038 i915_gem_object_free(obj);
Chris Wilsonfe3db792016-04-25 13:32:13 +01004039 return ERR_PTR(ret);
Daniel Vetterac52bc52010-04-09 19:05:06 +00004040}
4041
Chris Wilson340fbd82014-05-22 09:16:52 +01004042static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4043{
4044 /* If we are the last user of the backing storage (be it shmemfs
4045 * pages or stolen etc), we know that the pages are going to be
4046 * immediately released. In this case, we can then skip copying
4047 * back the contents from the GPU.
4048 */
4049
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004050 if (obj->mm.madv != I915_MADV_WILLNEED)
Chris Wilson340fbd82014-05-22 09:16:52 +01004051 return false;
4052
4053 if (obj->base.filp == NULL)
4054 return true;
4055
4056 /* At first glance, this looks racy, but then again so would be
4057 * userspace racing mmap against close. However, the first external
4058 * reference to the filp can only be obtained through the
4059 * i915_gem_mmap_ioctl() which safeguards us against the user
4060 * acquiring such a reference whilst we are in the middle of
4061 * freeing the object.
4062 */
4063 return atomic_long_read(&obj->base.filp->f_count) == 1;
4064}
4065
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004066static void __i915_gem_free_objects(struct drm_i915_private *i915,
4067 struct llist_node *freed)
Chris Wilsonbe726152010-07-23 23:18:50 +01004068{
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004069 struct drm_i915_gem_object *obj, *on;
Chris Wilsonbe726152010-07-23 23:18:50 +01004070
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004071 mutex_lock(&i915->drm.struct_mutex);
4072 intel_runtime_pm_get(i915);
4073 llist_for_each_entry(obj, freed, freed) {
4074 struct i915_vma *vma, *vn;
Paulo Zanonif65c9162013-11-27 18:20:34 -02004075
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004076 trace_i915_gem_object_destroy(obj);
4077
4078 GEM_BUG_ON(i915_gem_object_is_active(obj));
4079 list_for_each_entry_safe(vma, vn,
4080 &obj->vma_list, obj_link) {
4081 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
4082 GEM_BUG_ON(i915_vma_is_active(vma));
4083 vma->flags &= ~I915_VMA_PIN_MASK;
4084 i915_vma_close(vma);
4085 }
Chris Wilsondb6c2b42016-11-01 11:54:00 +00004086 GEM_BUG_ON(!list_empty(&obj->vma_list));
4087 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004088
Joonas Lahtinen56cea322016-11-02 12:16:04 +02004089 list_del(&obj->global_link);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004090 }
4091 intel_runtime_pm_put(i915);
4092 mutex_unlock(&i915->drm.struct_mutex);
4093
4094 llist_for_each_entry_safe(obj, on, freed, freed) {
4095 GEM_BUG_ON(obj->bind_count);
4096 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4097
4098 if (obj->ops->release)
4099 obj->ops->release(obj);
4100
4101 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4102 atomic_set(&obj->mm.pages_pin_count, 0);
Chris Wilson548625e2016-11-01 12:11:34 +00004103 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004104 GEM_BUG_ON(obj->mm.pages);
4105
4106 if (obj->base.import_attach)
4107 drm_prime_gem_destroy(&obj->base, NULL);
4108
Chris Wilsond07f0e52016-10-28 13:58:44 +01004109 reservation_object_fini(&obj->__builtin_resv);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004110 drm_gem_object_release(&obj->base);
4111 i915_gem_info_remove_obj(i915, obj->base.size);
4112
4113 kfree(obj->bit_17);
4114 i915_gem_object_free(obj);
4115 }
4116}
4117
4118static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4119{
4120 struct llist_node *freed;
4121
4122 freed = llist_del_all(&i915->mm.free_list);
4123 if (unlikely(freed))
4124 __i915_gem_free_objects(i915, freed);
4125}
4126
4127static void __i915_gem_free_work(struct work_struct *work)
4128{
4129 struct drm_i915_private *i915 =
4130 container_of(work, struct drm_i915_private, mm.free_work);
4131 struct llist_node *freed;
Chris Wilson26e12f82011-03-20 11:20:19 +00004132
Chris Wilsonb1f788c2016-08-04 07:52:45 +01004133 /* All file-owned VMA should have been released by this point through
4134 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4135 * However, the object may also be bound into the global GTT (e.g.
4136 * older GPUs without per-process support, or for direct access through
4137 * the GTT either for the user or for scanout). Those VMA still need to
4138	 * be unbound now.
4139 */
Chris Wilson1488fc02012-04-24 15:47:31 +01004140
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004141 while ((freed = llist_del_all(&i915->mm.free_list)))
4142 __i915_gem_free_objects(i915, freed);
4143}
4144
4145static void __i915_gem_free_object_rcu(struct rcu_head *head)
4146{
4147 struct drm_i915_gem_object *obj =
4148 container_of(head, typeof(*obj), rcu);
4149 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4150
4151 /* We can't simply use call_rcu() from i915_gem_free_object()
4152 * as we need to block whilst unbinding, and the call_rcu
4153 * task may be called from softirq context. So we take a
4154 * detour through a worker.
4155 */
4156 if (llist_add(&obj->freed, &i915->mm.free_list))
4157 schedule_work(&i915->mm.free_work);
4158}
4159
4160void i915_gem_free_object(struct drm_gem_object *gem_obj)
4161{
4162 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4163
Chris Wilsonbc0629a2016-11-01 10:03:17 +00004164 if (obj->mm.quirked)
4165 __i915_gem_object_unpin_pages(obj);
4166
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004167 if (discard_backing_storage(obj))
4168 obj->mm.madv = I915_MADV_DONTNEED;
Daniel Vettera071fa02014-06-18 23:28:09 +02004169
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004170 /* Before we free the object, make sure any pure RCU-only
4171 * read-side critical sections are complete, e.g.
4172 * i915_gem_busy_ioctl(). For the corresponding synchronized
4173 * lookup see i915_gem_object_lookup_rcu().
4174 */
4175 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
Chris Wilsonbe726152010-07-23 23:18:50 +01004176}
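/*
 * Summary of the deferred free path above: i915_gem_free_object() only
 * marks the backing storage and defers teardown via call_rcu(); after the
 * RCU grace period, __i915_gem_free_object_rcu() pushes the object onto
 * i915->mm.free_list and schedules mm.free_work; __i915_gem_free_work()
 * then drains the list through __i915_gem_free_objects(), which closes the
 * VMA under struct_mutex before dropping the pages and releasing the GEM
 * object.
 */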
4177
Chris Wilsonf8a7fde2016-10-28 13:58:29 +01004178void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4179{
4180 lockdep_assert_held(&obj->base.dev->struct_mutex);
4181
4182 GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
4183 if (i915_gem_object_is_active(obj))
4184 i915_gem_object_set_active_reference(obj);
4185 else
4186 i915_gem_object_put(obj);
4187}
4188
Chris Wilson3033aca2016-10-28 13:58:47 +01004189static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
4190{
4191 struct intel_engine_cs *engine;
4192 enum intel_engine_id id;
4193
4194 for_each_engine(engine, dev_priv, id)
4195 GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
4196}
4197
Chris Wilsondcff85c2016-08-05 10:14:11 +01004198int i915_gem_suspend(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004199{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004200 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsondcff85c2016-08-05 10:14:11 +01004201 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004202
Chris Wilson54b4f682016-07-21 21:16:19 +01004203 intel_suspend_gt_powersave(dev_priv);
4204
Chris Wilson45c5f202013-10-16 11:50:01 +01004205 mutex_lock(&dev->struct_mutex);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004206
4207 /* We have to flush all the executing contexts to main memory so
4208	 * that they can be saved in the hibernation image. To ensure the last
4209 * context image is coherent, we have to switch away from it. That
4210 * leaves the dev_priv->kernel_context still active when
4211 * we actually suspend, and its image in memory may not match the GPU
4212 * state. Fortunately, the kernel_context is disposable and we do
4213 * not rely on its state.
4214 */
4215 ret = i915_gem_switch_to_kernel_context(dev_priv);
4216 if (ret)
4217 goto err;
4218
Chris Wilson22dd3bb2016-09-09 14:11:50 +01004219 ret = i915_gem_wait_for_idle(dev_priv,
4220 I915_WAIT_INTERRUPTIBLE |
4221 I915_WAIT_LOCKED);
Chris Wilsonf7403342013-09-13 23:57:04 +01004222 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004223 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004224
Chris Wilsonc0336662016-05-06 15:40:21 +01004225 i915_gem_retire_requests(dev_priv);
Chris Wilson28176ef2016-10-28 13:58:56 +01004226 GEM_BUG_ON(dev_priv->gt.active_requests);
Eric Anholt673a3942008-07-30 12:06:12 -07004227
Chris Wilson3033aca2016-10-28 13:58:47 +01004228 assert_kernel_context_is_current(dev_priv);
Chris Wilsonb2e862d2016-04-28 09:56:41 +01004229 i915_gem_context_lost(dev_priv);
Chris Wilson45c5f202013-10-16 11:50:01 +01004230 mutex_unlock(&dev->struct_mutex);
4231
Chris Wilson737b1502015-01-26 18:03:03 +02004232 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
Chris Wilson67d97da2016-07-04 08:08:31 +01004233 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4234 flush_delayed_work(&dev_priv->gt.idle_work);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004235 flush_work(&dev_priv->mm.free_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004236
Chris Wilsonbdcf1202014-11-25 11:56:33 +00004237	/* Assert that we successfully flushed all the work and
4238 * reset the GPU back to its idle, low power state.
4239 */
Chris Wilson67d97da2016-07-04 08:08:31 +01004240 WARN_ON(dev_priv->gt.awake);
Imre Deak31ab49a2016-11-07 11:20:05 +02004241 WARN_ON(!intel_execlists_idle(dev_priv));
Chris Wilsonbdcf1202014-11-25 11:56:33 +00004242
Imre Deak1c777c52016-10-12 17:46:37 +03004243 /*
4244	 * Neither the BIOS, ourselves nor any other kernel
4245 * expects the system to be in execlists mode on startup,
4246 * so we need to reset the GPU back to legacy mode. And the only
4247 * known way to disable logical contexts is through a GPU reset.
4248 *
4249 * So in order to leave the system in a known default configuration,
4250 * always reset the GPU upon unload and suspend. Afterwards we then
4251 * clean up the GEM state tracking, flushing off the requests and
4252 * leaving the system in a known idle state.
4253 *
4254	 * Note that it is of the utmost importance that the GPU is idle and
4255 * all stray writes are flushed *before* we dismantle the backing
4256 * storage for the pinned objects.
4257 *
4258 * However, since we are uncertain that resetting the GPU on older
4259 * machines is a good idea, we don't - just in case it leaves the
4260 * machine in an unusable condition.
4261 */
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00004262 if (HAS_HW_CONTEXTS(dev_priv)) {
Imre Deak1c777c52016-10-12 17:46:37 +03004263 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
4264 WARN_ON(reset && reset != -ENODEV);
4265 }
4266
Eric Anholt673a3942008-07-30 12:06:12 -07004267 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004268
4269err:
4270 mutex_unlock(&dev->struct_mutex);
4271 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004272}
4273
Chris Wilson5ab57c72016-07-15 14:56:20 +01004274void i915_gem_resume(struct drm_device *dev)
4275{
4276 struct drm_i915_private *dev_priv = to_i915(dev);
4277
Imre Deak31ab49a2016-11-07 11:20:05 +02004278 WARN_ON(dev_priv->gt.awake);
4279
Chris Wilson5ab57c72016-07-15 14:56:20 +01004280 mutex_lock(&dev->struct_mutex);
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00004281 i915_gem_restore_gtt_mappings(dev_priv);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004282
4283 /* As we didn't flush the kernel context before suspend, we cannot
4284 * guarantee that the context image is complete. So let's just reset
4285 * it and start again.
4286 */
Chris Wilson821ed7d2016-09-09 14:11:53 +01004287 dev_priv->gt.resume(dev_priv);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004288
4289 mutex_unlock(&dev->struct_mutex);
4290}
4291
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004292void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004293{
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004294 if (INTEL_GEN(dev_priv) < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004295 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4296 return;
4297
4298 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4299 DISP_TILE_SURFACE_SWIZZLING);
4300
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004301 if (IS_GEN5(dev_priv))
Daniel Vetter11782b02012-01-31 16:47:55 +01004302 return;
4303
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004304 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004305 if (IS_GEN6(dev_priv))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004306 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004307 else if (IS_GEN7(dev_priv))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004308 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01004309 else if (IS_GEN8(dev_priv))
Ben Widawsky31a53362013-11-02 21:07:04 -07004310 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004311 else
4312 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004313}
Daniel Vettere21af882012-02-09 20:53:27 +01004314
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004315static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004316{
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004317 I915_WRITE(RING_CTL(base), 0);
4318 I915_WRITE(RING_HEAD(base), 0);
4319 I915_WRITE(RING_TAIL(base), 0);
4320 I915_WRITE(RING_START(base), 0);
4321}
4322
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004323static void init_unused_rings(struct drm_i915_private *dev_priv)
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004324{
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004325 if (IS_I830(dev_priv)) {
4326 init_unused_ring(dev_priv, PRB1_BASE);
4327 init_unused_ring(dev_priv, SRB0_BASE);
4328 init_unused_ring(dev_priv, SRB1_BASE);
4329 init_unused_ring(dev_priv, SRB2_BASE);
4330 init_unused_ring(dev_priv, SRB3_BASE);
4331 } else if (IS_GEN2(dev_priv)) {
4332 init_unused_ring(dev_priv, SRB0_BASE);
4333 init_unused_ring(dev_priv, SRB1_BASE);
4334 } else if (IS_GEN3(dev_priv)) {
4335 init_unused_ring(dev_priv, PRB1_BASE);
4336 init_unused_ring(dev_priv, PRB2_BASE);
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004337 }
4338}
4339
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004340int
4341i915_gem_init_hw(struct drm_device *dev)
4342{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004343 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004344 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05304345 enum intel_engine_id id;
Chris Wilsond200cda2016-04-28 09:56:44 +01004346 int ret;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004347
Chris Wilsonde867c22016-10-25 13:16:02 +01004348 dev_priv->gt.last_init_time = ktime_get();
4349
Chris Wilson5e4f5182015-02-13 14:35:59 +00004350 /* Double layer security blanket, see i915_gem_init() */
4351 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4352
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00004353 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004354 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004355
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01004356 if (IS_HASWELL(dev_priv))
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004357 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004358 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004359
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004360 if (HAS_PCH_NOP(dev_priv)) {
Tvrtko Ursulinfd6b8f42016-10-14 10:13:06 +01004361 if (IS_IVYBRIDGE(dev_priv)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004362 u32 temp = I915_READ(GEN7_MSG_CTL);
4363 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4364 I915_WRITE(GEN7_MSG_CTL, temp);
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004365 } else if (INTEL_GEN(dev_priv) >= 7) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004366 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4367 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4368 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4369 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004370 }
4371
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004372 i915_gem_init_swizzling(dev_priv);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004373
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01004374 /*
4375 * At least 830 can leave some of the unused rings
4376	 * "active" (i.e. head != tail) after resume which
4377	 * will prevent c3 entry. Make sure all unused rings
4378 * are totally idle.
4379 */
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01004380 init_unused_rings(dev_priv);
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01004381
Dave Gordoned54c1a2016-01-19 19:02:54 +00004382 BUG_ON(!dev_priv->kernel_context);
John Harrison90638cc2015-05-29 17:43:37 +01004383
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00004384 ret = i915_ppgtt_init_hw(dev_priv);
John Harrison4ad2fd82015-06-18 13:11:20 +01004385 if (ret) {
4386 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4387 goto out;
4388 }
4389
4390 /* Need to do basic initialisation of all rings first: */
Akash Goel3b3f1652016-10-13 22:44:48 +05304391 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004392 ret = engine->init_hw(engine);
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004393 if (ret)
Chris Wilson5e4f5182015-02-13 14:35:59 +00004394 goto out;
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004395 }
Mika Kuoppala99433932013-01-22 14:12:17 +02004396
Peter Antoine0ccdacf2016-04-13 15:03:25 +01004397 intel_mocs_init_l3cc_table(dev);
4398
Alex Dai33a732f2015-08-12 15:43:36 +01004399 /* We can't enable contexts until all firmware is loaded */
Dave Gordone556f7c2016-06-07 09:14:49 +01004400 ret = intel_guc_setup(dev);
4401 if (ret)
4402 goto out;
Alex Dai33a732f2015-08-12 15:43:36 +01004403
Chris Wilson5e4f5182015-02-13 14:35:59 +00004404out:
4405 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004406 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004407}
4408
Chris Wilson39df9192016-07-20 13:31:57 +01004409bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4410{
4411 if (INTEL_INFO(dev_priv)->gen < 6)
4412 return false;
4413
4414 /* TODO: make semaphores and Execlists play nicely together */
4415 if (i915.enable_execlists)
4416 return false;
4417
4418 if (value >= 0)
4419 return value;
4420
4421#ifdef CONFIG_INTEL_IOMMU
4422 /* Enable semaphores on SNB when IO remapping is off */
4423 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4424 return false;
4425#endif
4426
4427 return true;
4428}
4429
Chris Wilson1070a422012-04-24 15:47:41 +01004430int i915_gem_init(struct drm_device *dev)
4431{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004432 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson1070a422012-04-24 15:47:41 +01004433 int ret;
4434
Chris Wilson1070a422012-04-24 15:47:41 +01004435 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004436
Oscar Mateoa83014d2014-07-24 17:04:21 +01004437 if (!i915.enable_execlists) {
Chris Wilson821ed7d2016-09-09 14:11:53 +01004438 dev_priv->gt.resume = intel_legacy_submission_resume;
Chris Wilson7e37f882016-08-02 22:50:21 +01004439 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
Oscar Mateo454afeb2014-07-24 17:04:22 +01004440 } else {
Chris Wilson821ed7d2016-09-09 14:11:53 +01004441 dev_priv->gt.resume = intel_lr_context_resume;
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004442 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
Oscar Mateoa83014d2014-07-24 17:04:21 +01004443 }
4444
Chris Wilson5e4f5182015-02-13 14:35:59 +00004445 /* This is just a security blanket to placate dragons.
4446 * On some systems, we very sporadically observe that the first TLBs
4447 * used by the CS may be stale, despite us poking the TLB reset. If
4448 * we hold the forcewake during initialisation these problems
4449 * just magically go away.
4450 */
4451 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4452
Chris Wilson72778cb2016-05-19 16:17:16 +01004453 i915_gem_init_userptr(dev_priv);
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01004454
4455 ret = i915_gem_init_ggtt(dev_priv);
4456 if (ret)
4457 goto out_unlock;
Jesse Barnesd62b4892013-03-08 10:45:53 -08004458
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004459 ret = i915_gem_context_init(dev);
Jani Nikula7bcc3772014-12-05 14:17:42 +02004460 if (ret)
4461 goto out_unlock;
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004462
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01004463 ret = intel_engines_init(dev);
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004464 if (ret)
Jani Nikula7bcc3772014-12-05 14:17:42 +02004465 goto out_unlock;
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004466
4467 ret = i915_gem_init_hw(dev);
Chris Wilson60990322014-04-09 09:19:42 +01004468 if (ret == -EIO) {
Chris Wilson7e21d642016-07-27 09:07:29 +01004469 /* Allow engine initialisation to fail by marking the GPU as
Chris Wilson60990322014-04-09 09:19:42 +01004470	 * wedged. But we only want to do this where the GPU is angry;
4471	 * for all other failures, such as an allocation failure, bail.
4472 */
4473 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
Chris Wilson821ed7d2016-09-09 14:11:53 +01004474 i915_gem_set_wedged(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01004475 ret = 0;
Chris Wilson1070a422012-04-24 15:47:41 +01004476 }
Jani Nikula7bcc3772014-12-05 14:17:42 +02004477
4478out_unlock:
Chris Wilson5e4f5182015-02-13 14:35:59 +00004479 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Chris Wilson60990322014-04-09 09:19:42 +01004480 mutex_unlock(&dev->struct_mutex);
Chris Wilson1070a422012-04-24 15:47:41 +01004481
Chris Wilson60990322014-04-09 09:19:42 +01004482 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01004483}
4484
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004485void
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004486i915_gem_cleanup_engines(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004487{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004488 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004489 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05304490 enum intel_engine_id id;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004491
Akash Goel3b3f1652016-10-13 22:44:48 +05304492 for_each_engine(engine, dev_priv, id)
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004493 dev_priv->gt.cleanup_engine(engine);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004494}
4495
Eric Anholt673a3942008-07-30 12:06:12 -07004496void
Imre Deak40ae4e12016-03-16 14:54:03 +02004497i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4498{
Chris Wilson49ef5292016-08-18 17:17:00 +01004499 int i;
Imre Deak40ae4e12016-03-16 14:54:03 +02004500
4501 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4502 !IS_CHERRYVIEW(dev_priv))
4503 dev_priv->num_fence_regs = 32;
4504 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4505 IS_I945GM(dev_priv) || IS_G33(dev_priv))
4506 dev_priv->num_fence_regs = 16;
4507 else
4508 dev_priv->num_fence_regs = 8;
4509
Chris Wilsonc0336662016-05-06 15:40:21 +01004510 if (intel_vgpu_active(dev_priv))
Imre Deak40ae4e12016-03-16 14:54:03 +02004511 dev_priv->num_fence_regs =
4512 I915_READ(vgtif_reg(avail_rs.fence_num));
4513
4514 /* Initialize fence registers to zero */
Chris Wilson49ef5292016-08-18 17:17:00 +01004515 for (i = 0; i < dev_priv->num_fence_regs; i++) {
4516 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
4517
4518 fence->i915 = dev_priv;
4519 fence->id = i;
4520 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
4521 }
Tvrtko Ursulin4362f4f2016-11-16 08:55:33 +00004522 i915_gem_restore_fences(dev_priv);
Imre Deak40ae4e12016-03-16 14:54:03 +02004523
Tvrtko Ursulin4362f4f2016-11-16 08:55:33 +00004524 i915_gem_detect_bit_6_swizzle(dev_priv);
Imre Deak40ae4e12016-03-16 14:54:03 +02004525}
4526
Chris Wilson73cb9702016-10-28 13:58:46 +01004527int
Imre Deakd64aa092016-01-19 15:26:29 +02004528i915_gem_load_init(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004529{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004530 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004531 int err = -ENOMEM;
Chris Wilson42dcedd2012-11-15 11:32:30 +00004532
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004533 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
4534 if (!dev_priv->objects)
Chris Wilson73cb9702016-10-28 13:58:46 +01004535 goto err_out;
Chris Wilson73cb9702016-10-28 13:58:46 +01004536
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004537 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
4538 if (!dev_priv->vmas)
Chris Wilson73cb9702016-10-28 13:58:46 +01004539 goto err_objects;
Chris Wilson73cb9702016-10-28 13:58:46 +01004540
Tvrtko Ursulina9335682016-11-02 15:14:59 +00004541 dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
4542 SLAB_HWCACHE_ALIGN |
4543 SLAB_RECLAIM_ACCOUNT |
4544 SLAB_DESTROY_BY_RCU);
4545 if (!dev_priv->requests)
Chris Wilson73cb9702016-10-28 13:58:46 +01004546 goto err_vmas;
Chris Wilson73cb9702016-10-28 13:58:46 +01004547
Chris Wilson52e54202016-11-14 20:41:02 +00004548 dev_priv->dependencies = KMEM_CACHE(i915_dependency,
4549 SLAB_HWCACHE_ALIGN |
4550 SLAB_RECLAIM_ACCOUNT);
4551 if (!dev_priv->dependencies)
4552 goto err_requests;
4553
Chris Wilson73cb9702016-10-28 13:58:46 +01004554 mutex_lock(&dev_priv->drm.struct_mutex);
4555 INIT_LIST_HEAD(&dev_priv->gt.timelines);
Chris Wilsonbb894852016-11-14 20:40:57 +00004556 err = i915_gem_timeline_init__global(dev_priv);
Chris Wilson73cb9702016-10-28 13:58:46 +01004557 mutex_unlock(&dev_priv->drm.struct_mutex);
4558 if (err)
Chris Wilson52e54202016-11-14 20:41:02 +00004559 goto err_dependencies;
Eric Anholt673a3942008-07-30 12:06:12 -07004560
Ben Widawskya33afea2013-09-17 21:12:45 -07004561 INIT_LIST_HEAD(&dev_priv->context_list);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004562 INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
4563 init_llist_head(&dev_priv->mm.free_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02004564 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4565 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004566 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson275f0392016-10-24 13:42:14 +01004567 INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
Chris Wilson67d97da2016-07-04 08:08:31 +01004568 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
Eric Anholt673a3942008-07-30 12:06:12 -07004569 i915_gem_retire_work_handler);
Chris Wilson67d97da2016-07-04 08:08:31 +01004570 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004571 i915_gem_idle_work_handler);
Chris Wilson1f15b762016-07-01 17:23:14 +01004572 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004573 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01004574
Chris Wilson72bfa192010-12-19 11:42:05 +00004575 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4576
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004577 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01004578
Chris Wilsonce453d82011-02-21 14:43:56 +00004579 dev_priv->mm.interruptible = true;
4580
Joonas Lahtinen6f633402016-09-01 14:58:21 +03004581 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
4582
Chris Wilsonb5add952016-08-04 16:32:36 +01004583 spin_lock_init(&dev_priv->fb_tracking.lock);
Chris Wilson73cb9702016-10-28 13:58:46 +01004584
4585 return 0;
4586
Chris Wilson52e54202016-11-14 20:41:02 +00004587err_dependencies:
4588 kmem_cache_destroy(dev_priv->dependencies);
Chris Wilson73cb9702016-10-28 13:58:46 +01004589err_requests:
4590 kmem_cache_destroy(dev_priv->requests);
4591err_vmas:
4592 kmem_cache_destroy(dev_priv->vmas);
4593err_objects:
4594 kmem_cache_destroy(dev_priv->objects);
4595err_out:
4596 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07004597}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004598
Imre Deakd64aa092016-01-19 15:26:29 +02004599void i915_gem_load_cleanup(struct drm_device *dev)
4600{
4601 struct drm_i915_private *dev_priv = to_i915(dev);
4602
Chris Wilson7d5d59e2016-11-01 08:48:41 +00004603 WARN_ON(!llist_empty(&dev_priv->mm.free_list));
4604
Matthew Auldea84aa72016-11-17 21:04:11 +00004605 mutex_lock(&dev_priv->drm.struct_mutex);
4606 i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
4607 WARN_ON(!list_empty(&dev_priv->gt.timelines));
4608 mutex_unlock(&dev_priv->drm.struct_mutex);
4609
Chris Wilson52e54202016-11-14 20:41:02 +00004610 kmem_cache_destroy(dev_priv->dependencies);
Imre Deakd64aa092016-01-19 15:26:29 +02004611 kmem_cache_destroy(dev_priv->requests);
4612 kmem_cache_destroy(dev_priv->vmas);
4613 kmem_cache_destroy(dev_priv->objects);
Chris Wilson0eafec62016-08-04 16:32:41 +01004614
4615 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
4616 rcu_barrier();
Imre Deakd64aa092016-01-19 15:26:29 +02004617}
4618
Chris Wilson6a800ea2016-09-21 14:51:07 +01004619int i915_gem_freeze(struct drm_i915_private *dev_priv)
4620{
4621 intel_runtime_pm_get(dev_priv);
4622
4623 mutex_lock(&dev_priv->drm.struct_mutex);
4624 i915_gem_shrink_all(dev_priv);
4625 mutex_unlock(&dev_priv->drm.struct_mutex);
4626
4627 intel_runtime_pm_put(dev_priv);
4628
4629 return 0;
4630}
4631
Chris Wilson461fb992016-05-14 07:26:33 +01004632int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4633{
4634 struct drm_i915_gem_object *obj;
Chris Wilson7aab2d52016-09-09 20:02:18 +01004635 struct list_head *phases[] = {
4636 &dev_priv->mm.unbound_list,
4637 &dev_priv->mm.bound_list,
4638 NULL
4639 }, **p;
Chris Wilson461fb992016-05-14 07:26:33 +01004640
4641 /* Called just before we write the hibernation image.
4642 *
4643 * We need to update the domain tracking to reflect that the CPU
4644 * will be accessing all the pages to create and restore from the
4645	 * hibernation image, and so upon restoration those pages will be in the
4646 * CPU domain.
4647 *
4648 * To make sure the hibernation image contains the latest state,
4649 * we update that state just before writing out the image.
Chris Wilson7aab2d52016-09-09 20:02:18 +01004650 *
4651 * To try and reduce the hibernation image, we manually shrink
4652 * the objects as well.
Chris Wilson461fb992016-05-14 07:26:33 +01004653 */
4654
Chris Wilson6a800ea2016-09-21 14:51:07 +01004655 mutex_lock(&dev_priv->drm.struct_mutex);
4656 i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
Chris Wilson461fb992016-05-14 07:26:33 +01004657
Chris Wilson7aab2d52016-09-09 20:02:18 +01004658 for (p = phases; *p; p++) {
Joonas Lahtinen56cea322016-11-02 12:16:04 +02004659 list_for_each_entry(obj, *p, global_link) {
Chris Wilson7aab2d52016-09-09 20:02:18 +01004660 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4661 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4662 }
Chris Wilson461fb992016-05-14 07:26:33 +01004663 }
Chris Wilson6a800ea2016-09-21 14:51:07 +01004664 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilson461fb992016-05-14 07:26:33 +01004665
4666 return 0;
4667}
4668
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004669void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004670{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004671 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilson15f7bbc2016-07-26 12:01:52 +01004672 struct drm_i915_gem_request *request;
Eric Anholtb9624422009-06-03 07:27:35 +00004673
4674 /* Clean up our request list when the client is going away, so that
4675 * later retire_requests won't dereference our soon-to-be-gone
4676 * file_priv.
4677 */
Chris Wilson1c255952010-09-26 11:03:27 +01004678 spin_lock(&file_priv->mm.lock);
Chris Wilson15f7bbc2016-07-26 12:01:52 +01004679 list_for_each_entry(request, &file_priv->mm.request_list, client_list)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004680 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01004681 spin_unlock(&file_priv->mm.lock);
Chris Wilson31169712009-09-14 16:50:28 +01004682
Chris Wilson2e1b8732015-04-27 13:41:22 +01004683 if (!list_empty(&file_priv->rps.link)) {
Chris Wilson8d3afd72015-05-21 21:01:47 +01004684 spin_lock(&to_i915(dev)->rps.client_lock);
Chris Wilson2e1b8732015-04-27 13:41:22 +01004685 list_del(&file_priv->rps.link);
Chris Wilson8d3afd72015-05-21 21:01:47 +01004686 spin_unlock(&to_i915(dev)->rps.client_lock);
Chris Wilson1854d5c2015-04-07 16:20:32 +01004687 }
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004688}
4689
4690int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4691{
4692 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08004693 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004694
Chris Wilsonc4c29d72016-11-09 10:45:07 +00004695 DRM_DEBUG("\n");
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004696
4697 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4698 if (!file_priv)
4699 return -ENOMEM;
4700
4701 file->driver_priv = file_priv;
Dave Gordonf19ec8c2016-07-04 11:34:37 +01004702 file_priv->dev_priv = to_i915(dev);
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02004703 file_priv->file = file;
Chris Wilson2e1b8732015-04-27 13:41:22 +01004704 INIT_LIST_HEAD(&file_priv->rps.link);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004705
4706 spin_lock_init(&file_priv->mm.lock);
4707 INIT_LIST_HEAD(&file_priv->mm.request_list);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004708
Chris Wilsonc80ff162016-07-27 09:07:27 +01004709 file_priv->bsd_engine = -1;
Tvrtko Ursulinde1add32016-01-15 15:12:50 +00004710
Ben Widawskye422b882013-12-06 14:10:58 -08004711 ret = i915_gem_context_open(dev, file);
4712 if (ret)
4713 kfree(file_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004714
Ben Widawskye422b882013-12-06 14:10:58 -08004715 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004716}
4717
Daniel Vetterb680c372014-09-19 18:27:27 +02004718/**
4719 * i915_gem_track_fb - update frontbuffer tracking
Geliang Tangd9072a32015-09-15 05:58:44 -07004720 * @old: current GEM buffer for the frontbuffer slots
4721 * @new: new GEM buffer for the frontbuffer slots
4722 * @frontbuffer_bits: bitmask of frontbuffer slots
Daniel Vetterb680c372014-09-19 18:27:27 +02004723 *
4724 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4725 * from @old and setting them in @new. Both @old and @new can be NULL.
4726 */
Daniel Vettera071fa02014-06-18 23:28:09 +02004727void i915_gem_track_fb(struct drm_i915_gem_object *old,
4728 struct drm_i915_gem_object *new,
4729 unsigned frontbuffer_bits)
4730{
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01004731	/* Control of individual bits within the mask is guarded by
4732 * the owning plane->mutex, i.e. we can never see concurrent
4733 * manipulation of individual bits. But since the bitfield as a whole
4734 * is updated using RMW, we need to use atomics in order to update
4735 * the bits.
4736 */
4737 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
4738 sizeof(atomic_t) * BITS_PER_BYTE);
4739
Daniel Vettera071fa02014-06-18 23:28:09 +02004740 if (old) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01004741 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
4742 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02004743 }
4744
4745 if (new) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01004746 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
4747 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02004748 }
4749}
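
/*
 * Illustrative example (not an actual call site): when a plane update swaps
 * the primary framebuffer on a pipe, the flip path is expected to transfer
 * the plane's frontbuffer bit from the outgoing to the incoming object,
 * along the lines of
 *
 *	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(new_fb),
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 *
 * where INTEL_FRONTBUFFER_PRIMARY() is the per-pipe primary-plane bit.
 */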
4750
Dave Gordonea702992015-07-09 19:29:02 +01004751/* Allocate a new GEM object and fill it with the supplied data */
4752struct drm_i915_gem_object *
4753i915_gem_object_create_from_data(struct drm_device *dev,
4754 const void *data, size_t size)
4755{
4756 struct drm_i915_gem_object *obj;
4757 struct sg_table *sg;
4758 size_t bytes;
4759 int ret;
4760
Dave Gordond37cd8a2016-04-22 19:14:32 +01004761 obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
Chris Wilsonfe3db792016-04-25 13:32:13 +01004762 if (IS_ERR(obj))
Dave Gordonea702992015-07-09 19:29:02 +01004763 return obj;
4764
4765 ret = i915_gem_object_set_to_cpu_domain(obj, true);
4766 if (ret)
4767 goto fail;
4768
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004769 ret = i915_gem_object_pin_pages(obj);
Dave Gordonea702992015-07-09 19:29:02 +01004770 if (ret)
4771 goto fail;
4772
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004773 sg = obj->mm.pages;
Dave Gordonea702992015-07-09 19:29:02 +01004774 bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004775 obj->mm.dirty = true; /* Backing store is now out of date */
Dave Gordonea702992015-07-09 19:29:02 +01004776 i915_gem_object_unpin_pages(obj);
4777
4778 if (WARN_ON(bytes != size)) {
4779		DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
4780 ret = -EFAULT;
4781 goto fail;
4782 }
4783
4784 return obj;
4785
4786fail:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01004787 i915_gem_object_put(obj);
Dave Gordonea702992015-07-09 19:29:02 +01004788 return ERR_PTR(ret);
4789}
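
/*
 * Illustrative use (names are placeholders, not an actual call site): wrap a
 * firmware blob in a shmem-backed GEM object so the GPU can consume it, e.g.
 *
 *	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */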
Chris Wilson96d77632016-10-28 13:58:33 +01004790
4791struct scatterlist *
4792i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
4793 unsigned int n,
4794 unsigned int *offset)
4795{
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004796 struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
Chris Wilson96d77632016-10-28 13:58:33 +01004797 struct scatterlist *sg;
4798 unsigned int idx, count;
4799
4800 might_sleep();
4801 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004802 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
Chris Wilson96d77632016-10-28 13:58:33 +01004803
4804 /* As we iterate forward through the sg, we record each entry in a
4805 * radixtree for quick repeated (backwards) lookups. If we have seen
4806 * this index previously, we will have an entry for it.
4807 *
4808 * Initial lookup is O(N), but this is amortized to O(1) for
4809 * sequential page access (where each new request is consecutive
4810 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
4811 * i.e. O(1) with a large constant!
4812 */
4813 if (n < READ_ONCE(iter->sg_idx))
4814 goto lookup;
4815
4816 mutex_lock(&iter->lock);
4817
4818	/* We prefer to reuse the last sg so that repeated lookups of this
4819 * (or the subsequent) sg are fast - comparing against the last
4820 * sg is faster than going through the radixtree.
4821 */
4822
4823 sg = iter->sg_pos;
4824 idx = iter->sg_idx;
4825 count = __sg_page_count(sg);
4826
4827 while (idx + count <= n) {
4828 unsigned long exception, i;
4829 int ret;
4830
4831 /* If we cannot allocate and insert this entry, or the
4832 * individual pages from this range, cancel updating the
4833 * sg_idx so that on this lookup we are forced to linearly
4834 * scan onwards, but on future lookups we will try the
4835 * insertion again (in which case we need to be careful of
4836 * the error return reporting that we have already inserted
4837 * this index).
4838 */
4839 ret = radix_tree_insert(&iter->radix, idx, sg);
4840 if (ret && ret != -EEXIST)
4841 goto scan;
4842
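		/* Trailing pages of a multi-page sg entry are recorded as
		 * exceptional entries encoding the index of the first page,
		 * so a later lookup of any page in the range can be
		 * redirected to the sg stored at that base index (see the
		 * radix_tree_exception() handling in the lookup path below).
		 */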
4843 exception =
4844 RADIX_TREE_EXCEPTIONAL_ENTRY |
4845 idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
4846 for (i = 1; i < count; i++) {
4847 ret = radix_tree_insert(&iter->radix, idx + i,
4848 (void *)exception);
4849 if (ret && ret != -EEXIST)
4850 goto scan;
4851 }
4852
4853 idx += count;
4854 sg = ____sg_next(sg);
4855 count = __sg_page_count(sg);
4856 }
4857
4858scan:
4859 iter->sg_pos = sg;
4860 iter->sg_idx = idx;
4861
4862 mutex_unlock(&iter->lock);
4863
4864 if (unlikely(n < idx)) /* insertion completed by another thread */
4865 goto lookup;
4866
4867 /* In case we failed to insert the entry into the radixtree, we need
4868 * to look beyond the current sg.
4869 */
4870 while (idx + count <= n) {
4871 idx += count;
4872 sg = ____sg_next(sg);
4873 count = __sg_page_count(sg);
4874 }
4875
4876 *offset = n - idx;
4877 return sg;
4878
4879lookup:
4880 rcu_read_lock();
4881
4882 sg = radix_tree_lookup(&iter->radix, n);
4883 GEM_BUG_ON(!sg);
4884
4885	/* If this index is in the middle of a multi-page sg entry,
4886	 * the radixtree will contain an exceptional entry that points
4887	 * to the start of that range. We return the sg covering the
4888	 * start of that range, along with the offset of this page
4889	 * within that sg entry's range.
4890 */
4891 *offset = 0;
4892 if (unlikely(radix_tree_exception(sg))) {
4893 unsigned long base =
4894 (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
4895
4896 sg = radix_tree_lookup(&iter->radix, base);
4897 GEM_BUG_ON(!sg);
4898
4899 *offset = n - base;
4900 }
4901
4902 rcu_read_unlock();
4903
4904 return sg;
4905}
4906
4907struct page *
4908i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
4909{
4910 struct scatterlist *sg;
4911 unsigned int offset;
4912
4913 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
4914
4915 sg = i915_gem_object_get_sg(obj, n, &offset);
4916 return nth_page(sg_page(sg), offset);
4917}
4918
4919/* Like i915_gem_object_get_page(), but mark the returned page dirty */
4920struct page *
4921i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
4922 unsigned int n)
4923{
4924 struct page *page;
4925
4926 page = i915_gem_object_get_page(obj, n);
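
	/* When the object is already marked dirty, every page will be
	 * flagged dirty as the object's pages are released, so dirtying
	 * this single page here would be redundant.
	 */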
Chris Wilsona4f5ea62016-10-28 13:58:35 +01004927 if (!obj->mm.dirty)
Chris Wilson96d77632016-10-28 13:58:33 +01004928 set_page_dirty(page);
4929
4930 return page;
4931}
4932
4933dma_addr_t
4934i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
4935 unsigned long n)
4936{
4937 struct scatterlist *sg;
4938 unsigned int offset;
4939
4940 sg = i915_gem_object_get_sg(obj, n, &offset);
4941 return sg_dma_address(sg) + (offset << PAGE_SHIFT);
4942}
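
/*
 * Illustrative use (not an actual call site): the bus address of an
 * arbitrary byte offset within the object can be derived from the
 * page-granular helper above, assuming the pages are pinned:
 *
 *	dma_addr_t addr;
 *
 *	addr = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);
 *	addr += offset_in_page(offset);
 */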