/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

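/* CPU writes need a clflush only when the pages are not coherent with the
 * CPU cache: nothing to do if the object already lives in the CPU write
 * domain, a flush if the cache level is not coherent on this platform, and
 * otherwise only while the object is pinned for display scanout.
 */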
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

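/* Reserve/release a node of the requested size in the mappable range of
 * the global GTT. Used by the pread/pwrite GTT paths as a single-page
 * window when the object itself cannot be pinned into the aperture.
 */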
static int
insert_mappable_node(struct drm_i915_private *i915,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

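/* Copy the shmem backing pages into the contiguous phys allocation,
 * flushing each page, and publish a single-entry sg_table that describes
 * the contiguous DMA address.
 */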
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

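/* Tear down every VMA binding of the object. Rendering must complete and
 * requests be retired before closed vma can be unbound; the vma are moved
 * to a temporary list while unbinding and spliced back afterwards.
 */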
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 * @obj: i915 gem object
 * @readonly: waiting for just read access or read-write access
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct reservation_object *resv;
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!readonly) {
		active = obj->last_read;
		active_mask = i915_gem_object_get_active(obj);
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait(&active[idx],
					   &obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long err;

		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
							  MAX_SCHEDULE_TIMEOUT);
		if (err < 0)
			return err;
	}

	return 0;
}

/* A nonblocking variant of the above wait. Must be called prior to
 * acquiring the mutex for the object, as the object state may change
 * during this call. A reference must be held by the caller for the object.
 */
static __must_check int
__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
			struct intel_rps_client *rps,
			bool readonly)
{
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	active_mask = __I915_BO_ACTIVE(obj);
	if (!active_mask)
		return 0;

	if (!readonly) {
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait_unlocked(&active[idx],
						    I915_WAIT_INTERRUPTIBLE,
						    NULL, rps);
		if (ret)
			return ret;
	}

	return 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

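/* Switch the object over to a physically contiguous allocation (for
 * hardware that cannot scatter-gather). Existing bindings and shmem pages
 * are released before the phys backing store is attached.
 */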
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

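/* Userspace copy helpers for bit-17 swizzled objects: data is moved one
 * 64-byte cacheline at a time with the cacheline offset XORed
 * (gpu_offset ^ 64) so that the CPU view matches the swizzled layout.
 */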
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->dirty = 1;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? - EFAULT : 0;
}

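/* Map a single GGTT page write-combined and copy to or from userspace with
 * the ordinary (faultable) copy routines; callers drop struct_mutex first
 * since this copy may fault and sleep.
 */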
static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
{
	void __iomem *ioaddr;
	void *vaddr;
	uint64_t unwritten;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
	if (pwrite)
		unwritten = __copy_from_user(vaddr, user_data, length);
	else
		unwritten = __copy_to_user(user_data, vaddr, length);

	io_mapping_unmap(ioaddr);
	return unwritten;
}

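/* Fallback pread through the GTT aperture: pin the object into the
 * mappable region (or cycle a single scratch PTE via insert_page when
 * pinning fails) and copy out page by page with slow_user_access(),
 * outside struct_mutex.
 */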
static int
i915_gem_gtt_pread(struct drm_device *dev,
		   struct drm_i915_gem_object *obj, uint64_t size,
		   uint64_t data_offset, uint64_t data_ptr)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_vma *vma;
	struct drm_mm_node node;
	char __user *user_data;
	uint64_t remain;
	uint64_t offset;
	int ret;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	user_data = u64_to_user_ptr(data_ptr);
	remain = size;
	offset = data_offset;

	mutex_unlock(&dev->struct_mutex);
	if (likely(!i915.prefault_disable)) {
		ret = fault_in_pages_writeable(user_data, remain);
		if (ret) {
			mutex_lock(&dev->struct_mutex);
			goto out_unpin;
		}
	}

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start,
					       I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* This is a slow read/write as it tries to read from
		 * and write to user memory which may result into page
		 * faults, and so we cannot perform this under struct_mutex.
		 */
		if (slow_user_access(&ggtt->mappable, page_base,
				     page_offset, user_data,
				     page_length, false)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&dev->struct_mutex);
	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
		/* The user has modified the object whilst we tried
		 * reading from it, and we now have no idea what domain
		 * the pages should be in. As we have just been touching
		 * them directly, flush everything back to the GTT
		 * domain.
		 */
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
	}

out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out:
	return ret;
}

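/* pread via the shmem backing store: try the atomic kmap fast path for
 * each page and fall back to the slow path (prefaulting userspace and
 * dropping struct_mutex) on a fault or for bit-17 swizzled pages.
 */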
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_pages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_obj_finish_shmem_access(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

	ret = i915_gem_shmem_pread(dev, obj, args, file);

	/* pread for non shmem backed objects */
	if (ret == -EFAULT || ret == -ENODEV) {
		intel_runtime_pm_get(to_i915(dev));
		ret = i915_gem_gtt_pread(dev, obj, args->size,
					 args->offset, args->data_ptr);
		intel_runtime_pm_put(to_i915(dev));
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;

err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @i915: i915 device private data
 * @obj: i915 gem object
 * @args: pwrite arguments structure
 * @file: drm file pointer
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma;
	struct drm_mm_node node;
	uint64_t remain, offset;
	char __user *user_data;
	int ret;
	bool hit_slow_path = false;

	if (i915_gem_object_is_tiled(obj))
		return -EFAULT;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->dirty = true;

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (fast_user_write(&ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			hit_slow_path = true;
			mutex_unlock(&dev->struct_mutex);
			if (slow_user_access(&ggtt->mappable,
					     page_base,
					     page_offset, user_data,
					     page_length, true)) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto out_flush;
			}

			mutex_lock(&dev->struct_mutex);
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	if (hit_slow_path) {
		if (ret == 0 &&
		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
			/* The user has modified the object whilst we tried
			 * reading from it, and we now have no idea what domain
			 * the pages should be in. As we have just been touching
			 * them directly, flush everything back to the GTT
			 * domain.
			 */
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
		}
	}

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

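/* pwrite via the shmem backing store: mirror of shmem_pread, using the
 * atomic per-page fast path and dropping struct_mutex for the slow path
 * when a fault or bit-17 swizzling forces it.
 */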
Eric Anholt40123c12009-03-09 13:42:30 -07001307static int
Daniel Vettere244a442012-03-25 19:47:28 +02001308i915_gem_shmem_pwrite(struct drm_device *dev,
1309 struct drm_i915_gem_object *obj,
1310 struct drm_i915_gem_pwrite *args,
1311 struct drm_file *file)
Eric Anholt40123c12009-03-09 13:42:30 -07001312{
Eric Anholt40123c12009-03-09 13:42:30 -07001313 ssize_t remain;
Daniel Vetter8c599672011-12-14 13:57:31 +01001314 loff_t offset;
1315 char __user *user_data;
Ben Widawskyeb2c0c82012-02-15 14:42:43 +01001316 int shmem_page_offset, page_length, ret = 0;
Daniel Vetter8c599672011-12-14 13:57:31 +01001317 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
Daniel Vettere244a442012-03-25 19:47:28 +02001318 int hit_slowpath = 0;
Chris Wilson43394c72016-08-18 17:16:47 +01001319 unsigned int needs_clflush;
Imre Deak67d5a502013-02-18 19:28:02 +02001320 struct sg_page_iter sg_iter;
Eric Anholt40123c12009-03-09 13:42:30 -07001321
Chris Wilson43394c72016-08-18 17:16:47 +01001322 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
Chris Wilson755d2212012-09-04 21:02:55 +01001323 if (ret)
1324 return ret;
1325
Eric Anholt40123c12009-03-09 13:42:30 -07001326 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
Chris Wilson43394c72016-08-18 17:16:47 +01001327 user_data = u64_to_user_ptr(args->data_ptr);
Eric Anholt40123c12009-03-09 13:42:30 -07001328 offset = args->offset;
Chris Wilson43394c72016-08-18 17:16:47 +01001329 remain = args->size;
Eric Anholt40123c12009-03-09 13:42:30 -07001330
Imre Deak67d5a502013-02-18 19:28:02 +02001331 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
1332 offset >> PAGE_SHIFT) {
Imre Deak2db76d72013-03-26 15:14:18 +02001333 struct page *page = sg_page_iter_page(&sg_iter);
Daniel Vetter58642882012-03-25 19:47:37 +02001334 int partial_cacheline_write;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001335
Chris Wilson9da3da62012-06-01 15:20:22 +01001336 if (remain <= 0)
1337 break;
1338
Eric Anholt40123c12009-03-09 13:42:30 -07001339 /* Operation in this page
1340 *
Eric Anholt40123c12009-03-09 13:42:30 -07001341 * shmem_page_offset = offset within page in shmem file
Eric Anholt40123c12009-03-09 13:42:30 -07001342 * page_length = bytes to copy for this page
1343 */
Chris Wilsonc8cbbb82011-05-12 22:17:11 +01001344 shmem_page_offset = offset_in_page(offset);
Eric Anholt40123c12009-03-09 13:42:30 -07001345
1346 page_length = remain;
1347 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1348 page_length = PAGE_SIZE - shmem_page_offset;
Eric Anholt40123c12009-03-09 13:42:30 -07001349
Daniel Vetter58642882012-03-25 19:47:37 +02001350 /* If we don't overwrite a cacheline completely we need to be
1351 * careful to have up-to-date data by first clflushing. Don't
 1352 * overcomplicate things and flush the entire page. */
Chris Wilson43394c72016-08-18 17:16:47 +01001353 partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
Daniel Vetter58642882012-03-25 19:47:37 +02001354 ((shmem_page_offset | page_length)
1355 & (boot_cpu_data.x86_clflush_size - 1));
1356
Daniel Vetter8c599672011-12-14 13:57:31 +01001357 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1358 (page_to_phys(page) & (1 << 17)) != 0;
1359
Daniel Vetterd174bd62012-03-25 19:47:40 +02001360 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1361 user_data, page_do_bit17_swizzling,
1362 partial_cacheline_write,
Chris Wilson43394c72016-08-18 17:16:47 +01001363 needs_clflush & CLFLUSH_AFTER);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001364 if (ret == 0)
1365 goto next_page;
Eric Anholt40123c12009-03-09 13:42:30 -07001366
Daniel Vettere244a442012-03-25 19:47:28 +02001367 hit_slowpath = 1;
Daniel Vettere244a442012-03-25 19:47:28 +02001368 mutex_unlock(&dev->struct_mutex);
Daniel Vetterd174bd62012-03-25 19:47:40 +02001369 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1370 user_data, page_do_bit17_swizzling,
1371 partial_cacheline_write,
Chris Wilson43394c72016-08-18 17:16:47 +01001372 needs_clflush & CLFLUSH_AFTER);
Eric Anholt40123c12009-03-09 13:42:30 -07001373
Daniel Vettere244a442012-03-25 19:47:28 +02001374 mutex_lock(&dev->struct_mutex);
Chris Wilson755d2212012-09-04 21:02:55 +01001375
Chris Wilson755d2212012-09-04 21:02:55 +01001376 if (ret)
Daniel Vetter8c599672011-12-14 13:57:31 +01001377 goto out;
Daniel Vetter8c599672011-12-14 13:57:31 +01001378
Chris Wilson17793c92014-03-07 08:30:36 +00001379next_page:
Eric Anholt40123c12009-03-09 13:42:30 -07001380 remain -= page_length;
Daniel Vetter8c599672011-12-14 13:57:31 +01001381 user_data += page_length;
Eric Anholt40123c12009-03-09 13:42:30 -07001382 offset += page_length;
1383 }
1384
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001385out:
Chris Wilson43394c72016-08-18 17:16:47 +01001386 i915_gem_obj_finish_shmem_access(obj);
Chris Wilson755d2212012-09-04 21:02:55 +01001387
Daniel Vettere244a442012-03-25 19:47:28 +02001388 if (hit_slowpath) {
Daniel Vetter8dcf0152012-11-15 16:53:58 +01001389 /*
1390 * Fixup: Flush cpu caches in case we didn't flush the dirty
1391 * cachelines in-line while writing and the object moved
1392 * out of the cpu write domain while we've dropped the lock.
1393 */
Chris Wilson43394c72016-08-18 17:16:47 +01001394 if (!(needs_clflush & CLFLUSH_AFTER) &&
Daniel Vetter8dcf0152012-11-15 16:53:58 +01001395 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
Chris Wilson000433b2013-08-08 14:41:09 +01001396 if (i915_gem_clflush_object(obj, obj->pin_display))
Chris Wilson43394c72016-08-18 17:16:47 +01001397 needs_clflush |= CLFLUSH_AFTER;
Daniel Vettere244a442012-03-25 19:47:28 +02001398 }
Daniel Vetter8c599672011-12-14 13:57:31 +01001399 }
Eric Anholt40123c12009-03-09 13:42:30 -07001400
Chris Wilson43394c72016-08-18 17:16:47 +01001401 if (needs_clflush & CLFLUSH_AFTER)
Chris Wilsonc0336662016-05-06 15:40:21 +01001402 i915_gem_chipset_flush(to_i915(dev));
Daniel Vetter58642882012-03-25 19:47:37 +02001403
Rodrigo Vivide152b62015-07-07 16:28:51 -07001404 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
Eric Anholt40123c12009-03-09 13:42:30 -07001405 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001406}
1407
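/*
 * Sketch of the alignment test feeding partial_cacheline_write in the loop
 * above: a clflush is needed *before* the copy only when the write does not
 * start and end on clflush-size boundaries, which the single expression
 * (offset | len) & (clflush_size - 1) detects. Standalone example assuming
 * a 64-byte clflush size; names are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

static bool example_partial_cacheline_write(unsigned int offset,
					    unsigned int len,
					    unsigned int clflush_size)
{
	return ((offset | len) & (clflush_size - 1)) != 0;
}

int main(void)
{
	printf("%d\n", example_partial_cacheline_write(0, 4096, 64));  /* 0: fully aligned */
	printf("%d\n", example_partial_cacheline_write(32, 4096, 64)); /* 1: unaligned start */
	printf("%d\n", example_partial_cacheline_write(0, 100, 64));   /* 1: unaligned end */
	return 0;
}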
1408/**
 1409 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001410 * @dev: drm device
1411 * @data: ioctl data blob
1412 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001413 *
1414 * On error, the contents of the buffer that were to be modified are undefined.
1415 */
1416int
1417i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001418 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001419{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001420 struct drm_i915_private *dev_priv = to_i915(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001421 struct drm_i915_gem_pwrite *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001422 struct drm_i915_gem_object *obj;
Chris Wilson51311d02010-11-17 09:10:42 +00001423 int ret;
1424
1425 if (args->size == 0)
1426 return 0;
1427
1428 if (!access_ok(VERIFY_READ,
Gustavo Padovan3ed605b2016-04-26 12:32:27 -03001429 u64_to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +00001430 args->size))
1431 return -EFAULT;
1432
Jani Nikulad330a952014-01-21 11:24:25 +02001433 if (likely(!i915.prefault_disable)) {
Al Viro4bce9f62016-09-17 18:02:44 -04001434 ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
Xiong Zhang0b74b502013-07-19 13:51:24 +08001435 args->size);
1436 if (ret)
1437 return -EFAULT;
1438 }
Eric Anholt673a3942008-07-30 12:06:12 -07001439
Chris Wilson03ac0642016-07-20 13:31:51 +01001440 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilson258a5ed2016-08-05 10:14:16 +01001441 if (!obj)
1442 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001443
Chris Wilson7dcd2492010-09-26 20:21:44 +01001444 /* Bounds check destination. */
Chris Wilson05394f32010-11-08 19:18:58 +00001445 if (args->offset > obj->base.size ||
1446 args->size > obj->base.size - args->offset) {
Chris Wilsonce9d4192010-09-26 20:50:05 +01001447 ret = -EINVAL;
Chris Wilson258a5ed2016-08-05 10:14:16 +01001448 goto err;
Chris Wilsonce9d4192010-09-26 20:50:05 +01001449 }
1450
Chris Wilsondb53a302011-02-03 11:57:46 +00001451 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1452
Chris Wilson258a5ed2016-08-05 10:14:16 +01001453 ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
1454 if (ret)
1455 goto err;
1456
1457 intel_runtime_pm_get(dev_priv);
1458
1459 ret = i915_mutex_lock_interruptible(dev);
1460 if (ret)
1461 goto err_rpm;
1462
Daniel Vetter935aaa62012-03-25 19:47:35 +02001463 ret = -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -07001464 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1465 * it would end up going through the fenced access, and we'll get
1466 * different detiling behavior between reading and writing.
1467 * pread/pwrite currently are reading and writing from the CPU
1468 * perspective, requiring manual detiling by the client.
1469 */
Chris Wilson6eae0052016-06-20 15:05:52 +01001470 if (!i915_gem_object_has_struct_page(obj) ||
Chris Wilson2c225692013-08-09 12:26:45 +01001471 cpu_write_needs_clflush(obj)) {
Ankitprasad Sharma4f1959e2016-06-10 14:23:01 +05301472 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
Daniel Vetter935aaa62012-03-25 19:47:35 +02001473 /* Note that the gtt paths might fail with non-page-backed user
1474 * pointers (e.g. gtt mappings when moving data between
 1475 * textures). Fall back to the shmem path in that case. */
Eric Anholt40123c12009-03-09 13:42:30 -07001476 }
Eric Anholt673a3942008-07-30 12:06:12 -07001477
Chris Wilsonfae82e52016-07-16 18:42:36 +01001478 if (ret == -EFAULT || ret == -ENOSPC) {
Chris Wilson6a2c4232014-11-04 04:51:40 -08001479 if (obj->phys_handle)
1480 ret = i915_gem_phys_pwrite(obj, args, file);
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05301481 else
Chris Wilson43394c72016-08-18 17:16:47 +01001482 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
Chris Wilson6a2c4232014-11-04 04:51:40 -08001483 }
Daniel Vetter5c0480f2011-12-14 13:57:30 +01001484
Chris Wilsonf8c417c2016-07-20 13:31:53 +01001485 i915_gem_object_put(obj);
Chris Wilsonfbd5a262010-10-14 15:03:58 +01001486 mutex_unlock(&dev->struct_mutex);
Imre Deak5d77d9c2014-11-12 16:40:35 +02001487 intel_runtime_pm_put(dev_priv);
1488
Eric Anholt673a3942008-07-30 12:06:12 -07001489 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001490
Chris Wilson258a5ed2016-08-05 10:14:16 +01001491err_rpm:
1492 intel_runtime_pm_put(dev_priv);
1493err:
1494 i915_gem_object_put_unlocked(obj);
Chris Wilson094f9a52013-09-25 17:34:55 +01001495 return ret;
Chris Wilsonb3612372012-08-24 09:35:08 +01001496}
1497
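/*
 * Userspace-side sketch of how the pwrite ioctl handled above is typically
 * issued. It assumes an open DRM fd, a valid GEM handle, and that the i915
 * uapi header (<drm/i915_drm.h>) is on the include path; error handling is
 * reduced to returning the raw ioctl result.
 */
#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_gem_pwrite(int drm_fd, uint32_t handle,
			      uint64_t offset, const void *data, uint64_t len)
{
	struct drm_i915_gem_pwrite pwrite;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = handle;
	pwrite.offset = offset;              /* byte offset into the object */
	pwrite.size = len;                   /* number of bytes to write */
	pwrite.data_ptr = (uintptr_t)data;   /* user pointer to the source */

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}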
Chris Wilsond243ad82016-08-18 17:16:44 +01001498static inline enum fb_op_origin
Chris Wilsonaeecc962016-06-17 14:46:39 -03001499write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1500{
Chris Wilson50349242016-08-18 17:17:04 +01001501 return (domain == I915_GEM_DOMAIN_GTT ?
1502 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
Chris Wilsonaeecc962016-06-17 14:46:39 -03001503}
1504
Eric Anholt673a3942008-07-30 12:06:12 -07001505/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001506 * i915_gem_set_domain_ioctl - Called when user space prepares to use an
 1507 * object with the CPU, either through the mmap ioctl's mapping or a GTT mapping.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001508 * @dev: drm device
1509 * @data: ioctl data blob
1510 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001511 */
1512int
1513i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001514 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001515{
1516 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001517 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001518 uint32_t read_domains = args->read_domains;
1519 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001520 int ret;
1521
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001522 /* Only handle setting domains to types used by the CPU. */
Chris Wilsonb8f90962016-08-05 10:14:07 +01001523 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001524 return -EINVAL;
1525
1526 /* Having something in the write domain implies it's in the read
1527 * domain, and only that read domain. Enforce that in the request.
1528 */
1529 if (write_domain != 0 && read_domains != write_domain)
1530 return -EINVAL;
1531
Chris Wilson03ac0642016-07-20 13:31:51 +01001532 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001533 if (!obj)
1534 return -ENOENT;
Jesse Barnes652c3932009-08-17 13:31:43 -07001535
Chris Wilson3236f572012-08-24 09:35:09 +01001536 /* Try to flush the object off the GPU without holding the lock.
1537 * We will repeat the flush holding the lock in the normal manner
1538 * to catch cases where we are gazumped.
1539 */
Chris Wilsonb8f90962016-08-05 10:14:07 +01001540 ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
Chris Wilson3236f572012-08-24 09:35:09 +01001541 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001542 goto err;
1543
1544 ret = i915_mutex_lock_interruptible(dev);
1545 if (ret)
1546 goto err;
Chris Wilson3236f572012-08-24 09:35:09 +01001547
Chris Wilson43566de2015-01-02 16:29:29 +05301548 if (read_domains & I915_GEM_DOMAIN_GTT)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001549 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Chris Wilson43566de2015-01-02 16:29:29 +05301550 else
Eric Anholte47c68e2008-11-14 13:35:19 -08001551 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001552
Daniel Vetter031b6982015-06-26 19:35:16 +02001553 if (write_domain != 0)
Chris Wilsonaeecc962016-06-17 14:46:39 -03001554 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
Daniel Vetter031b6982015-06-26 19:35:16 +02001555
Chris Wilsonf8c417c2016-07-20 13:31:53 +01001556 i915_gem_object_put(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001557 mutex_unlock(&dev->struct_mutex);
1558 return ret;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001559
1560err:
1561 i915_gem_object_put_unlocked(obj);
1562 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001563}
1564
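/*
 * Userspace-side sketch of the set_domain ioctl above: before touching a
 * CPU mmap, userspace typically moves the object into the CPU domain (and
 * declares a write domain if it will write). Assumes a valid DRM fd, a
 * valid GEM handle and the <drm/i915_drm.h> uapi header.
 */
#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_set_domain_cpu(int drm_fd, uint32_t handle, int writing)
{
	struct drm_i915_gem_set_domain sd;

	memset(&sd, 0, sizeof(sd));
	sd.handle = handle;
	sd.read_domains = I915_GEM_DOMAIN_CPU;
	sd.write_domain = writing ? I915_GEM_DOMAIN_CPU : 0;

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}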
1565/**
 1566 * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001567 * @dev: drm device
1568 * @data: ioctl data blob
1569 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001570 */
1571int
1572i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001573 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001574{
1575 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001576 struct drm_i915_gem_object *obj;
Chris Wilsonc21724c2016-08-05 10:14:19 +01001577 int err = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001578
Chris Wilson03ac0642016-07-20 13:31:51 +01001579 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilsonc21724c2016-08-05 10:14:19 +01001580 if (!obj)
1581 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001582
Eric Anholt673a3942008-07-30 12:06:12 -07001583 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilsonc21724c2016-08-05 10:14:19 +01001584 if (READ_ONCE(obj->pin_display)) {
1585 err = i915_mutex_lock_interruptible(dev);
1586 if (!err) {
1587 i915_gem_object_flush_cpu_write_domain(obj);
1588 mutex_unlock(&dev->struct_mutex);
1589 }
1590 }
Eric Anholte47c68e2008-11-14 13:35:19 -08001591
Chris Wilsonc21724c2016-08-05 10:14:19 +01001592 i915_gem_object_put_unlocked(obj);
1593 return err;
Eric Anholt673a3942008-07-30 12:06:12 -07001594}
1595
1596/**
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001597 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1598 * it is mapped to.
1599 * @dev: drm device
1600 * @data: ioctl data blob
1601 * @file: drm file
Eric Anholt673a3942008-07-30 12:06:12 -07001602 *
1603 * While the mapping holds a reference on the contents of the object, it doesn't
1604 * imply a ref on the object itself.
Daniel Vetter34367382014-10-16 12:28:18 +02001605 *
1606 * IMPORTANT:
1607 *
 1608 * DRM driver writers who look at this function as an example for how to do GEM
1609 * mmap support, please don't implement mmap support like here. The modern way
1610 * to implement DRM mmap support is with an mmap offset ioctl (like
1611 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1612 * That way debug tooling like valgrind will understand what's going on, hiding
1613 * the mmap call in a driver private ioctl will break that. The i915 driver only
1614 * does cpu mmaps this way because we didn't know better.
Eric Anholt673a3942008-07-30 12:06:12 -07001615 */
1616int
1617i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001618 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001619{
1620 struct drm_i915_gem_mmap *args = data;
Chris Wilson03ac0642016-07-20 13:31:51 +01001621 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001622 unsigned long addr;
1623
Akash Goel1816f922015-01-02 16:29:30 +05301624 if (args->flags & ~(I915_MMAP_WC))
1625 return -EINVAL;
1626
Borislav Petkov568a58e2016-03-29 17:42:01 +02001627 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
Akash Goel1816f922015-01-02 16:29:30 +05301628 return -ENODEV;
1629
Chris Wilson03ac0642016-07-20 13:31:51 +01001630 obj = i915_gem_object_lookup(file, args->handle);
1631 if (!obj)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001632 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001633
Daniel Vetter1286ff72012-05-10 15:25:09 +02001634 /* prime objects have no backing filp to GEM mmap
1635 * pages from.
1636 */
Chris Wilson03ac0642016-07-20 13:31:51 +01001637 if (!obj->base.filp) {
Chris Wilson34911fd2016-07-20 13:31:54 +01001638 i915_gem_object_put_unlocked(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001639 return -EINVAL;
1640 }
1641
Chris Wilson03ac0642016-07-20 13:31:51 +01001642 addr = vm_mmap(obj->base.filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001643 PROT_READ | PROT_WRITE, MAP_SHARED,
1644 args->offset);
Akash Goel1816f922015-01-02 16:29:30 +05301645 if (args->flags & I915_MMAP_WC) {
1646 struct mm_struct *mm = current->mm;
1647 struct vm_area_struct *vma;
1648
Michal Hocko80a89a52016-05-23 16:26:11 -07001649 if (down_write_killable(&mm->mmap_sem)) {
Chris Wilson34911fd2016-07-20 13:31:54 +01001650 i915_gem_object_put_unlocked(obj);
Michal Hocko80a89a52016-05-23 16:26:11 -07001651 return -EINTR;
1652 }
Akash Goel1816f922015-01-02 16:29:30 +05301653 vma = find_vma(mm, addr);
1654 if (vma)
1655 vma->vm_page_prot =
1656 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1657 else
1658 addr = -ENOMEM;
1659 up_write(&mm->mmap_sem);
Chris Wilsonaeecc962016-06-17 14:46:39 -03001660
1661 /* This may race, but that's ok, it only gets set */
Chris Wilson50349242016-08-18 17:17:04 +01001662 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
Akash Goel1816f922015-01-02 16:29:30 +05301663 }
Chris Wilson34911fd2016-07-20 13:31:54 +01001664 i915_gem_object_put_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001665 if (IS_ERR((void *)addr))
1666 return addr;
1667
1668 args->addr_ptr = (uint64_t) addr;
1669
1670 return 0;
1671}
1672
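/*
 * Userspace-side sketch of the CPU mmap ioctl above, including the optional
 * write-combining flag. Assumes a valid DRM fd and GEM handle and the
 * <drm/i915_drm.h> uapi header; the returned pointer maps the shmem backing
 * store directly, not the GTT.
 */
#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *example_gem_mmap_cpu(int drm_fd, uint32_t handle,
				  uint64_t size, int want_wc)
{
	struct drm_i915_gem_mmap arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.offset = 0;                          /* start of the object */
	arg.size = size;                         /* length to map */
	arg.flags = want_wc ? I915_MMAP_WC : 0;  /* WC needs PAT support */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;

	return (void *)(uintptr_t)arg.addr_ptr;
}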
Chris Wilson03af84f2016-08-18 17:17:01 +01001673static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1674{
1675 u64 size;
1676
1677 size = i915_gem_object_get_stride(obj);
1678 size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
1679
1680 return size >> PAGE_SHIFT;
1681}
1682
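/*
 * Worked example for tile_row_pages() above (illustrative numbers): a
 * Y-tiled object with a 512-byte stride has tile rows of 512 * 32 = 16384
 * bytes, i.e. 16384 >> PAGE_SHIFT = 4 pages per row; an X-tiled object
 * with the same stride has 512 * 8 = 4096 bytes, i.e. one page per row.
 * The fault handler below rounds its chunk size up to whole tile rows so
 * that partial views remain suitable for fenced access.
 */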
Jesse Barnesde151cf2008-11-12 10:03:55 -08001683/**
Chris Wilson4cc69072016-08-25 19:05:19 +01001684 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1685 *
1686 * A history of the GTT mmap interface:
1687 *
 1688 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
1689 * aligned and suitable for fencing, and still fit into the available
1690 * mappable space left by the pinned display objects. A classic problem
1691 * we called the page-fault-of-doom where we would ping-pong between
1692 * two objects that could not fit inside the GTT and so the memcpy
1693 * would page one object in at the expense of the other between every
1694 * single byte.
1695 *
 1696 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1697 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1698 * object is too large for the available space (or simply too large
1699 * for the mappable aperture!), a view is created instead and faulted
1700 * into userspace. (This view is aligned and sized appropriately for
1701 * fenced access.)
1702 *
1703 * Restrictions:
1704 *
1705 * * snoopable objects cannot be accessed via the GTT. It can cause machine
1706 * hangs on some architectures, corruption on others. An attempt to service
1707 * a GTT page fault from a snoopable object will generate a SIGBUS.
1708 *
 1709 * the object must be able to fit into RAM (physical memory, though not
1710 * limited to the mappable aperture).
1711 *
1712 *
1713 * Caveats:
1714 *
1715 * * a new GTT page fault will synchronize rendering from the GPU and flush
1716 * all data to system memory. Subsequent access will not be synchronized.
1717 *
1718 * * all mappings are revoked on runtime device suspend.
1719 *
1720 * * there are only 8, 16 or 32 fence registers to share between all users
 1721 * (older machines require a fence register for display and blitter access
1722 * as well). Contention of the fence registers will cause the previous users
1723 * to be unmapped and any new access will generate new page faults.
1724 *
1725 * * running out of memory while servicing a fault may generate a SIGBUS,
1726 * rather than the expected SIGSEGV.
1727 */
1728int i915_gem_mmap_gtt_version(void)
1729{
1730 return 1;
1731}
1732
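/*
 * Userspace-side sketch: querying the GTT mmap feature level reported by
 * i915_gem_mmap_gtt_version() above through the getparam interface. It
 * assumes I915_PARAM_MMAP_GTT_VERSION is exposed by the uapi header and
 * that drm_fd is an open i915 device node.
 */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_query_gtt_mmap_version(int drm_fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_MMAP_GTT_VERSION;
	gp.value = &value;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return value; /* 0: legacy behaviour, 1: partial views supported */
}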
Jesse Barnesde151cf2008-11-12 10:03:55 -08001733/**
1734 * i915_gem_fault - fault a page into the GTT
Chris Wilson058d88c2016-08-15 10:49:06 +01001735 * @area: CPU VMA in question
Geliang Tangd9072a32015-09-15 05:58:44 -07001736 * @vmf: fault info
Jesse Barnesde151cf2008-11-12 10:03:55 -08001737 *
 1738 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1739 * from userspace. The fault handler takes care of binding the object to
1740 * the GTT (if needed), allocating and programming a fence register (again,
1741 * only if needed based on whether the old reg is still valid or the object
1742 * is tiled) and inserting a new PTE into the faulting process.
1743 *
1744 * Note that the faulting process may involve evicting existing objects
1745 * from the GTT and/or fence registers to make room. So performance may
1746 * suffer if the GTT working set is large or there are few fence registers
1747 * left.
Chris Wilson4cc69072016-08-25 19:05:19 +01001748 *
1749 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1750 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
Jesse Barnesde151cf2008-11-12 10:03:55 -08001751 */
Chris Wilson058d88c2016-08-15 10:49:06 +01001752int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001753{
Chris Wilson03af84f2016-08-18 17:17:01 +01001754#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
Chris Wilson058d88c2016-08-15 10:49:06 +01001755 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
Chris Wilson05394f32010-11-08 19:18:58 +00001756 struct drm_device *dev = obj->base.dev;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001757 struct drm_i915_private *dev_priv = to_i915(dev);
1758 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001759 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Chris Wilson058d88c2016-08-15 10:49:06 +01001760 struct i915_vma *vma;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001761 pgoff_t page_offset;
Chris Wilson82118872016-08-18 17:17:05 +01001762 unsigned int flags;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001763 int ret;
Paulo Zanonif65c9162013-11-27 18:20:34 -02001764
Jesse Barnesde151cf2008-11-12 10:03:55 -08001765 /* We don't use vmf->pgoff since that has the fake offset */
Chris Wilson058d88c2016-08-15 10:49:06 +01001766 page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
Jesse Barnesde151cf2008-11-12 10:03:55 -08001767 PAGE_SHIFT;
1768
Chris Wilsondb53a302011-02-03 11:57:46 +00001769 trace_i915_gem_object_fault(obj, page_offset, true, write);
1770
Chris Wilson6e4930f2014-02-07 18:37:06 -02001771 /* Try to flush the object off the GPU first without holding the lock.
Chris Wilsonb8f90962016-08-05 10:14:07 +01001772 * Upon acquiring the lock, we will perform our sanity checks and then
Chris Wilson6e4930f2014-02-07 18:37:06 -02001773 * repeat the flush holding the lock in the normal manner to catch cases
1774 * where we are gazumped.
1775 */
Chris Wilsonb8f90962016-08-05 10:14:07 +01001776 ret = __unsafe_wait_rendering(obj, NULL, !write);
Chris Wilson6e4930f2014-02-07 18:37:06 -02001777 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001778 goto err;
1779
1780 intel_runtime_pm_get(dev_priv);
1781
1782 ret = i915_mutex_lock_interruptible(dev);
1783 if (ret)
1784 goto err_rpm;
Chris Wilson6e4930f2014-02-07 18:37:06 -02001785
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001786 /* Access to snoopable pages through the GTT is incoherent. */
1787 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
Chris Wilsonddeff6e2014-05-28 16:16:41 +01001788 ret = -EFAULT;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001789 goto err_unlock;
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001790 }
1791
Chris Wilson82118872016-08-18 17:17:05 +01001792 /* If the object is smaller than a couple of partial vma, it is
1793 * not worth only creating a single partial vma - we may as well
1794 * clear enough space for the full object.
1795 */
1796 flags = PIN_MAPPABLE;
1797 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1798 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1799
Chris Wilsona61007a2016-08-18 17:17:02 +01001800 /* Now pin it into the GTT as needed */
Chris Wilson82118872016-08-18 17:17:05 +01001801 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
Chris Wilsona61007a2016-08-18 17:17:02 +01001802 if (IS_ERR(vma)) {
1803 struct i915_ggtt_view view;
Chris Wilson03af84f2016-08-18 17:17:01 +01001804 unsigned int chunk_size;
1805
Chris Wilsona61007a2016-08-18 17:17:02 +01001806 /* Use a partial view if it is bigger than available space */
Chris Wilson03af84f2016-08-18 17:17:01 +01001807 chunk_size = MIN_CHUNK_PAGES;
1808 if (i915_gem_object_is_tiled(obj))
Chris Wilsonc4b8c572016-11-07 10:54:43 +00001809 chunk_size = roundup(chunk_size, tile_row_pages(obj));
Joonas Lahtinene7ded2d2015-05-08 14:37:39 +03001810
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03001811 memset(&view, 0, sizeof(view));
1812 view.type = I915_GGTT_VIEW_PARTIAL;
1813 view.params.partial.offset = rounddown(page_offset, chunk_size);
1814 view.params.partial.size =
Chris Wilsona61007a2016-08-18 17:17:02 +01001815 min_t(unsigned int, chunk_size,
Chris Wilson058d88c2016-08-15 10:49:06 +01001816 (area->vm_end - area->vm_start) / PAGE_SIZE -
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03001817 view.params.partial.offset);
Joonas Lahtinenc5ad54c2015-05-06 14:36:09 +03001818
Chris Wilsonaa136d92016-08-18 17:17:03 +01001819 /* If the partial covers the entire object, just create a
1820 * normal VMA.
1821 */
1822 if (chunk_size >= obj->base.size >> PAGE_SHIFT)
1823 view.type = I915_GGTT_VIEW_NORMAL;
1824
Chris Wilson50349242016-08-18 17:17:04 +01001825 /* Userspace is now writing through an untracked VMA, abandon
1826 * all hope that the hardware is able to track future writes.
1827 */
1828 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1829
Chris Wilsona61007a2016-08-18 17:17:02 +01001830 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1831 }
Chris Wilson058d88c2016-08-15 10:49:06 +01001832 if (IS_ERR(vma)) {
1833 ret = PTR_ERR(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001834 goto err_unlock;
Chris Wilson058d88c2016-08-15 10:49:06 +01001835 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001836
Chris Wilsonc9839302012-11-20 10:45:17 +00001837 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1838 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001839 goto err_unpin;
Chris Wilsonc9839302012-11-20 10:45:17 +00001840
Chris Wilson49ef5292016-08-18 17:17:00 +01001841 ret = i915_vma_get_fence(vma);
Chris Wilsonc9839302012-11-20 10:45:17 +00001842 if (ret)
Chris Wilsonb8f90962016-08-05 10:14:07 +01001843 goto err_unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001844
Chris Wilsonb90b91d2014-06-10 12:14:40 +01001845 /* Finally, remap it using the new GTT offset */
Chris Wilsonc58305a2016-08-19 16:54:28 +01001846 ret = remap_io_mapping(area,
1847 area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
1848 (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1849 min_t(u64, vma->size, area->vm_end - area->vm_start),
1850 &ggtt->mappable);
1851 if (ret)
1852 goto err_unpin;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001853
Chris Wilsona61007a2016-08-18 17:17:02 +01001854 obj->fault_mappable = true;
Chris Wilsonb8f90962016-08-05 10:14:07 +01001855err_unpin:
Chris Wilson058d88c2016-08-15 10:49:06 +01001856 __i915_vma_unpin(vma);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001857err_unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001858 mutex_unlock(&dev->struct_mutex);
Chris Wilsonb8f90962016-08-05 10:14:07 +01001859err_rpm:
1860 intel_runtime_pm_put(dev_priv);
1861err:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001862 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001863 case -EIO:
Daniel Vetter2232f032014-09-04 09:36:18 +02001864 /*
1865 * We eat errors when the gpu is terminally wedged to avoid
1866 * userspace unduly crashing (gl has no provisions for mmaps to
1867 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1868 * and so needs to be reported.
1869 */
1870 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
Paulo Zanonif65c9162013-11-27 18:20:34 -02001871 ret = VM_FAULT_SIGBUS;
1872 break;
1873 }
Chris Wilson045e7692010-11-07 09:18:22 +00001874 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001875 /*
1876 * EAGAIN means the gpu is hung and we'll wait for the error
1877 * handler to reset everything when re-faulting in
1878 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001879 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001880 case 0:
1881 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001882 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001883 case -EBUSY:
1884 /*
1885 * EBUSY is ok: this just means that another thread
1886 * already did the job.
1887 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001888 ret = VM_FAULT_NOPAGE;
1889 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001890 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001891 ret = VM_FAULT_OOM;
1892 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001893 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00001894 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001895 ret = VM_FAULT_SIGBUS;
1896 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001897 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001898 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02001899 ret = VM_FAULT_SIGBUS;
1900 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001901 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02001902 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001903}
1904
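/*
 * Standalone sketch of the partial-view sizing done in i915_gem_fault()
 * above: the faulting page offset is rounded down to a whole chunk and the
 * view length is clamped to what remains of the userspace mapping. All
 * names here are illustrative.
 */
#include <stdio.h>

struct example_partial_view {
	unsigned long first_page; /* offset of the view, in pages */
	unsigned long npages;     /* length of the view, in pages */
};

static struct example_partial_view
example_partial_view_around(unsigned long fault_page,
			    unsigned long vma_pages,
			    unsigned long chunk_pages)
{
	struct example_partial_view view;

	view.first_page = (fault_page / chunk_pages) * chunk_pages;
	view.npages = chunk_pages;
	if (view.npages > vma_pages - view.first_page)
		view.npages = vma_pages - view.first_page;

	return view;
}

int main(void)
{
	/* fault at page 700 of a 1000-page mapping, 256-page chunks */
	struct example_partial_view v =
		example_partial_view_around(700, 1000, 256);

	printf("view: pages %lu..%lu\n",
	       v.first_page, v.first_page + v.npages - 1); /* 512..767 */
	return 0;
}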
1905/**
Chris Wilson901782b2009-07-10 08:18:50 +01001906 * i915_gem_release_mmap - remove physical page mappings
1907 * @obj: obj in question
1908 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001909 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001910 * relinquish ownership of the pages back to the system.
1911 *
1912 * It is vital that we remove the page mapping if we have mapped a tiled
1913 * object through the GTT and then lose the fence register due to
1914 * resource pressure. Similarly if the object has been moved out of the
1915 * aperture, than pages mapped into userspace must be revoked. Removing the
1916 * mapping will then trigger a page fault on the next user access, allowing
1917 * fixup by i915_gem_fault().
1918 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001919void
Chris Wilson05394f32010-11-08 19:18:58 +00001920i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001921{
Chris Wilson349f2cc2016-04-13 17:35:12 +01001922 /* Serialisation between user GTT access and our code depends upon
1923 * revoking the CPU's PTE whilst the mutex is held. The next user
1924 * pagefault then has to wait until we release the mutex.
1925 */
1926 lockdep_assert_held(&obj->base.dev->struct_mutex);
1927
Chris Wilson6299f992010-11-24 12:23:44 +00001928 if (!obj->fault_mappable)
1929 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001930
David Herrmann6796cb12014-01-03 14:24:19 +01001931 drm_vma_node_unmap(&obj->base.vma_node,
1932 obj->base.dev->anon_inode->i_mapping);
Chris Wilson349f2cc2016-04-13 17:35:12 +01001933
1934 /* Ensure that the CPU's PTE are revoked and there are not outstanding
1935 * memory transactions from userspace before we return. The TLB
1936 * flushing implied above by changing the PTE above *should* be
1937 * sufficient, an extra barrier here just provides us with a bit
1938 * of paranoid documentation about our requirement to serialise
1939 * memory writes before touching registers / GSM.
1940 */
1941 wmb();
1942
Chris Wilson6299f992010-11-24 12:23:44 +00001943 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001944}
1945
Chris Wilsoneedd10f2014-06-16 08:57:44 +01001946void
1947i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1948{
1949 struct drm_i915_gem_object *obj;
1950
1951 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1952 i915_gem_release_mmap(obj);
1953}
1954
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001955/**
1956 * i915_gem_get_ggtt_size - return required global GTT size for an object
Chris Wilsona9f14812016-08-04 16:32:28 +01001957 * @dev_priv: i915 device
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001958 * @size: object size
1959 * @tiling_mode: tiling mode
1960 *
1961 * Return the required global GTT size for an object, taking into account
1962 * potential fence register mapping.
1963 */
Chris Wilsona9f14812016-08-04 16:32:28 +01001964u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
1965 u64 size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001966{
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001967 u64 ggtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001968
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001969 GEM_BUG_ON(size == 0);
1970
Chris Wilsona9f14812016-08-04 16:32:28 +01001971 if (INTEL_GEN(dev_priv) >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001972 tiling_mode == I915_TILING_NONE)
1973 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001974
1975 /* Previous chips need a power-of-two fence region when tiling */
Chris Wilsona9f14812016-08-04 16:32:28 +01001976 if (IS_GEN3(dev_priv))
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001977 ggtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001978 else
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001979 ggtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001980
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001981 while (ggtt_size < size)
1982 ggtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001983
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001984 return ggtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001985}
1986
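/*
 * Worked example for i915_gem_get_ggtt_size() above (illustrative sizes):
 * a 1.5 MiB tiled object on a pre-gen4 part other than gen3 starts from the
 * 512 KiB minimum fence region and doubles until the object fits:
 * 512 KiB -> 1 MiB -> 2 MiB, so 2 MiB of GGTT space is reserved for a
 * 1.5 MiB object (on gen3 the walk starts at 1 MiB and ends at the same
 * 2 MiB). On gen4+ or for untiled objects the object size is used as-is.
 */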
Jesse Barnesde151cf2008-11-12 10:03:55 -08001987/**
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001988 * i915_gem_get_ggtt_alignment - return required global GTT alignment
Chris Wilsona9f14812016-08-04 16:32:28 +01001989 * @dev_priv: i915 device
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001990 * @size: object size
1991 * @tiling_mode: tiling mode
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001992 * @fenced: is fenced alignment required or not
Jesse Barnesde151cf2008-11-12 10:03:55 -08001993 *
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001994 * Return the required global GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001995 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001996 */
Chris Wilsona9f14812016-08-04 16:32:28 +01001997u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
Chris Wilsonad1a7d22016-08-04 16:32:27 +01001998 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001999{
Chris Wilsonad1a7d22016-08-04 16:32:27 +01002000 GEM_BUG_ON(size == 0);
2001
Jesse Barnesde151cf2008-11-12 10:03:55 -08002002 /*
2003 * Minimum alignment is 4k (GTT page size), but might be greater
2004 * if a fence register is needed for the object.
2005 */
Chris Wilsona9f14812016-08-04 16:32:28 +01002006 if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07002007 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002008 return 4096;
2009
2010 /*
2011 * Previous chips need to be aligned to the size of the smallest
2012 * fence register that can contain the object.
2013 */
Chris Wilsona9f14812016-08-04 16:32:28 +01002014 return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002015}
2016
Chris Wilsond8cb5082012-08-11 15:41:03 +01002017static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2018{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002019 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002020 int err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002021
Chris Wilsonf3f61842016-08-05 10:14:14 +01002022 err = drm_gem_create_mmap_offset(&obj->base);
2023 if (!err)
2024 return 0;
Daniel Vetterda494d72012-12-20 15:11:16 +01002025
Chris Wilsonf3f61842016-08-05 10:14:14 +01002026 /* We can idle the GPU locklessly to flush stale objects, but in order
2027 * to claim that space for ourselves, we need to take the big
2028 * struct_mutex to free the requests+objects and allocate our slot.
Chris Wilsond8cb5082012-08-11 15:41:03 +01002029 */
Chris Wilsonea746f32016-09-09 14:11:49 +01002030 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002031 if (err)
2032 return err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002033
Chris Wilsonf3f61842016-08-05 10:14:14 +01002034 err = i915_mutex_lock_interruptible(&dev_priv->drm);
2035 if (!err) {
2036 i915_gem_retire_requests(dev_priv);
2037 err = drm_gem_create_mmap_offset(&obj->base);
2038 mutex_unlock(&dev_priv->drm.struct_mutex);
2039 }
Daniel Vetterda494d72012-12-20 15:11:16 +01002040
Chris Wilsonf3f61842016-08-05 10:14:14 +01002041 return err;
Chris Wilsond8cb5082012-08-11 15:41:03 +01002042}
2043
2044static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2045{
Chris Wilsond8cb5082012-08-11 15:41:03 +01002046 drm_gem_free_mmap_offset(&obj->base);
2047}
2048
Dave Airlieda6b51d2014-12-24 13:11:17 +10002049int
Dave Airlieff72145b2011-02-07 12:16:14 +10002050i915_gem_mmap_gtt(struct drm_file *file,
2051 struct drm_device *dev,
Dave Airlieda6b51d2014-12-24 13:11:17 +10002052 uint32_t handle,
Dave Airlieff72145b2011-02-07 12:16:14 +10002053 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002054{
Chris Wilson05394f32010-11-08 19:18:58 +00002055 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002056 int ret;
2057
Chris Wilson03ac0642016-07-20 13:31:51 +01002058 obj = i915_gem_object_lookup(file, handle);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002059 if (!obj)
2060 return -ENOENT;
Chris Wilsonab182822009-09-22 18:46:17 +01002061
Chris Wilsond8cb5082012-08-11 15:41:03 +01002062 ret = i915_gem_object_create_mmap_offset(obj);
Chris Wilsonf3f61842016-08-05 10:14:14 +01002063 if (ret == 0)
2064 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002065
Chris Wilsonf3f61842016-08-05 10:14:14 +01002066 i915_gem_object_put_unlocked(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01002067 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002068}
2069
Dave Airlieff72145b2011-02-07 12:16:14 +10002070/**
2071 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2072 * @dev: DRM device
2073 * @data: GTT mapping ioctl data
2074 * @file: GEM object info
2075 *
2076 * Simply returns the fake offset to userspace so it can mmap it.
2077 * The mmap call will end up in drm_gem_mmap(), which will set things
2078 * up so we can get faults in the handler above.
2079 *
2080 * The fault handler will take care of binding the object into the GTT
2081 * (since it may have been evicted to make room for something), allocating
2082 * a fence register, and mapping the appropriate aperture address into
2083 * userspace.
2084 */
2085int
2086i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2087 struct drm_file *file)
2088{
2089 struct drm_i915_gem_mmap_gtt *args = data;
2090
Dave Airlieda6b51d2014-12-24 13:11:17 +10002091 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
Dave Airlieff72145b2011-02-07 12:16:14 +10002092}
2093
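/*
 * Userspace-side sketch of the two-step GTT mapping flow set up above:
 * first ask the kernel for the fake offset, then mmap that offset on the
 * DRM fd itself so that faults land in i915_gem_fault(). Assumes a valid
 * DRM fd and GEM handle and the <drm/i915_drm.h> uapi header; no error
 * cleanup is shown.
 */
#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *example_gem_mmap_gtt(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg;
	void *ptr;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return NULL;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, arg.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}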
Daniel Vetter225067e2012-08-20 10:23:20 +02002094/* Immediately discard the backing storage */
2095static void
2096i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01002097{
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002098 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02002099
Chris Wilson4d6294bf2012-08-11 15:41:05 +01002100 if (obj->base.filp == NULL)
2101 return;
2102
Daniel Vetter225067e2012-08-20 10:23:20 +02002103 /* Our goal here is to return as much of the memory as
2104 * is possible back to the system as we are called from OOM.
2105 * To do this we must instruct the shmfs to drop all of its
2106 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01002107 */
Chris Wilson55372522014-03-25 13:23:06 +00002108 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
Daniel Vetter225067e2012-08-20 10:23:20 +02002109 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01002110}
Chris Wilsone5281cc2010-10-28 13:45:36 +01002111
Chris Wilson55372522014-03-25 13:23:06 +00002112/* Try to discard unwanted pages */
2113static void
2114i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
Daniel Vetter225067e2012-08-20 10:23:20 +02002115{
Chris Wilson55372522014-03-25 13:23:06 +00002116 struct address_space *mapping;
2117
2118 switch (obj->madv) {
2119 case I915_MADV_DONTNEED:
2120 i915_gem_object_truncate(obj);
2121 case __I915_MADV_PURGED:
2122 return;
2123 }
2124
2125 if (obj->base.filp == NULL)
2126 return;
2127
Al Viro93c76a32015-12-04 23:45:44 -05002128	mapping = obj->base.filp->f_mapping;
Chris Wilson55372522014-03-25 13:23:06 +00002129 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002130}
2131
Chris Wilson5cdf5882010-09-27 15:51:07 +01002132static void
Chris Wilson05394f32010-11-08 19:18:58 +00002133i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002134{
Dave Gordon85d12252016-05-20 11:54:06 +01002135 struct sgt_iter sgt_iter;
2136 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02002137 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02002138
Chris Wilson05394f32010-11-08 19:18:58 +00002139 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07002140
Chris Wilson6c085a72012-08-20 11:40:46 +02002141 ret = i915_gem_object_set_to_cpu_domain(obj, true);
Chris Wilsonf4457ae2016-04-13 17:35:08 +01002142 if (WARN_ON(ret)) {
Chris Wilson6c085a72012-08-20 11:40:46 +02002143 /* In the event of a disaster, abandon all caches and
2144 * hope for the best.
2145 */
Chris Wilson2c225692013-08-09 12:26:45 +01002146 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02002147 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2148 }
2149
Imre Deake2273302015-07-09 12:59:05 +03002150 i915_gem_gtt_finish_object(obj);
2151
Daniel Vetter6dacfd22011-09-12 21:30:02 +02002152 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07002153 i915_gem_object_save_bit_17_swizzle(obj);
2154
Chris Wilson05394f32010-11-08 19:18:58 +00002155 if (obj->madv == I915_MADV_DONTNEED)
2156 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01002157
Dave Gordon85d12252016-05-20 11:54:06 +01002158 for_each_sgt_page(page, sgt_iter, obj->pages) {
Chris Wilson05394f32010-11-08 19:18:58 +00002159 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01002160 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002161
Chris Wilson05394f32010-11-08 19:18:58 +00002162 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01002163 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002164
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002165 put_page(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01002166 }
Chris Wilson05394f32010-11-08 19:18:58 +00002167 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002168
Chris Wilson9da3da62012-06-01 15:20:22 +01002169 sg_free_table(obj->pages);
2170 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01002171}
2172
Chris Wilsondd624af2013-01-15 12:39:35 +00002173int
Chris Wilson37e680a2012-06-07 15:38:42 +01002174i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2175{
2176 const struct drm_i915_gem_object_ops *ops = obj->ops;
2177
Chris Wilson2f745ad2012-09-04 21:02:58 +01002178 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01002179 return 0;
2180
Chris Wilsona5570172012-09-04 21:02:54 +01002181 if (obj->pages_pin_count)
2182 return -EBUSY;
2183
Chris Wilson15717de2016-08-04 07:52:26 +01002184 GEM_BUG_ON(obj->bind_count);
Ben Widawsky3e123022013-07-31 17:00:04 -07002185
Chris Wilsona2165e32012-12-03 11:49:00 +00002186 /* ->put_pages might need to allocate memory for the bit17 swizzle
2187 * array, hence protect them from being reaped by removing them from gtt
2188 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07002189 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00002190
Chris Wilson0a798eb2016-04-08 12:11:11 +01002191 if (obj->mapping) {
Chris Wilson4b30cb22016-08-18 17:16:42 +01002192 void *ptr;
2193
2194 ptr = ptr_mask_bits(obj->mapping);
2195 if (is_vmalloc_addr(ptr))
2196 vunmap(ptr);
Chris Wilsonfb8621d2016-04-08 12:11:14 +01002197 else
Chris Wilson4b30cb22016-08-18 17:16:42 +01002198 kunmap(kmap_to_page(ptr));
2199
Chris Wilson0a798eb2016-04-08 12:11:11 +01002200 obj->mapping = NULL;
2201 }
2202
Chris Wilson37e680a2012-06-07 15:38:42 +01002203 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002204 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02002205
Chris Wilson55372522014-03-25 13:23:06 +00002206 i915_gem_object_invalidate(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02002207
2208 return 0;
2209}
2210
Chris Wilson37e680a2012-06-07 15:38:42 +01002211static int
Chris Wilson6c085a72012-08-20 11:40:46 +02002212i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002213{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002214 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002215 int page_count, i;
2216 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01002217 struct sg_table *st;
2218 struct scatterlist *sg;
Dave Gordon85d12252016-05-20 11:54:06 +01002219 struct sgt_iter sgt_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07002220 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02002221 unsigned long last_pfn = 0; /* suppress gcc warning */
Imre Deake2273302015-07-09 12:59:05 +03002222 int ret;
Chris Wilson6c085a72012-08-20 11:40:46 +02002223 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07002224
Chris Wilson6c085a72012-08-20 11:40:46 +02002225 /* Assert that the object is not currently in any GPU domain. As it
2226 * wasn't in the GTT, there shouldn't be any way it could have been in
2227 * a GPU cache
2228 */
2229 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2230 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2231
Chris Wilson9da3da62012-06-01 15:20:22 +01002232 st = kmalloc(sizeof(*st), GFP_KERNEL);
2233 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002234 return -ENOMEM;
2235
Chris Wilson9da3da62012-06-01 15:20:22 +01002236 page_count = obj->base.size / PAGE_SIZE;
2237 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01002238 kfree(st);
2239 return -ENOMEM;
2240 }
2241
2242 /* Get the list of pages out of our struct file. They'll be pinned
2243 * at this point until we release them.
2244 *
2245 * Fail silently without starting the shrinker
2246 */
Al Viro93c76a32015-12-04 23:45:44 -05002247 mapping = obj->base.filp->f_mapping;
Michal Hockoc62d2552015-11-06 16:28:49 -08002248 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
Mel Gormand0164ad2015-11-06 16:28:21 -08002249 gfp |= __GFP_NORETRY | __GFP_NOWARN;
Imre Deak90797e62013-02-18 19:28:03 +02002250 sg = st->sgl;
2251 st->nents = 0;
2252 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02002253 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2254 if (IS_ERR(page)) {
Chris Wilson21ab4e72014-09-09 11:16:08 +01002255 i915_gem_shrink(dev_priv,
2256 page_count,
2257 I915_SHRINK_BOUND |
2258 I915_SHRINK_UNBOUND |
2259 I915_SHRINK_PURGEABLE);
Chris Wilson6c085a72012-08-20 11:40:46 +02002260 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2261 }
2262 if (IS_ERR(page)) {
2263 /* We've tried hard to allocate the memory by reaping
2264 * our own buffer, now let the real VM do its job and
2265 * go down in flames if truly OOM.
2266 */
Chris Wilson6c085a72012-08-20 11:40:46 +02002267 i915_gem_shrink_all(dev_priv);
David Herrmannf461d1b2014-05-25 14:34:10 +02002268 page = shmem_read_mapping_page(mapping, i);
Imre Deake2273302015-07-09 12:59:05 +03002269 if (IS_ERR(page)) {
2270 ret = PTR_ERR(page);
Chris Wilson24204892016-11-14 11:29:30 +00002271 goto err_sg;
Imre Deake2273302015-07-09 12:59:05 +03002272 }
Chris Wilson6c085a72012-08-20 11:40:46 +02002273 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002274#ifdef CONFIG_SWIOTLB
2275 if (swiotlb_nr_tbl()) {
2276 st->nents++;
2277 sg_set_page(sg, page, PAGE_SIZE, 0);
2278 sg = sg_next(sg);
2279 continue;
2280 }
2281#endif
Imre Deak90797e62013-02-18 19:28:03 +02002282 if (!i || page_to_pfn(page) != last_pfn + 1) {
2283 if (i)
2284 sg = sg_next(sg);
2285 st->nents++;
2286 sg_set_page(sg, page, PAGE_SIZE, 0);
2287 } else {
2288 sg->length += PAGE_SIZE;
2289 }
2290 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03002291
2292 /* Check that the i965g/gm workaround works. */
2293 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07002294 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002295#ifdef CONFIG_SWIOTLB
2296 if (!swiotlb_nr_tbl())
2297#endif
2298 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01002299 obj->pages = st;
2300
Imre Deake2273302015-07-09 12:59:05 +03002301 ret = i915_gem_gtt_prepare_object(obj);
2302 if (ret)
2303 goto err_pages;
2304
Eric Anholt673a3942008-07-30 12:06:12 -07002305 if (i915_gem_object_needs_bit17_swizzle(obj))
2306 i915_gem_object_do_bit_17_swizzle(obj);
2307
Chris Wilson3e510a82016-08-05 10:14:23 +01002308 if (i915_gem_object_is_tiled(obj) &&
Daniel Vetter656bfa32014-11-20 09:26:30 +01002309 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2310 i915_gem_object_pin_pages(obj);
2311
Eric Anholt673a3942008-07-30 12:06:12 -07002312 return 0;
2313
Chris Wilson24204892016-11-14 11:29:30 +00002314err_sg:
Imre Deak90797e62013-02-18 19:28:03 +02002315 sg_mark_end(sg);
Chris Wilson24204892016-11-14 11:29:30 +00002316err_pages:
Dave Gordon85d12252016-05-20 11:54:06 +01002317 for_each_sgt_page(page, sgt_iter, st)
2318 put_page(page);
Chris Wilson9da3da62012-06-01 15:20:22 +01002319 sg_free_table(st);
2320 kfree(st);
Chris Wilson0820baf2014-03-25 13:23:03 +00002321
2322 /* shmemfs first checks if there is enough memory to allocate the page
 2323 * and reports ENOSPC should there be insufficient memory, along with the usual
2324 * ENOMEM for a genuine allocation failure.
2325 *
2326 * We use ENOSPC in our driver to mean that we have run out of aperture
2327 * space and so want to translate the error from shmemfs back to our
2328 * usual understanding of ENOMEM.
2329 */
Imre Deake2273302015-07-09 12:59:05 +03002330 if (ret == -ENOSPC)
2331 ret = -ENOMEM;
2332
2333 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002334}
2335
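/*
 * Standalone sketch of the coalescing done in the allocation loop above:
 * consecutive pages whose pfns are adjacent are folded into a single
 * scatterlist-style entry instead of one entry per page. Plain arrays
 * stand in for struct scatterlist; names are illustrative only.
 */
#include <stdio.h>

struct example_seg {
	unsigned long first_pfn;
	unsigned long npages;
};

static unsigned int example_coalesce(const unsigned long *pfns,
				     unsigned int count,
				     struct example_seg *segs)
{
	unsigned int nsegs = 0;
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (i && pfns[i] == pfns[i - 1] + 1) {
			segs[nsegs - 1].npages++;   /* extend the current run */
		} else {
			segs[nsegs].first_pfn = pfns[i];
			segs[nsegs].npages = 1;     /* start a new run */
			nsegs++;
		}
	}
	return nsegs;
}

int main(void)
{
	unsigned long pfns[] = { 100, 101, 102, 200, 300, 301 };
	struct example_seg segs[6];
	unsigned int n = example_coalesce(pfns, 6, segs), i;

	for (i = 0; i < n; i++)
		printf("pfn %lu, %lu page(s)\n",
		       segs[i].first_pfn, segs[i].npages);
	/* prints three runs: 100 x3, 200 x1, 300 x2 */
	return 0;
}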
Chris Wilson37e680a2012-06-07 15:38:42 +01002336/* Ensure that the associated pages are gathered from the backing storage
2337 * and pinned into our object. i915_gem_object_get_pages() may be called
2338 * multiple times before they are released by a single call to
2339 * i915_gem_object_put_pages() - once the pages are no longer referenced
2340 * either as a result of memory pressure (reaping pages under the shrinker)
2341 * or as the object is itself released.
2342 */
2343int
2344i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2345{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002346 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilson37e680a2012-06-07 15:38:42 +01002347 const struct drm_i915_gem_object_ops *ops = obj->ops;
2348 int ret;
2349
Chris Wilson2f745ad2012-09-04 21:02:58 +01002350 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01002351 return 0;
2352
Chris Wilson43e28f02013-01-08 10:53:09 +00002353 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00002354 DRM_DEBUG("Attempting to obtain a purgeable object\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00002355 return -EFAULT;
Chris Wilson43e28f02013-01-08 10:53:09 +00002356 }
2357
Chris Wilsona5570172012-09-04 21:02:54 +01002358 BUG_ON(obj->pages_pin_count);
2359
Chris Wilson37e680a2012-06-07 15:38:42 +01002360 ret = ops->get_pages(obj);
2361 if (ret)
2362 return ret;
2363
Ben Widawsky35c20a62013-05-31 11:28:48 -07002364 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilsonee286372015-04-07 16:20:25 +01002365
2366 obj->get_page.sg = obj->pages->sgl;
2367 obj->get_page.last = 0;
2368
Chris Wilson37e680a2012-06-07 15:38:42 +01002369 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002370}
2371
Dave Gordondd6034c2016-05-20 11:54:04 +01002372/* The 'mapping' part of i915_gem_object_pin_map() below */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002373static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2374 enum i915_map_type type)
Dave Gordondd6034c2016-05-20 11:54:04 +01002375{
2376 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2377 struct sg_table *sgt = obj->pages;
Dave Gordon85d12252016-05-20 11:54:06 +01002378 struct sgt_iter sgt_iter;
2379 struct page *page;
Dave Gordonb338fa42016-05-20 11:54:05 +01002380 struct page *stack_pages[32];
2381 struct page **pages = stack_pages;
Dave Gordondd6034c2016-05-20 11:54:04 +01002382 unsigned long i = 0;
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002383 pgprot_t pgprot;
Dave Gordondd6034c2016-05-20 11:54:04 +01002384 void *addr;
2385
2386 /* A single page can always be kmapped */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002387 if (n_pages == 1 && type == I915_MAP_WB)
Dave Gordondd6034c2016-05-20 11:54:04 +01002388 return kmap(sg_page(sgt->sgl));
2389
Dave Gordonb338fa42016-05-20 11:54:05 +01002390 if (n_pages > ARRAY_SIZE(stack_pages)) {
2391 /* Too big for stack -- allocate temporary array instead */
2392 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2393 if (!pages)
2394 return NULL;
2395 }
Dave Gordondd6034c2016-05-20 11:54:04 +01002396
Dave Gordon85d12252016-05-20 11:54:06 +01002397 for_each_sgt_page(page, sgt_iter, sgt)
2398 pages[i++] = page;
Dave Gordondd6034c2016-05-20 11:54:04 +01002399
2400 /* Check that we have the expected number of pages */
2401 GEM_BUG_ON(i != n_pages);
2402
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002403 switch (type) {
2404 case I915_MAP_WB:
2405 pgprot = PAGE_KERNEL;
2406 break;
2407 case I915_MAP_WC:
2408 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2409 break;
2410 }
2411 addr = vmap(pages, n_pages, 0, pgprot);
Dave Gordondd6034c2016-05-20 11:54:04 +01002412
Dave Gordonb338fa42016-05-20 11:54:05 +01002413 if (pages != stack_pages)
2414 drm_free_large(pages);
Dave Gordondd6034c2016-05-20 11:54:04 +01002415
2416 return addr;
2417}
2418
2419/* get, pin, and map the pages of the object into kernel space */
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002420void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2421 enum i915_map_type type)
Chris Wilson0a798eb2016-04-08 12:11:11 +01002422{
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002423 enum i915_map_type has_type;
2424 bool pinned;
2425 void *ptr;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002426 int ret;
2427
2428 lockdep_assert_held(&obj->base.dev->struct_mutex);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002429 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
Chris Wilson0a798eb2016-04-08 12:11:11 +01002430
2431 ret = i915_gem_object_get_pages(obj);
2432 if (ret)
2433 return ERR_PTR(ret);
2434
2435 i915_gem_object_pin_pages(obj);
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002436 pinned = obj->pages_pin_count > 1;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002437
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002438 ptr = ptr_unpack_bits(obj->mapping, has_type);
2439 if (ptr && has_type != type) {
2440 if (pinned) {
2441 ret = -EBUSY;
2442 goto err;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002443 }
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002444
2445 if (is_vmalloc_addr(ptr))
2446 vunmap(ptr);
2447 else
2448 kunmap(kmap_to_page(ptr));
2449
2450 ptr = obj->mapping = NULL;
Chris Wilson0a798eb2016-04-08 12:11:11 +01002451 }
2452
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002453 if (!ptr) {
2454 ptr = i915_gem_object_map(obj, type);
2455 if (!ptr) {
2456 ret = -ENOMEM;
2457 goto err;
2458 }
Chris Wilson0a798eb2016-04-08 12:11:11 +01002459
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002460 obj->mapping = ptr_pack_bits(ptr, type);
2461 }
John Harrisonb2af0372015-05-29 17:43:50 +01002462
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002463 return ptr;
Chris Wilsonb4716182015-04-27 13:41:17 +01002464
Chris Wilsond31d7cb2016-08-12 12:39:58 +01002465err:
2466 i915_gem_object_unpin_pages(obj);
2467 return ERR_PTR(ret);
Ben Widawskye2d05a82013-09-24 09:57:58 -07002468}
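/*
 * Illustrative use of the pin/map API above (a sketch, with error handling
 * abbreviated); the mapping is balanced by i915_gem_object_unpin_map() once
 * the caller is done with the kernel address:
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 */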
2469
Chris Wilsoncaea7472010-11-12 13:53:37 +00002470static void
Chris Wilsonfa545cb2016-08-04 07:52:35 +01002471i915_gem_object_retire__write(struct i915_gem_active *active,
2472 struct drm_i915_gem_request *request)
Chris Wilsonb4716182015-04-27 13:41:17 +01002473{
Chris Wilsonfa545cb2016-08-04 07:52:35 +01002474 struct drm_i915_gem_object *obj =
2475 container_of(active, struct drm_i915_gem_object, last_write);
Chris Wilsonb4716182015-04-27 13:41:17 +01002476
Rodrigo Vivide152b62015-07-07 16:28:51 -07002477 intel_fb_obj_flush(obj, true, ORIGIN_CS);
Chris Wilsonb4716182015-04-27 13:41:17 +01002478}
2479
2480static void
Chris Wilsonfa545cb2016-08-04 07:52:35 +01002481i915_gem_object_retire__read(struct i915_gem_active *active,
2482 struct drm_i915_gem_request *request)
Chris Wilsoncaea7472010-11-12 13:53:37 +00002483{
Chris Wilsonfa545cb2016-08-04 07:52:35 +01002484 int idx = request->engine->id;
2485 struct drm_i915_gem_object *obj =
2486 container_of(active, struct drm_i915_gem_object, last_read[idx]);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002487
Chris Wilson573adb32016-08-04 16:32:39 +01002488 GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
Chris Wilsonb4716182015-04-27 13:41:17 +01002489
Chris Wilson573adb32016-08-04 16:32:39 +01002490 i915_gem_object_clear_active(obj, idx);
2491 if (i915_gem_object_is_active(obj))
Chris Wilsonb4716182015-04-27 13:41:17 +01002492 return;
Chris Wilson65ce3022012-07-20 12:41:02 +01002493
Chris Wilson6c246952015-07-27 10:26:26 +01002494 /* Bump our place on the bound list to keep it roughly in LRU order
2495 * so that we don't steal from recently used but inactive objects
2496 * (unless we are forced to ofc!)
2497 */
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002498 if (obj->bind_count)
2499 list_move_tail(&obj->global_list,
2500 &request->i915->mm.bound_list);
Chris Wilson6c246952015-07-27 10:26:26 +01002501
Chris Wilsonf8c417c2016-07-20 13:31:53 +01002502 i915_gem_object_put(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002503}
2504
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002505static bool i915_context_is_banned(const struct i915_gem_context *ctx)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002506{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002507 unsigned long elapsed;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002508
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002509 if (ctx->hang_stats.banned)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002510 return true;
2511
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002512 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
Chris Wilson676fa572014-12-24 08:13:39 -08002513 if (ctx->hang_stats.ban_period_seconds &&
2514 elapsed <= ctx->hang_stats.ban_period_seconds) {
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002515 DRM_DEBUG("context hanging too fast, banning!\n");
2516 return true;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002517 }
2518
2519 return false;
2520}
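/*
 * Worked example of the heuristic above (illustrative numbers): with
 * ban_period_seconds set to 6, a context whose previous hang was 4 seconds
 * ago is banned on its next hang, while one that last hung 10 seconds ago is
 * only recorded as guilty again by i915_set_reset_status() below.
 */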
2521
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002522static void i915_set_reset_status(struct i915_gem_context *ctx,
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002523 const bool guilty)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002524{
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002525 struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002526
2527 if (guilty) {
Chris Wilson7b4d3a12016-07-04 08:08:37 +01002528 hs->banned = i915_context_is_banned(ctx);
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002529 hs->batch_active++;
2530 hs->guilty_ts = get_seconds();
2531 } else {
2532 hs->batch_pending++;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002533 }
2534}
2535
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002536struct drm_i915_gem_request *
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002537i915_gem_find_active_request(struct intel_engine_cs *engine)
Chris Wilson9375e442010-09-19 12:21:28 +01002538{
Chris Wilson4db080f2013-12-04 11:37:09 +00002539 struct drm_i915_gem_request *request;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002540
Chris Wilsonf69a02c2016-07-01 17:23:16 +01002541 /* We are called by the error capture and reset at a random
2542 * point in time. In particular, note that neither is crucially
2543 * ordered with an interrupt. After a hang, the GPU is dead and we
2544 * assume that no more writes can happen (we waited long enough for
                                     2545	 * all writes that were in transit to be flushed) - adding an
2546 * extra delay for a recent interrupt is pointless. Hence, we do
2547 * not need an engine->irq_seqno_barrier() before the seqno reads.
2548 */
Chris Wilsonefdf7c02016-08-04 07:52:33 +01002549 list_for_each_entry(request, &engine->request_list, link) {
Chris Wilsonf69a02c2016-07-01 17:23:16 +01002550 if (i915_gem_request_completed(request))
Chris Wilson4db080f2013-12-04 11:37:09 +00002551 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002552
Chris Wilson5590af32016-09-09 14:11:54 +01002553 if (!i915_sw_fence_done(&request->submit))
2554 break;
2555
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002556 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00002557 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002558
2559 return NULL;
2560}
2561
Chris Wilson821ed7d2016-09-09 14:11:53 +01002562static void reset_request(struct drm_i915_gem_request *request)
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002563{
Chris Wilson821ed7d2016-09-09 14:11:53 +01002564 void *vaddr = request->ring->vaddr;
2565 u32 head;
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002566
Chris Wilson821ed7d2016-09-09 14:11:53 +01002567 /* As this request likely depends on state from the lost
2568 * context, clear out all the user operations leaving the
2569 * breadcrumb at the end (so we get the fence notifications).
2570 */
2571 head = request->head;
2572 if (request->postfix < head) {
2573 memset(vaddr + head, 0, request->ring->size - head);
2574 head = 0;
2575 }
2576 memset(vaddr + head, 0, request->postfix - head);
Chris Wilson4db080f2013-12-04 11:37:09 +00002577}
2578
Chris Wilson821ed7d2016-09-09 14:11:53 +01002579static void i915_gem_reset_engine(struct intel_engine_cs *engine)
Eric Anholt673a3942008-07-30 12:06:12 -07002580{
2581 struct drm_i915_gem_request *request;
Chris Wilson821ed7d2016-09-09 14:11:53 +01002582 struct i915_gem_context *incomplete_ctx;
Eric Anholt673a3942008-07-30 12:06:12 -07002583 bool ring_hung;
2584
Chris Wilson70c2a242016-09-09 14:11:46 +01002585	/* Ensure the irq handler finishes, and is not run again. */
2586 tasklet_kill(&engine->irq_tasklet);
Chris Wilson821ed7d2016-09-09 14:11:53 +01002587 if (engine->irq_seqno_barrier)
2588 engine->irq_seqno_barrier(engine);
2589
Eric Anholt673a3942008-07-30 12:06:12 -07002590 request = i915_gem_find_active_request(engine);
Chris Wilson821ed7d2016-09-09 14:11:53 +01002591 if (!request)
Eric Anholt673a3942008-07-30 12:06:12 -07002592 return;
2593
2594 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
Eric Anholt673a3942008-07-30 12:06:12 -07002595 i915_set_reset_status(request->ctx, ring_hung);
Chris Wilson821ed7d2016-09-09 14:11:53 +01002596 if (!ring_hung)
2597 return;
2598
2599 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2600 engine->name, request->fence.seqno);
2601
                                     2602	/* Set up the CS to resume from the breadcrumb of the hung request */
2603 engine->reset_hw(engine, request);
2604
2605 /* Users of the default context do not rely on logical state
2606 * preserved between batches. They have to emit full state on
2607 * every batch and so it is safe to execute queued requests following
2608 * the hang.
2609 *
2610 * Other contexts preserve state, now corrupt. We want to skip all
2611 * queued requests that reference the corrupt context.
2612 */
2613 incomplete_ctx = request->ctx;
2614 if (i915_gem_context_is_default(incomplete_ctx))
2615 return;
2616
2617 list_for_each_entry_continue(request, &engine->request_list, link)
2618 if (request->ctx == incomplete_ctx)
2619 reset_request(request);
Chris Wilson4db080f2013-12-04 11:37:09 +00002620}
2621
Chris Wilson821ed7d2016-09-09 14:11:53 +01002622void i915_gem_reset(struct drm_i915_private *dev_priv)
Chris Wilson4db080f2013-12-04 11:37:09 +00002623{
Chris Wilson821ed7d2016-09-09 14:11:53 +01002624 struct intel_engine_cs *engine;
Chris Wilson608c1a52015-09-03 13:01:40 +01002625
Chris Wilson821ed7d2016-09-09 14:11:53 +01002626 i915_gem_retire_requests(dev_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07002627
Chris Wilson821ed7d2016-09-09 14:11:53 +01002628 for_each_engine(engine, dev_priv)
2629 i915_gem_reset_engine(engine);
Eric Anholt673a3942008-07-30 12:06:12 -07002630
Chris Wilson821ed7d2016-09-09 14:11:53 +01002631 i915_gem_restore_fences(&dev_priv->drm);
Chris Wilsonac756942016-09-21 14:51:06 +01002632
2633 if (dev_priv->gt.awake) {
2634 intel_sanitize_gt_powersave(dev_priv);
2635 intel_enable_gt_powersave(dev_priv);
2636 if (INTEL_GEN(dev_priv) >= 6)
2637 gen6_rps_busy(dev_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07002638 }
Chris Wilson821ed7d2016-09-09 14:11:53 +01002639}
2640
2641static void nop_submit_request(struct drm_i915_gem_request *request)
2642{
2643}
2644
2645static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
2646{
2647 engine->submit_request = nop_submit_request;
Chris Wilson70c2a242016-09-09 14:11:46 +01002648
Chris Wilsonc4b09302016-07-20 09:21:10 +01002649 /* Mark all pending requests as complete so that any concurrent
2650 * (lockless) lookup doesn't try and wait upon the request as we
2651 * reset it.
2652 */
Chris Wilson87b723a2016-08-09 08:37:02 +01002653 intel_engine_init_seqno(engine, engine->last_submitted_seqno);
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002654
2655 /*
Oscar Mateodcb4c122014-11-13 10:28:10 +00002656	 * Clear the execlists queue before freeing the requests, as those
2657 * are the ones that keep the context and ringbuffer backing objects
2658 * pinned in place.
2659 */
Oscar Mateodcb4c122014-11-13 10:28:10 +00002660
Tomas Elf7de16912015-10-19 16:32:32 +01002661 if (i915.enable_execlists) {
Chris Wilson70c2a242016-09-09 14:11:46 +01002662 spin_lock(&engine->execlist_lock);
2663 INIT_LIST_HEAD(&engine->execlist_queue);
2664 i915_gem_request_put(engine->execlist_port[0].request);
2665 i915_gem_request_put(engine->execlist_port[1].request);
2666 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2667 spin_unlock(&engine->execlist_lock);
Oscar Mateodcb4c122014-11-13 10:28:10 +00002668 }
2669
Chris Wilsonb06bc7e2016-07-13 09:10:31 +01002670 engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
Eric Anholt673a3942008-07-30 12:06:12 -07002671}
2672
Chris Wilson821ed7d2016-09-09 14:11:53 +01002673void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002674{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002675 struct intel_engine_cs *engine;
Chris Wilson67d97da2016-07-04 08:08:31 +01002676
Chris Wilson91c8a322016-07-05 10:40:23 +01002677 lockdep_assert_held(&dev_priv->drm.struct_mutex);
Chris Wilson821ed7d2016-09-09 14:11:53 +01002678 set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
Chris Wilson67d97da2016-07-04 08:08:31 +01002679
Chris Wilson821ed7d2016-09-09 14:11:53 +01002680 i915_gem_context_lost(dev_priv);
Chris Wilson2ed53a92016-04-07 07:29:11 +01002681 for_each_engine(engine, dev_priv)
Chris Wilson821ed7d2016-09-09 14:11:53 +01002682 i915_gem_cleanup_engine(engine);
Chris Wilsonb913b332016-07-13 09:10:31 +01002683 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
Chris Wilson67d97da2016-07-04 08:08:31 +01002684
Chris Wilson821ed7d2016-09-09 14:11:53 +01002685 i915_gem_retire_requests(dev_priv);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002686}
2687
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002688static void
Eric Anholt673a3942008-07-30 12:06:12 -07002689i915_gem_retire_work_handler(struct work_struct *work)
2690{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002691 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01002692 container_of(work, typeof(*dev_priv), gt.retire_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01002693 struct drm_device *dev = &dev_priv->drm;
Eric Anholt673a3942008-07-30 12:06:12 -07002694
Chris Wilson891b48c2010-09-29 12:26:37 +01002695 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002696 if (mutex_trylock(&dev->struct_mutex)) {
Chris Wilson67d97da2016-07-04 08:08:31 +01002697 i915_gem_retire_requests(dev_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002698 mutex_unlock(&dev->struct_mutex);
2699 }
Chris Wilson67d97da2016-07-04 08:08:31 +01002700
2701 /* Keep the retire handler running until we are finally idle.
2702 * We do not need to do this test under locking as in the worst-case
2703 * we queue the retire worker once too often.
2704 */
Chris Wilsonc9615612016-07-09 10:12:06 +01002705 if (READ_ONCE(dev_priv->gt.awake)) {
2706 i915_queue_hangcheck(dev_priv);
Chris Wilson67d97da2016-07-04 08:08:31 +01002707 queue_delayed_work(dev_priv->wq,
2708 &dev_priv->gt.retire_work,
Chris Wilsonbcb45082012-10-05 17:02:57 +01002709 round_jiffies_up_relative(HZ));
Chris Wilsonc9615612016-07-09 10:12:06 +01002710 }
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002711}
Chris Wilson891b48c2010-09-29 12:26:37 +01002712
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002713static void
2714i915_gem_idle_work_handler(struct work_struct *work)
2715{
2716 struct drm_i915_private *dev_priv =
Chris Wilson67d97da2016-07-04 08:08:31 +01002717 container_of(work, typeof(*dev_priv), gt.idle_work.work);
Chris Wilson91c8a322016-07-05 10:40:23 +01002718 struct drm_device *dev = &dev_priv->drm;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002719 struct intel_engine_cs *engine;
Chris Wilson67d97da2016-07-04 08:08:31 +01002720 bool rearm_hangcheck;
2721
2722 if (!READ_ONCE(dev_priv->gt.awake))
2723 return;
2724
2725 if (READ_ONCE(dev_priv->gt.active_engines))
2726 return;
2727
2728 rearm_hangcheck =
2729 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2730
2731 if (!mutex_trylock(&dev->struct_mutex)) {
2732 /* Currently busy, come back later */
2733 mod_delayed_work(dev_priv->wq,
2734 &dev_priv->gt.idle_work,
2735 msecs_to_jiffies(50));
2736 goto out_rearm;
2737 }
2738
2739 if (dev_priv->gt.active_engines)
2740 goto out_unlock;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002741
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002742 for_each_engine(engine, dev_priv)
Chris Wilson67d97da2016-07-04 08:08:31 +01002743 i915_gem_batch_pool_fini(&engine->batch_pool);
Zou Nan hai852835f2010-05-21 09:08:56 +08002744
Chris Wilson67d97da2016-07-04 08:08:31 +01002745 GEM_BUG_ON(!dev_priv->gt.awake);
2746 dev_priv->gt.awake = false;
2747 rearm_hangcheck = false;
Daniel Vetter30ecad72015-12-09 09:29:36 +01002748
Chris Wilson67d97da2016-07-04 08:08:31 +01002749 if (INTEL_GEN(dev_priv) >= 6)
2750 gen6_rps_idle(dev_priv);
2751 intel_runtime_pm_put(dev_priv);
2752out_unlock:
2753 mutex_unlock(&dev->struct_mutex);
Chris Wilson35c94182015-04-07 16:20:37 +01002754
Chris Wilson67d97da2016-07-04 08:08:31 +01002755out_rearm:
2756 if (rearm_hangcheck) {
2757 GEM_BUG_ON(!dev_priv->gt.awake);
2758 i915_queue_hangcheck(dev_priv);
Chris Wilson35c94182015-04-07 16:20:37 +01002759 }
Eric Anholt673a3942008-07-30 12:06:12 -07002760}
2761
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002762void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002763{
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002764 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2765 struct drm_i915_file_private *fpriv = file->driver_priv;
2766 struct i915_vma *vma, *vn;
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002767
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002768 mutex_lock(&obj->base.dev->struct_mutex);
2769 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2770 if (vma->vm->file == fpriv)
2771 i915_vma_close(vma);
2772 mutex_unlock(&obj->base.dev->struct_mutex);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002773}
2774
2775/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002776 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002777 * @dev: drm device pointer
2778 * @data: ioctl data blob
2779 * @file: drm file pointer
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002780 *
2781 * Returns 0 if successful, else an error is returned with the remaining time in
2782 * the timeout parameter.
2783 * -ETIME: object is still busy after timeout
2784 * -ERESTARTSYS: signal interrupted the wait
                                     2785 * -ENOENT: object doesn't exist
2786 * Also possible, but rare:
2787 * -EAGAIN: GPU wedged
2788 * -ENOMEM: damn
2789 * -ENODEV: Internal IRQ fail
2790 * -E?: The add request failed
2791 *
2792 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2793 * non-zero timeout parameter the wait ioctl will wait for the given number of
2794 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2795 * without holding struct_mutex the object may become re-busied before this
2796 * function completes. A similar but shorter * race condition exists in the busy
2797 * ioctl
2798 */
2799int
2800i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2801{
2802 struct drm_i915_gem_wait *args = data;
Chris Wilson033d5492016-08-05 10:14:17 +01002803 struct intel_rps_client *rps = to_rps_client(file);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002804 struct drm_i915_gem_object *obj;
Chris Wilson033d5492016-08-05 10:14:17 +01002805 unsigned long active;
2806 int idx, ret = 0;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002807
Daniel Vetter11b5d512014-09-29 15:31:26 +02002808 if (args->flags != 0)
2809 return -EINVAL;
2810
Chris Wilson03ac0642016-07-20 13:31:51 +01002811 obj = i915_gem_object_lookup(file, args->bo_handle);
Chris Wilson033d5492016-08-05 10:14:17 +01002812 if (!obj)
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002813 return -ENOENT;
Chris Wilson033d5492016-08-05 10:14:17 +01002814
2815 active = __I915_BO_ACTIVE(obj);
2816 for_each_active(active, idx) {
2817 s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
Chris Wilsonea746f32016-09-09 14:11:49 +01002818 ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
2819 I915_WAIT_INTERRUPTIBLE,
Chris Wilson033d5492016-08-05 10:14:17 +01002820 timeout, rps);
2821 if (ret)
2822 break;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002823 }
2824
Chris Wilson033d5492016-08-05 10:14:17 +01002825 i915_gem_object_put_unlocked(obj);
John Harrisonff865882014-11-24 18:49:28 +00002826 return ret;
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002827}
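/*
 * Sketch of a userspace caller of the wait ioctl above (not part of the
 * driver; the struct and ioctl number come from the i915 uapi headers).
 * A zero return means the object went idle within the timeout and
 * wait.timeout_ns holds the time remaining:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 10 * 1000 * 1000,
 *	};
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */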
2828
Chris Wilson8ef85612016-04-28 09:56:39 +01002829static void __i915_vma_iounmap(struct i915_vma *vma)
2830{
Chris Wilson20dfbde2016-08-04 16:32:30 +01002831 GEM_BUG_ON(i915_vma_is_pinned(vma));
Chris Wilson8ef85612016-04-28 09:56:39 +01002832
2833 if (vma->iomap == NULL)
2834 return;
2835
2836 io_mapping_unmap(vma->iomap);
2837 vma->iomap = NULL;
2838}
2839
Chris Wilsondf0e9a22016-08-04 07:52:47 +01002840int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002841{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002842 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002843 unsigned long active;
Chris Wilson43e28f02013-01-08 10:53:09 +00002844 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002845
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002846 /* First wait upon any activity as retiring the request may
2847 * have side-effects such as unpinning or even unbinding this vma.
2848 */
2849 active = i915_vma_get_active(vma);
Chris Wilsondf0e9a22016-08-04 07:52:47 +01002850 if (active) {
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002851 int idx;
Eric Anholt673a3942008-07-30 12:06:12 -07002852
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002853 /* When a closed VMA is retired, it is unbound - eek.
2854 * In order to prevent it from being recursively closed,
2855 * take a pin on the vma so that the second unbind is
2856 * aborted.
2857 */
Chris Wilson20dfbde2016-08-04 16:32:30 +01002858 __i915_vma_pin(vma);
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002859
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002860 for_each_active(active, idx) {
2861 ret = i915_gem_active_retire(&vma->last_read[idx],
2862 &vma->vm->dev->struct_mutex);
2863 if (ret)
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002864 break;
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002865 }
2866
Chris Wilson20dfbde2016-08-04 16:32:30 +01002867 __i915_vma_unpin(vma);
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002868 if (ret)
2869 return ret;
2870
Chris Wilsonb0decaf2016-08-04 07:52:44 +01002871 GEM_BUG_ON(i915_vma_is_active(vma));
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002872 }
Ben Widawsky433544b2013-08-13 18:09:06 -07002873
Chris Wilson20dfbde2016-08-04 16:32:30 +01002874 if (i915_vma_is_pinned(vma))
Chris Wilson31d8d652012-05-24 19:11:20 +01002875 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002876
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002877 if (!drm_mm_node_allocated(&vma->node))
2878 goto destroy;
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002879
Chris Wilson15717de2016-08-04 07:52:26 +01002880 GEM_BUG_ON(obj->bind_count == 0);
2881 GEM_BUG_ON(!obj->pages);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002882
Chris Wilson05a20d02016-08-18 17:16:55 +01002883 if (i915_vma_is_map_and_fenceable(vma)) {
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002884 /* release the fence reg _after_ flushing */
Chris Wilson49ef5292016-08-18 17:17:00 +01002885 ret = i915_vma_put_fence(vma);
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002886 if (ret)
2887 return ret;
Chris Wilson8ef85612016-04-28 09:56:39 +01002888
Chris Wilsoncd3127d2016-08-18 17:17:09 +01002889 /* Force a pagefault for domain tracking on next user access */
2890 i915_gem_release_mmap(obj);
2891
Chris Wilson8ef85612016-04-28 09:56:39 +01002892 __i915_vma_iounmap(vma);
Chris Wilson05a20d02016-08-18 17:16:55 +01002893 vma->flags &= ~I915_VMA_CAN_FENCE;
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002894 }
Daniel Vetter96b47b62009-12-15 17:50:00 +01002895
Chris Wilson50e046b2016-08-04 07:52:46 +01002896 if (likely(!vma->vm->closed)) {
2897 trace_i915_vma_unbind(vma);
2898 vma->vm->unbind_vma(vma);
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00002899 }
Chris Wilson3272db52016-08-04 16:32:32 +01002900 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
Eric Anholt673a3942008-07-30 12:06:12 -07002901
Ben Widawsky2f633152013-07-17 12:19:03 -07002902 drm_mm_remove_node(&vma->node);
Chris Wilson50e046b2016-08-04 07:52:46 +01002903 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2904
Chris Wilson05a20d02016-08-18 17:16:55 +01002905 if (vma->pages != obj->pages) {
2906 GEM_BUG_ON(!vma->pages);
2907 sg_free_table(vma->pages);
2908 kfree(vma->pages);
Ben Widawsky2f633152013-07-17 12:19:03 -07002909 }
Chris Wilson247177d2016-08-15 10:48:47 +01002910 vma->pages = NULL;
Ben Widawsky2f633152013-07-17 12:19:03 -07002911
2912 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002913 * no more VMAs exist. */
Chris Wilson15717de2016-08-04 07:52:26 +01002914 if (--obj->bind_count == 0)
2915 list_move_tail(&obj->global_list,
2916 &to_i915(obj->base.dev)->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002917
Chris Wilson70903c32013-12-04 09:59:09 +00002918 /* And finally now the object is completely decoupled from this vma,
2919 * we can drop its hold on the backing storage and allow it to be
2920 * reaped by the shrinker.
2921 */
2922 i915_gem_object_unpin_pages(obj);
2923
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002924destroy:
Chris Wilson3272db52016-08-04 16:32:32 +01002925 if (unlikely(i915_vma_is_closed(vma)))
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002926 i915_vma_destroy(vma);
2927
Chris Wilson88241782011-01-07 17:09:48 +00002928 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002929}
2930
Chris Wilsondcff85c2016-08-05 10:14:11 +01002931int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
Chris Wilsonea746f32016-09-09 14:11:49 +01002932 unsigned int flags)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002933{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002934 struct intel_engine_cs *engine;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002935 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002936
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002937 for_each_engine(engine, dev_priv) {
Chris Wilson62e63002016-06-24 14:55:52 +01002938 if (engine->last_context == NULL)
2939 continue;
Ben Widawskyb6c74882012-08-14 14:35:14 -07002940
Chris Wilsonea746f32016-09-09 14:11:49 +01002941 ret = intel_engine_idle(engine, flags);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002942 if (ret)
2943 return ret;
2944 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002945
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002946 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002947}
2948
Chris Wilson4144f9b2014-09-11 08:43:48 +01002949static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
Chris Wilson42d6ab42012-07-26 11:49:32 +01002950 unsigned long cache_level)
2951{
Chris Wilson4144f9b2014-09-11 08:43:48 +01002952 struct drm_mm_node *gtt_space = &vma->node;
Chris Wilson42d6ab42012-07-26 11:49:32 +01002953 struct drm_mm_node *other;
2954
Chris Wilson4144f9b2014-09-11 08:43:48 +01002955 /*
2956 * On some machines we have to be careful when putting differing types
2957 * of snoopable memory together to avoid the prefetcher crossing memory
2958 * domains and dying. During vm initialisation, we decide whether or not
2959 * these constraints apply and set the drm_mm.color_adjust
2960 * appropriately.
Chris Wilson42d6ab42012-07-26 11:49:32 +01002961 */
Chris Wilson4144f9b2014-09-11 08:43:48 +01002962 if (vma->vm->mm.color_adjust == NULL)
Chris Wilson42d6ab42012-07-26 11:49:32 +01002963 return true;
2964
Ben Widawskyc6cfb322013-07-05 14:41:06 -07002965 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01002966 return true;
2967
2968 if (list_empty(&gtt_space->node_list))
2969 return true;
2970
2971 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2972 if (other->allocated && !other->hole_follows && other->color != cache_level)
2973 return false;
2974
2975 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2976 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2977 return false;
2978
2979 return true;
2980}
2981
Jesse Barnesde151cf2008-11-12 10:03:55 -08002982/**
Chris Wilson59bfa122016-08-04 16:32:31 +01002983 * i915_vma_insert - finds a slot for the vma in its address space
2984 * @vma: the vma
Chris Wilson91b2db62016-08-04 16:32:23 +01002985 * @size: requested size in bytes (can be larger than the VMA)
Chris Wilson59bfa122016-08-04 16:32:31 +01002986 * @alignment: required alignment
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002987 * @flags: mask of PIN_* flags to use
Chris Wilson59bfa122016-08-04 16:32:31 +01002988 *
2989 * First we try to allocate some free space that meets the requirements for
                                     2990 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
                                     2991 * preferably the oldest idle entry to make room for the new VMA.
2992 *
2993 * Returns:
2994 * 0 on success, negative error code otherwise.
Eric Anholt673a3942008-07-30 12:06:12 -07002995 */
Chris Wilson59bfa122016-08-04 16:32:31 +01002996static int
2997i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
Eric Anholt673a3942008-07-30 12:06:12 -07002998{
Chris Wilson59bfa122016-08-04 16:32:31 +01002999 struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
3000 struct drm_i915_gem_object *obj = vma->obj;
Michel Thierry101b5062015-10-01 13:33:57 +01003001 u64 start, end;
Chris Wilson07f73f62009-09-14 16:50:30 +01003002 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003003
Chris Wilson3272db52016-08-04 16:32:32 +01003004 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
Chris Wilson59bfa122016-08-04 16:32:31 +01003005 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003006
Chris Wilsonde180032016-08-04 16:32:29 +01003007 size = max(size, vma->size);
3008 if (flags & PIN_MAPPABLE)
Chris Wilson3e510a82016-08-05 10:14:23 +01003009 size = i915_gem_get_ggtt_size(dev_priv, size,
3010 i915_gem_object_get_tiling(obj));
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003011
Chris Wilsond8923dc2016-08-18 17:17:07 +01003012 alignment = max(max(alignment, vma->display_alignment),
3013 i915_gem_get_ggtt_alignment(dev_priv, size,
3014 i915_gem_object_get_tiling(obj),
3015 flags & PIN_MAPPABLE));
Chris Wilsona00b10c2010-09-24 21:15:47 +01003016
Michel Thierry101b5062015-10-01 13:33:57 +01003017 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
Chris Wilsonde180032016-08-04 16:32:29 +01003018
3019 end = vma->vm->total;
Michel Thierry101b5062015-10-01 13:33:57 +01003020 if (flags & PIN_MAPPABLE)
Chris Wilson91b2db62016-08-04 16:32:23 +01003021 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
Michel Thierry101b5062015-10-01 13:33:57 +01003022 if (flags & PIN_ZONE_4G)
Michel Thierry48ea1e32016-01-11 11:39:27 +00003023 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
Michel Thierry101b5062015-10-01 13:33:57 +01003024
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003025 /* If binding the object/GGTT view requires more space than the entire
3026 * aperture has, reject it early before evicting everything in a vain
3027 * attempt to find space.
Chris Wilson654fc602010-05-27 13:18:21 +01003028 */
Joonas Lahtinen91e67112015-05-06 14:33:58 +03003029 if (size > end) {
Chris Wilsonde180032016-08-04 16:32:29 +01003030 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
Chris Wilson91b2db62016-08-04 16:32:23 +01003031 size, obj->base.size,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003032 flags & PIN_MAPPABLE ? "mappable" : "total",
Chris Wilsond23db882014-05-23 08:48:08 +02003033 end);
Chris Wilson59bfa122016-08-04 16:32:31 +01003034 return -E2BIG;
Chris Wilson654fc602010-05-27 13:18:21 +01003035 }
3036
Chris Wilson37e680a2012-06-07 15:38:42 +01003037 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003038 if (ret)
Chris Wilson59bfa122016-08-04 16:32:31 +01003039 return ret;
Chris Wilson6c085a72012-08-20 11:40:46 +02003040
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003041 i915_gem_object_pin_pages(obj);
3042
Chris Wilson506a8e82015-12-08 11:55:07 +00003043 if (flags & PIN_OFFSET_FIXED) {
Chris Wilson59bfa122016-08-04 16:32:31 +01003044 u64 offset = flags & PIN_OFFSET_MASK;
Chris Wilsonde180032016-08-04 16:32:29 +01003045 if (offset & (alignment - 1) || offset > end - size) {
Chris Wilson506a8e82015-12-08 11:55:07 +00003046 ret = -EINVAL;
Chris Wilsonde180032016-08-04 16:32:29 +01003047 goto err_unpin;
Chris Wilson506a8e82015-12-08 11:55:07 +00003048 }
Chris Wilsonde180032016-08-04 16:32:29 +01003049
Chris Wilson506a8e82015-12-08 11:55:07 +00003050 vma->node.start = offset;
3051 vma->node.size = size;
3052 vma->node.color = obj->cache_level;
Chris Wilsonde180032016-08-04 16:32:29 +01003053 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
Chris Wilson506a8e82015-12-08 11:55:07 +00003054 if (ret) {
3055 ret = i915_gem_evict_for_vma(vma);
3056 if (ret == 0)
Chris Wilsonde180032016-08-04 16:32:29 +01003057 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3058 if (ret)
3059 goto err_unpin;
Chris Wilson506a8e82015-12-08 11:55:07 +00003060 }
Michel Thierry101b5062015-10-01 13:33:57 +01003061 } else {
Chris Wilsonde180032016-08-04 16:32:29 +01003062 u32 search_flag, alloc_flag;
3063
Chris Wilson506a8e82015-12-08 11:55:07 +00003064 if (flags & PIN_HIGH) {
3065 search_flag = DRM_MM_SEARCH_BELOW;
3066 alloc_flag = DRM_MM_CREATE_TOP;
3067 } else {
3068 search_flag = DRM_MM_SEARCH_DEFAULT;
3069 alloc_flag = DRM_MM_CREATE_DEFAULT;
3070 }
Michel Thierry101b5062015-10-01 13:33:57 +01003071
Chris Wilson954c4692016-08-04 16:32:26 +01003072 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3073 * so we know that we always have a minimum alignment of 4096.
3074 * The drm_mm range manager is optimised to return results
3075 * with zero alignment, so where possible use the optimal
3076 * path.
3077 */
3078 if (alignment <= 4096)
3079 alignment = 0;
3080
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003081search_free:
Chris Wilsonde180032016-08-04 16:32:29 +01003082 ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
3083 &vma->node,
Chris Wilson506a8e82015-12-08 11:55:07 +00003084 size, alignment,
3085 obj->cache_level,
3086 start, end,
3087 search_flag,
3088 alloc_flag);
3089 if (ret) {
Chris Wilsonde180032016-08-04 16:32:29 +01003090 ret = i915_gem_evict_something(vma->vm, size, alignment,
Chris Wilson506a8e82015-12-08 11:55:07 +00003091 obj->cache_level,
3092 start, end,
3093 flags);
3094 if (ret == 0)
3095 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003096
Chris Wilsonde180032016-08-04 16:32:29 +01003097 goto err_unpin;
Chris Wilson506a8e82015-12-08 11:55:07 +00003098 }
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003099 }
Chris Wilson37508582016-08-04 16:32:24 +01003100 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003101
Ben Widawsky35c20a62013-05-31 11:28:48 -07003102 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Chris Wilsonde180032016-08-04 16:32:29 +01003103 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
Chris Wilson15717de2016-08-04 07:52:26 +01003104 obj->bind_count++;
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003105
Chris Wilson59bfa122016-08-04 16:32:31 +01003106 return 0;
Ben Widawsky2f633152013-07-17 12:19:03 -07003107
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003108err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003109 i915_gem_object_unpin_pages(obj);
Chris Wilson59bfa122016-08-04 16:32:31 +01003110 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003111}
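/*
 * Note on the two placement paths above: PIN_OFFSET_FIXED reserves the exact
 * node requested by the caller (evicting any overlapping VMA via
 * i915_gem_evict_for_vma() if necessary), whereas the search path asks the
 * drm_mm range manager for a free slot and falls back to
 * i915_gem_evict_something() when the address space is full.
 */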
3112
Chris Wilson000433b2013-08-08 14:41:09 +01003113bool
Chris Wilson2c225692013-08-09 12:26:45 +01003114i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3115 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003116{
Eric Anholt673a3942008-07-30 12:06:12 -07003117 /* If we don't have a page list set up, then we're not pinned
3118 * to GPU, and we can ignore the cache flush because it'll happen
3119 * again at bind time.
3120 */
Chris Wilson05394f32010-11-08 19:18:58 +00003121 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003122 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003123
Imre Deak769ce462013-02-13 21:56:05 +02003124 /*
3125 * Stolen memory is always coherent with the GPU as it is explicitly
3126 * marked as wc by the system, or the system is cache-coherent.
3127 */
Chris Wilson6a2c4232014-11-04 04:51:40 -08003128 if (obj->stolen || obj->phys_handle)
Chris Wilson000433b2013-08-08 14:41:09 +01003129 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003130
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003131 /* If the GPU is snooping the contents of the CPU cache,
3132 * we do not need to manually clear the CPU cache lines. However,
3133 * the caches are only snooped when the render cache is
3134 * flushed/invalidated. As we always have to emit invalidations
3135 * and flushes when moving into and out of the RENDER domain, correct
3136 * snooping behaviour occurs naturally as the result of our domain
3137 * tracking.
3138 */
Chris Wilson0f719792015-01-13 13:32:52 +00003139 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3140 obj->cache_dirty = true;
Chris Wilson000433b2013-08-08 14:41:09 +01003141 return false;
Chris Wilson0f719792015-01-13 13:32:52 +00003142 }
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003143
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003144 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003145 drm_clflush_sg(obj->pages);
Chris Wilson0f719792015-01-13 13:32:52 +00003146 obj->cache_dirty = false;
Chris Wilson000433b2013-08-08 14:41:09 +01003147
3148 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003149}
3150
3151/** Flushes the GTT write domain for the object if it's dirty. */
3152static void
Chris Wilson05394f32010-11-08 19:18:58 +00003153i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003154{
Chris Wilson3b5724d2016-08-18 17:16:49 +01003155 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003156
Chris Wilson05394f32010-11-08 19:18:58 +00003157 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003158 return;
3159
Chris Wilson63256ec2011-01-04 18:42:07 +00003160 /* No actual flushing is required for the GTT write domain. Writes
Chris Wilson3b5724d2016-08-18 17:16:49 +01003161 * to it "immediately" go to main memory as far as we know, so there's
Eric Anholte47c68e2008-11-14 13:35:19 -08003162 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003163 *
3164 * However, we do have to enforce the order so that all writes through
3165 * the GTT land before any writes to the device, such as updates to
3166 * the GATT itself.
Chris Wilson3b5724d2016-08-18 17:16:49 +01003167 *
3168 * We also have to wait a bit for the writes to land from the GTT.
3169 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
3170 * timing. This issue has only been observed when switching quickly
3171 * between GTT writes and CPU reads from inside the kernel on recent hw,
3172 * and it appears to only affect discrete GTT blocks (i.e. on LLC
3173 * system agents we cannot reproduce this behaviour).
Eric Anholte47c68e2008-11-14 13:35:19 -08003174 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003175 wmb();
Chris Wilson3b5724d2016-08-18 17:16:49 +01003176 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3177 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
Chris Wilson63256ec2011-01-04 18:42:07 +00003178
Chris Wilsond243ad82016-08-18 17:16:44 +01003179 intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
Daniel Vetterf99d7062014-06-19 16:01:59 +02003180
Eric Anholte47c68e2008-11-14 13:35:19 -08003181 obj->base.write_domain = 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08003182 trace_i915_gem_object_change_domain(obj,
3183 obj->base.read_domains,
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003184 I915_GEM_DOMAIN_GTT);
Eric Anholte47c68e2008-11-14 13:35:19 -08003185}
3186
3187/** Flushes the CPU write domain for the object if it's dirty. */
3188static void
Daniel Vettere62b59e2015-01-21 14:53:48 +01003189i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003190{
Chris Wilson05394f32010-11-08 19:18:58 +00003191 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003192 return;
3193
Daniel Vettere62b59e2015-01-21 14:53:48 +01003194 if (i915_gem_clflush_object(obj, obj->pin_display))
Chris Wilsonc0336662016-05-06 15:40:21 +01003195 i915_gem_chipset_flush(to_i915(obj->base.dev));
Chris Wilson000433b2013-08-08 14:41:09 +01003196
Rodrigo Vivide152b62015-07-07 16:28:51 -07003197 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
Daniel Vetterf99d7062014-06-19 16:01:59 +02003198
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003199 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003200 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003201 obj->base.read_domains,
Chris Wilsonb0dc4652016-08-18 17:16:51 +01003202 I915_GEM_DOMAIN_CPU);
Eric Anholte47c68e2008-11-14 13:35:19 -08003203}
3204
Chris Wilson383d5822016-08-18 17:17:08 +01003205static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
3206{
3207 struct i915_vma *vma;
3208
3209 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3210 if (!i915_vma_is_ggtt(vma))
3211 continue;
3212
3213 if (i915_vma_is_active(vma))
3214 continue;
3215
3216 if (!drm_mm_node_allocated(&vma->node))
3217 continue;
3218
3219 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3220 }
Eric Anholt673a3942008-07-30 12:06:12 -07003221}
3222
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003223/**
3224 * Moves a single object to the GTT read, and possibly write domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003225 * @obj: object to act on
3226 * @write: ask for write access or read only
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003227 *
3228 * This function returns when the move is complete, including waiting on
3229 * flushes to occur.
3230 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003231int
Chris Wilson20217462010-11-23 15:26:33 +00003232i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003233{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003234 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003235 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003236
Chris Wilson0201f1e2012-07-20 12:41:01 +01003237 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003238 if (ret)
3239 return ret;
3240
Chris Wilsonc13d87e2016-07-20 09:21:15 +01003241 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3242 return 0;
3243
Chris Wilson43566de2015-01-02 16:29:29 +05303244 /* Flush and acquire obj->pages so that we are coherent through
3245 * direct access in memory with previous cached writes through
3246 * shmemfs and that our cache domain tracking remains valid.
3247 * For example, if the obj->filp was moved to swap without us
3248 * being notified and releasing the pages, we would mistakenly
3249 * continue to assume that the obj remained out of the CPU cached
3250 * domain.
3251 */
3252 ret = i915_gem_object_get_pages(obj);
3253 if (ret)
3254 return ret;
3255
Daniel Vettere62b59e2015-01-21 14:53:48 +01003256 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003257
Chris Wilsond0a57782012-10-09 19:24:37 +01003258 /* Serialise direct access to this object with the barriers for
3259 * coherent writes from the GPU, by effectively invalidating the
3260 * GTT domain upon first access.
3261 */
3262 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3263 mb();
3264
Chris Wilson05394f32010-11-08 19:18:58 +00003265 old_write_domain = obj->base.write_domain;
3266 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003267
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003268 /* It should now be out of any other write domains, and we can update
3269 * the domain values for our changes.
3270 */
Chris Wilson05394f32010-11-08 19:18:58 +00003271 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3272 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003273 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003274 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3275 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3276 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003277 }
3278
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003279 trace_i915_gem_object_change_domain(obj,
3280 old_read_domains,
3281 old_write_domain);
3282
Chris Wilson8325a092012-04-24 15:52:35 +01003283 /* And bump the LRU for this access */
Chris Wilson383d5822016-08-18 17:17:08 +01003284 i915_gem_object_bump_inactive_ggtt(obj);
Chris Wilson8325a092012-04-24 15:52:35 +01003285
Eric Anholte47c68e2008-11-14 13:35:19 -08003286 return 0;
3287}
3288
Chris Wilsonef55f922015-10-09 14:11:27 +01003289/**
3290 * Changes the cache-level of an object across all VMA.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003291 * @obj: object to act on
3292 * @cache_level: new cache level to set for the object
Chris Wilsonef55f922015-10-09 14:11:27 +01003293 *
3294 * After this function returns, the object will be in the new cache-level
3295 * across all GTT and the contents of the backing storage will be coherent,
3296 * with respect to the new cache-level. In order to keep the backing storage
3297 * coherent for all users, we only allow a single cache level to be set
3298 * globally on the object and prevent it from being changed whilst the
3299 * hardware is reading from the object. That is if the object is currently
3300 * on the scanout it will be set to uncached (or equivalent display
3301 * cache coherency) and all non-MOCS GPU access will also be uncached so
3302 * that all direct access to the scanout remains coherent.
3303 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003304int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3305 enum i915_cache_level cache_level)
3306{
Chris Wilsonaa653a62016-08-04 07:52:27 +01003307 struct i915_vma *vma;
Ville Syrjäläed75a552015-08-11 19:47:10 +03003308 int ret = 0;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003309
3310 if (obj->cache_level == cache_level)
Ville Syrjäläed75a552015-08-11 19:47:10 +03003311 goto out;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003312
Chris Wilsonef55f922015-10-09 14:11:27 +01003313 /* Inspect the list of currently bound VMA and unbind any that would
3314 * be invalid given the new cache-level. This is principally to
3315 * catch the issue of the CS prefetch crossing page boundaries and
3316 * reading an invalid PTE on older architectures.
3317 */
Chris Wilsonaa653a62016-08-04 07:52:27 +01003318restart:
3319 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003320 if (!drm_mm_node_allocated(&vma->node))
3321 continue;
3322
Chris Wilson20dfbde2016-08-04 16:32:30 +01003323 if (i915_vma_is_pinned(vma)) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003324 DRM_DEBUG("can not change the cache level of pinned objects\n");
3325 return -EBUSY;
3326 }
3327
Chris Wilsonaa653a62016-08-04 07:52:27 +01003328 if (i915_gem_valid_gtt_space(vma, cache_level))
3329 continue;
3330
3331 ret = i915_vma_unbind(vma);
3332 if (ret)
3333 return ret;
3334
3335 /* As unbinding may affect other elements in the
3336 * obj->vma_list (due to side-effects from retiring
3337 * an active vma), play safe and restart the iterator.
3338 */
3339 goto restart;
Chris Wilson42d6ab42012-07-26 11:49:32 +01003340 }
3341
Chris Wilsonef55f922015-10-09 14:11:27 +01003342 /* We can reuse the existing drm_mm nodes but need to change the
3343 * cache-level on the PTE. We could simply unbind them all and
3344 * rebind with the correct cache-level on next use. However since
                                     3345	 * we already have a valid slot, dma mapping, pages etc, we may as well
3346 * rewrite the PTE in the belief that doing so tramples upon less
3347 * state and so involves less work.
3348 */
Chris Wilson15717de2016-08-04 07:52:26 +01003349 if (obj->bind_count) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003350 /* Before we change the PTE, the GPU must not be accessing it.
3351 * If we wait upon the object, we know that all the bound
3352 * VMA are no longer active.
3353 */
Chris Wilson2e2f3512015-04-27 13:41:14 +01003354 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003355 if (ret)
3356 return ret;
3357
Chris Wilsonaa653a62016-08-04 07:52:27 +01003358 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003359 /* Access to snoopable pages through the GTT is
3360 * incoherent and on some machines causes a hard
                                     3361	 * lockup. Relinquish the CPU mmap to force
3362 * userspace to refault in the pages and we can
3363 * then double check if the GTT mapping is still
3364 * valid for that pointer access.
3365 */
3366 i915_gem_release_mmap(obj);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003367
Chris Wilsonef55f922015-10-09 14:11:27 +01003368 /* As we no longer need a fence for GTT access,
3369 * we can relinquish it now (and so prevent having
3370 * to steal a fence from someone else on the next
3371 * fence request). Note GPU activity would have
3372 * dropped the fence as all snoopable access is
3373 * supposed to be linear.
3374 */
Chris Wilson49ef5292016-08-18 17:17:00 +01003375 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3376 ret = i915_vma_put_fence(vma);
3377 if (ret)
3378 return ret;
3379 }
Chris Wilsonef55f922015-10-09 14:11:27 +01003380 } else {
3381 /* We either have incoherent backing store and
3382 * so no GTT access or the architecture is fully
3383 * coherent. In such cases, existing GTT mmaps
3384 * ignore the cache bit in the PTE and we can
3385 * rewrite it without confusing the GPU or having
3386 * to force userspace to fault back in its mmaps.
3387 */
Chris Wilsone4ffd172011-04-04 09:44:39 +01003388 }
3389
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003390 list_for_each_entry(vma, &obj->vma_list, obj_link) {
Chris Wilsonef55f922015-10-09 14:11:27 +01003391 if (!drm_mm_node_allocated(&vma->node))
3392 continue;
3393
3394 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3395 if (ret)
3396 return ret;
3397 }
Chris Wilsone4ffd172011-04-04 09:44:39 +01003398 }
3399
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00003400 list_for_each_entry(vma, &obj->vma_list, obj_link)
Chris Wilson2c225692013-08-09 12:26:45 +01003401 vma->node.color = cache_level;
3402 obj->cache_level = cache_level;
3403
Ville Syrjäläed75a552015-08-11 19:47:10 +03003404out:
Chris Wilsonef55f922015-10-09 14:11:27 +01003405 /* Flush the dirty CPU caches to the backing storage so that the
3406 * object is now coherent at its new cache level (with respect
3407 * to the access domain).
3408 */
Ankitprasad Sharmab50a5372016-06-10 14:23:03 +05303409 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
Chris Wilson0f719792015-01-13 13:32:52 +00003410 if (i915_gem_clflush_object(obj, true))
Chris Wilsonc0336662016-05-06 15:40:21 +01003411 i915_gem_chipset_flush(to_i915(obj->base.dev));
Chris Wilsone4ffd172011-04-04 09:44:39 +01003412 }
3413
Chris Wilsone4ffd172011-04-04 09:44:39 +01003414 return 0;
3415}
3416
Ben Widawsky199adf42012-09-21 17:01:20 -07003417int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3418 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003419{
Ben Widawsky199adf42012-09-21 17:01:20 -07003420 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003421 struct drm_i915_gem_object *obj;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003422
Chris Wilson03ac0642016-07-20 13:31:51 +01003423 obj = i915_gem_object_lookup(file, args->handle);
3424 if (!obj)
Chris Wilson432be692015-05-07 12:14:55 +01003425 return -ENOENT;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003426
Chris Wilson651d7942013-08-08 14:41:10 +01003427 switch (obj->cache_level) {
3428 case I915_CACHE_LLC:
3429 case I915_CACHE_L3_LLC:
3430 args->caching = I915_CACHING_CACHED;
3431 break;
3432
Chris Wilson4257d3b2013-08-08 14:41:11 +01003433 case I915_CACHE_WT:
3434 args->caching = I915_CACHING_DISPLAY;
3435 break;
3436
Chris Wilson651d7942013-08-08 14:41:10 +01003437 default:
3438 args->caching = I915_CACHING_NONE;
3439 break;
3440 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003441
Chris Wilson34911fd2016-07-20 13:31:54 +01003442 i915_gem_object_put_unlocked(obj);
Chris Wilson432be692015-05-07 12:14:55 +01003443 return 0;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003444}
3445
Ben Widawsky199adf42012-09-21 17:01:20 -07003446int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3447 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003448{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003449 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawsky199adf42012-09-21 17:01:20 -07003450 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003451 struct drm_i915_gem_object *obj;
3452 enum i915_cache_level level;
3453 int ret;
3454
Ben Widawsky199adf42012-09-21 17:01:20 -07003455 switch (args->caching) {
3456 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003457 level = I915_CACHE_NONE;
3458 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003459 case I915_CACHING_CACHED:
Imre Deake5756c12015-08-14 18:43:30 +03003460 /*
3461 * Due to a HW issue on BXT A stepping, GPU stores via a
3462 * snooped mapping may leave stale data in a corresponding CPU
3463 * cacheline, whereas normally such cachelines would get
3464 * invalidated.
3465 */
Tvrtko Ursulinca377802016-03-02 12:10:31 +00003466 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
Imre Deake5756c12015-08-14 18:43:30 +03003467 return -ENODEV;
3468
Chris Wilsone6994ae2012-07-10 10:27:08 +01003469 level = I915_CACHE_LLC;
3470 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003471 case I915_CACHING_DISPLAY:
3472 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3473 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003474 default:
3475 return -EINVAL;
3476 }
3477
Imre Deakfd0fe6a2015-11-04 21:25:32 +02003478 intel_runtime_pm_get(dev_priv);
3479
Ben Widawsky3bc29132012-09-26 16:15:20 -07003480 ret = i915_mutex_lock_interruptible(dev);
3481 if (ret)
Imre Deakfd0fe6a2015-11-04 21:25:32 +02003482 goto rpm_put;
Ben Widawsky3bc29132012-09-26 16:15:20 -07003483
Chris Wilson03ac0642016-07-20 13:31:51 +01003484 obj = i915_gem_object_lookup(file, args->handle);
3485 if (!obj) {
Chris Wilsone6994ae2012-07-10 10:27:08 +01003486 ret = -ENOENT;
3487 goto unlock;
3488 }
3489
3490 ret = i915_gem_object_set_cache_level(obj, level);
3491
Chris Wilsonf8c417c2016-07-20 13:31:53 +01003492 i915_gem_object_put(obj);
Chris Wilsone6994ae2012-07-10 10:27:08 +01003493unlock:
3494 mutex_unlock(&dev->struct_mutex);
Imre Deakfd0fe6a2015-11-04 21:25:32 +02003495rpm_put:
3496 intel_runtime_pm_put(dev_priv);
3497
Chris Wilsone6994ae2012-07-10 10:27:08 +01003498 return ret;
3499}
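/*
 * Sketch of a userspace caller of the caching ioctls above (illustrative;
 * the struct and ioctl names come from the i915 uapi headers):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */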
3500
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003501/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003502 * Prepare buffer for display plane (scanout, cursors, etc).
3503 * Can be called from an uninterruptible phase (modesetting) and allows
3504 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003505 */
Chris Wilson058d88c2016-08-15 10:49:06 +01003506struct i915_vma *
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003507i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3508 u32 alignment,
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003509 const struct i915_ggtt_view *view)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003510{
Chris Wilson058d88c2016-08-15 10:49:06 +01003511 struct i915_vma *vma;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003512 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003513 int ret;
3514
Chris Wilsoncc98b412013-08-09 12:25:09 +01003515 /* Mark the pin_display early so that we account for the
3516 * display coherency whilst setting up the cache domains.
3517 */
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003518 obj->pin_display++;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003519
Eric Anholta7ef0642011-03-29 16:59:54 -07003520 /* The display engine is not coherent with the LLC cache on gen6. As
3521 * a result, we make sure that the pinning that is about to occur is
3522 * done with uncached PTEs. This is lowest common denominator for all
3523 * chipsets.
3524 *
3525 * However for gen6+, we could do better by using the GFDT bit instead
3526 * of uncaching, which would allow us to flush all the LLC-cached data
3527 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3528 */
Chris Wilson651d7942013-08-08 14:41:10 +01003529 ret = i915_gem_object_set_cache_level(obj,
3530 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Chris Wilson058d88c2016-08-15 10:49:06 +01003531 if (ret) {
3532 vma = ERR_PTR(ret);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003533 goto err_unpin_display;
Chris Wilson058d88c2016-08-15 10:49:06 +01003534 }
Eric Anholta7ef0642011-03-29 16:59:54 -07003535
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003536 /* As the user may map the buffer once pinned in the display plane
3537 * (e.g. libkms for the bootup splash), we have to ensure that we
Chris Wilson2efb8132016-08-18 17:17:06 +01003538 * always use map_and_fenceable for all scanout buffers. However,
3539 * it may simply be too big to fit into mappable, in which case
3540 * put it anyway and hope that userspace can cope (but always first
3541 * try to preserve the existing ABI).
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003542 */
Chris Wilson2efb8132016-08-18 17:17:06 +01003543 vma = ERR_PTR(-ENOSPC);
3544 if (view->type == I915_GGTT_VIEW_NORMAL)
3545 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3546 PIN_MAPPABLE | PIN_NONBLOCK);
Chris Wilson54905ab2016-11-07 11:01:28 +00003547 if (IS_ERR(vma)) {
3548 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3549 unsigned int flags;
3550
3551 /* Valleyview is definitely limited to scanning out the first
	3552	 * 512MiB. Let's presume this behaviour was inherited from the
3553 * g4x display engine and that all earlier gen are similarly
3554 * limited. Testing suggests that it is a little more
3555 * complicated than this. For example, Cherryview appears quite
	3556	 * happy to scan out from anywhere within its global aperture.
3557 */
3558 flags = 0;
3559 if (HAS_GMCH_DISPLAY(i915))
3560 flags = PIN_MAPPABLE;
3561 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3562 }
Chris Wilson058d88c2016-08-15 10:49:06 +01003563 if (IS_ERR(vma))
Chris Wilsoncc98b412013-08-09 12:25:09 +01003564 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003565
Chris Wilsond8923dc2016-08-18 17:17:07 +01003566 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3567
Daniel Vettere62b59e2015-01-21 14:53:48 +01003568 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003569
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003570 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003571 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003572
3573 /* It should now be out of any other write domains, and we can update
3574 * the domain values for our changes.
3575 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003576 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003577 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003578
3579 trace_i915_gem_object_change_domain(obj,
3580 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003581 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003582
Chris Wilson058d88c2016-08-15 10:49:06 +01003583 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003584
3585err_unpin_display:
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003586 obj->pin_display--;
Chris Wilson058d88c2016-08-15 10:49:06 +01003587 return vma;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003588}
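
/* Usage sketch, not a verbatim caller: display code (for example
 * intel_pin_and_fence_fb_obj() in the modeset path, named here as an
 * assumption) is expected to drive the pair of helpers roughly as
 *
 *	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	 ... program the plane registers using i915_ggtt_offset(vma) ...
 *	i915_gem_object_unpin_from_display_plane(vma);
 *
 * keeping obj->pin_display balanced across the flip.
 */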
3589
3590void
Chris Wilson058d88c2016-08-15 10:49:06 +01003591i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003592{
Chris Wilson058d88c2016-08-15 10:49:06 +01003593 if (WARN_ON(vma->obj->pin_display == 0))
Tvrtko Ursulin8a0c39b2015-04-13 11:50:09 +01003594 return;
3595
Chris Wilsond8923dc2016-08-18 17:17:07 +01003596 if (--vma->obj->pin_display == 0)
3597 vma->display_alignment = 0;
Tvrtko Ursuline6617332015-03-23 11:10:33 +00003598
Chris Wilson383d5822016-08-18 17:17:08 +01003599 /* Bump the LRU to try and avoid premature eviction whilst flipping */
3600 if (!i915_vma_is_active(vma))
3601 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3602
Chris Wilson058d88c2016-08-15 10:49:06 +01003603 i915_vma_unpin(vma);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003604}
3605
Eric Anholte47c68e2008-11-14 13:35:19 -08003606/**
	3607	 * Moves a single object to the CPU read, and possibly write, domain.
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003608 * @obj: object to act on
3609 * @write: requesting write or read-only access
Eric Anholte47c68e2008-11-14 13:35:19 -08003610 *
3611 * This function returns when the move is complete, including waiting on
3612 * flushes to occur.
3613 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003614int
Chris Wilson919926a2010-11-12 13:42:53 +00003615i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003616{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003617 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003618 int ret;
3619
Chris Wilson0201f1e2012-07-20 12:41:01 +01003620 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003621 if (ret)
3622 return ret;
3623
Chris Wilsonc13d87e2016-07-20 09:21:15 +01003624 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3625 return 0;
3626
Eric Anholte47c68e2008-11-14 13:35:19 -08003627 i915_gem_object_flush_gtt_write_domain(obj);
3628
Chris Wilson05394f32010-11-08 19:18:58 +00003629 old_write_domain = obj->base.write_domain;
3630 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003631
Eric Anholte47c68e2008-11-14 13:35:19 -08003632 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003633 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003634 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003635
Chris Wilson05394f32010-11-08 19:18:58 +00003636 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003637 }
3638
3639 /* It should now be out of any other write domains, and we can update
3640 * the domain values for our changes.
3641 */
Chris Wilson05394f32010-11-08 19:18:58 +00003642 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003643
3644 /* If we're writing through the CPU, then the GPU read domains will
3645 * need to be invalidated at next use.
3646 */
3647 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003648 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3649 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003650 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003651
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003652 trace_i915_gem_object_change_domain(obj,
3653 old_read_domains,
3654 old_write_domain);
3655
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003656 return 0;
3657}
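
/* Illustrative sketch of caller intent, not a real call site: code that
 * wants coherent CPU writes (for instance a pwrite slow path) would
 * bracket its access as
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	 ... write through the CPU mapping ...
 *
 * Passing write=false keeps the other read domains valid and only
 * guarantees that subsequent CPU reads see coherent data.
 */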
3658
Eric Anholt673a3942008-07-30 12:06:12 -07003659/* Throttle our rendering by waiting until the ring has completed our requests
3660 * emitted over 20 msec ago.
3661 *
Eric Anholtb9624422009-06-03 07:27:35 +00003662 * Note that if we were to use the current jiffies each time around the loop,
3663 * we wouldn't escape the function with any frames outstanding if the time to
3664 * render a frame was over 20ms.
3665 *
Eric Anholt673a3942008-07-30 12:06:12 -07003666 * This should get us reasonable parallelism between CPU and GPU but also
3667 * relatively low latency when blocking on a particular request to finish.
3668 */
3669static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003670i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003671{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003672 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003673 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilsond0bc54f2015-05-21 21:01:48 +01003674 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
John Harrison54fb2412014-11-24 18:49:27 +00003675 struct drm_i915_gem_request *request, *target = NULL;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003676 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003677
Daniel Vetter308887a2012-11-14 17:14:06 +01003678 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3679 if (ret)
3680 return ret;
3681
Chris Wilsonf4457ae2016-04-13 17:35:08 +01003682 /* ABI: return -EIO if already wedged */
3683 if (i915_terminally_wedged(&dev_priv->gpu_error))
3684 return -EIO;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003685
Chris Wilson1c255952010-09-26 11:03:27 +01003686 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003687 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003688 if (time_after_eq(request->emitted_jiffies, recent_enough))
3689 break;
3690
John Harrisonfcfa423c2015-05-29 17:44:12 +01003691 /*
	3692	 * Note that the request might not have been submitted yet,
	3693	 * in which case emitted_jiffies will be zero.
3694 */
3695 if (!request->emitted_jiffies)
3696 continue;
3697
John Harrison54fb2412014-11-24 18:49:27 +00003698 target = request;
Eric Anholtb9624422009-06-03 07:27:35 +00003699 }
John Harrisonff865882014-11-24 18:49:28 +00003700 if (target)
Chris Wilsone8a261e2016-07-20 13:31:49 +01003701 i915_gem_request_get(target);
Chris Wilson1c255952010-09-26 11:03:27 +01003702 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003703
John Harrison54fb2412014-11-24 18:49:27 +00003704 if (target == NULL)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003705 return 0;
3706
Chris Wilsonea746f32016-09-09 14:11:49 +01003707 ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
Chris Wilsone8a261e2016-07-20 13:31:49 +01003708 i915_gem_request_put(target);
John Harrisonff865882014-11-24 18:49:28 +00003709
Eric Anholt673a3942008-07-30 12:06:12 -07003710 return ret;
3711}
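
/* Worked example of the window above: the cutoff is computed once,
 *
 *	recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;	 about 20ms ago
 *
 * and the loop waits only for the newest request emitted before that
 * snapshot. Re-reading jiffies each iteration would move the cutoff
 * forward while we scan, so a client whose frames take longer than 20ms
 * to render would end up with nothing outstanding at all.
 */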
3712
Chris Wilsond23db882014-05-23 08:48:08 +02003713static bool
Chris Wilson91b2db62016-08-04 16:32:23 +01003714i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
Chris Wilsond23db882014-05-23 08:48:08 +02003715{
Chris Wilson59bfa122016-08-04 16:32:31 +01003716 if (!drm_mm_node_allocated(&vma->node))
3717 return false;
Chris Wilsond23db882014-05-23 08:48:08 +02003718
Chris Wilson91b2db62016-08-04 16:32:23 +01003719 if (vma->node.size < size)
Chris Wilsond23db882014-05-23 08:48:08 +02003720 return true;
3721
Chris Wilson91b2db62016-08-04 16:32:23 +01003722 if (alignment && vma->node.start & (alignment - 1))
Chris Wilsond23db882014-05-23 08:48:08 +02003723 return true;
3724
Chris Wilson05a20d02016-08-18 17:16:55 +01003725 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
Chris Wilsond23db882014-05-23 08:48:08 +02003726 return true;
3727
3728 if (flags & PIN_OFFSET_BIAS &&
3729 vma->node.start < (flags & PIN_OFFSET_MASK))
3730 return true;
3731
Chris Wilson506a8e82015-12-08 11:55:07 +00003732 if (flags & PIN_OFFSET_FIXED &&
3733 vma->node.start != (flags & PIN_OFFSET_MASK))
3734 return true;
3735
Chris Wilsond23db882014-05-23 08:48:08 +02003736 return false;
3737}
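
/* Illustrative example of the flags checked above (the offset value is
 * made up; PIN_OFFSET_FIXED, PIN_OFFSET_MASK and friends are the flag
 * bits defined elsewhere in the driver):
 *
 *	u64 flags = PIN_MAPPABLE | PIN_OFFSET_FIXED | (0x10000 & PIN_OFFSET_MASK);
 *
 * With such flags, i915_vma_misplaced() reports true unless the vma is
 * map-and-fenceable and bound at exactly 0x10000, which then forces an
 * unbind/rebind in the pinning path below.
 */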
3738
Chris Wilsond0710ab2015-11-20 14:16:39 +00003739void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3740{
3741 struct drm_i915_gem_object *obj = vma->obj;
Chris Wilsona9f14812016-08-04 16:32:28 +01003742 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Chris Wilsond0710ab2015-11-20 14:16:39 +00003743 bool mappable, fenceable;
3744 u32 fence_size, fence_alignment;
3745
Chris Wilsona9f14812016-08-04 16:32:28 +01003746 fence_size = i915_gem_get_ggtt_size(dev_priv,
Chris Wilson05a20d02016-08-18 17:16:55 +01003747 vma->size,
Chris Wilson3e510a82016-08-05 10:14:23 +01003748 i915_gem_object_get_tiling(obj));
Chris Wilsona9f14812016-08-04 16:32:28 +01003749 fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
Chris Wilson05a20d02016-08-18 17:16:55 +01003750 vma->size,
Chris Wilson3e510a82016-08-05 10:14:23 +01003751 i915_gem_object_get_tiling(obj),
Chris Wilsonad1a7d22016-08-04 16:32:27 +01003752 true);
Chris Wilsond0710ab2015-11-20 14:16:39 +00003753
3754 fenceable = (vma->node.size == fence_size &&
3755 (vma->node.start & (fence_alignment - 1)) == 0);
3756
3757 mappable = (vma->node.start + fence_size <=
Chris Wilsona9f14812016-08-04 16:32:28 +01003758 dev_priv->ggtt.mappable_end);
Chris Wilsond0710ab2015-11-20 14:16:39 +00003759
Tvrtko Ursulin6d9deb92016-10-25 17:40:35 +01003760 /*
3761 * Explicitly disable for rotated VMA since the display does not
3762 * need the fence and the VMA is not accessible to other users.
3763 */
3764 if (mappable && fenceable &&
3765 vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
Chris Wilson05a20d02016-08-18 17:16:55 +01003766 vma->flags |= I915_VMA_CAN_FENCE;
3767 else
3768 vma->flags &= ~I915_VMA_CAN_FENCE;
Chris Wilsond0710ab2015-11-20 14:16:39 +00003769}
3770
Chris Wilson305bc232016-08-04 16:32:33 +01003771int __i915_vma_do_pin(struct i915_vma *vma,
3772 u64 size, u64 alignment, u64 flags)
Eric Anholt673a3942008-07-30 12:06:12 -07003773{
Chris Wilson305bc232016-08-04 16:32:33 +01003774 unsigned int bound = vma->flags;
Eric Anholt673a3942008-07-30 12:06:12 -07003775 int ret;
3776
Chris Wilson59bfa122016-08-04 16:32:31 +01003777 GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
Chris Wilson3272db52016-08-04 16:32:32 +01003778 GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
Ben Widawsky6e7186a2014-05-06 22:21:36 -07003779
Chris Wilson305bc232016-08-04 16:32:33 +01003780 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
3781 ret = -EBUSY;
3782 goto err;
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003783 }
3784
Chris Wilsonde895082016-08-04 16:32:34 +01003785 if ((bound & I915_VMA_BIND_MASK) == 0) {
Chris Wilson59bfa122016-08-04 16:32:31 +01003786 ret = i915_vma_insert(vma, size, alignment, flags);
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003787 if (ret)
Chris Wilson59bfa122016-08-04 16:32:31 +01003788 goto err;
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00003789 }
Daniel Vetter74898d72012-02-15 23:50:22 +01003790
Chris Wilson59bfa122016-08-04 16:32:31 +01003791 ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
Chris Wilson3b165252016-08-04 16:32:25 +01003792 if (ret)
Chris Wilson59bfa122016-08-04 16:32:31 +01003793 goto err;
Chris Wilson3b165252016-08-04 16:32:25 +01003794
Chris Wilson3272db52016-08-04 16:32:32 +01003795 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
Chris Wilsond0710ab2015-11-20 14:16:39 +00003796 __i915_vma_set_map_and_fenceable(vma);
Chris Wilsonef79e172014-10-31 13:53:52 +00003797
Chris Wilson3b165252016-08-04 16:32:25 +01003798 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
Eric Anholt673a3942008-07-30 12:06:12 -07003799 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003800
Chris Wilson59bfa122016-08-04 16:32:31 +01003801err:
3802 __i915_vma_unpin(vma);
3803 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003804}
3805
Chris Wilson058d88c2016-08-15 10:49:06 +01003806struct i915_vma *
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003807i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3808 const struct i915_ggtt_view *view,
Chris Wilson91b2db62016-08-04 16:32:23 +01003809 u64 size,
Chris Wilson2ffffd02016-08-04 16:32:22 +01003810 u64 alignment,
3811 u64 flags)
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003812{
Chris Wilson058d88c2016-08-15 10:49:06 +01003813 struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
Chris Wilson59bfa122016-08-04 16:32:31 +01003814 struct i915_vma *vma;
3815 int ret;
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003816
Chris Wilson058d88c2016-08-15 10:49:06 +01003817 vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
Chris Wilson59bfa122016-08-04 16:32:31 +01003818 if (IS_ERR(vma))
Chris Wilson058d88c2016-08-15 10:49:06 +01003819 return vma;
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003820
Chris Wilson59bfa122016-08-04 16:32:31 +01003821 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3822 if (flags & PIN_NONBLOCK &&
3823 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
Chris Wilson058d88c2016-08-15 10:49:06 +01003824 return ERR_PTR(-ENOSPC);
Chris Wilson59bfa122016-08-04 16:32:31 +01003825
3826 WARN(i915_vma_is_pinned(vma),
3827 "bo is already pinned in ggtt with incorrect alignment:"
Chris Wilson05a20d02016-08-18 17:16:55 +01003828 " offset=%08x, req.alignment=%llx,"
3829 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3830 i915_ggtt_offset(vma), alignment,
Chris Wilson59bfa122016-08-04 16:32:31 +01003831 !!(flags & PIN_MAPPABLE),
Chris Wilson05a20d02016-08-18 17:16:55 +01003832 i915_vma_is_map_and_fenceable(vma));
Chris Wilson59bfa122016-08-04 16:32:31 +01003833 ret = i915_vma_unbind(vma);
3834 if (ret)
Chris Wilson058d88c2016-08-15 10:49:06 +01003835 return ERR_PTR(ret);
Chris Wilson59bfa122016-08-04 16:32:31 +01003836 }
3837
Chris Wilson058d88c2016-08-15 10:49:06 +01003838 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3839 if (ret)
3840 return ERR_PTR(ret);
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02003841
Chris Wilson058d88c2016-08-15 10:49:06 +01003842 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003843}
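
/* Usage sketch (assumption: a NULL view is treated as the normal GGTT
 * view by the vma lookup, as most kernel-internal callers rely on):
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	 ... access through i915_ggtt_offset(vma) ...
 *	i915_vma_unpin(vma);
 *
 * A size and alignment of zero request the object's natural footprint
 * and minimum alignment; PIN_GLOBAL is added internally above.
 */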
3844
Chris Wilsonedf6b762016-08-09 09:23:33 +01003845static __always_inline unsigned int __busy_read_flag(unsigned int id)
Eric Anholt673a3942008-07-30 12:06:12 -07003846{
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003847 /* Note that we could alias engines in the execbuf API, but
3848 * that would be very unwise as it prevents userspace from
3849 * fine control over engine selection. Ahem.
3850 *
3851 * This should be something like EXEC_MAX_ENGINE instead of
3852 * I915_NUM_ENGINES.
3853 */
3854 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3855 return 0x10000 << id;
3856}
Eric Anholt673a3942008-07-30 12:06:12 -07003857
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003858static __always_inline unsigned int __busy_write_id(unsigned int id)
3859{
Chris Wilson70cb4722016-08-09 18:08:25 +01003860 /* The uABI guarantees an active writer is also amongst the read
3861 * engines. This would be true if we accessed the activity tracking
3862 * under the lock, but as we perform the lookup of the object and
3863 * its activity locklessly we can not guarantee that the last_write
3864 * being active implies that we have set the same engine flag from
3865 * last_read - hence we always set both read and write busy for
3866 * last_write.
3867 */
3868 return id | __busy_read_flag(id);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003869}
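
/* Worked example of the encoding: for the engine with exec_id 2,
 *
 *	__busy_read_flag(2)  == 0x00040000	 bit 18
 *	__busy_write_id(2)   == 0x00040002	 read flag | writer id
 *
 * so the upper 16 bits of the busy word form a per-engine read mask and
 * the lower 16 bits name the last writer, matching the uABI described
 * in the comment below.
 */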
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003870
Chris Wilsonedf6b762016-08-09 09:23:33 +01003871static __always_inline unsigned int
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003872__busy_set_if_active(const struct i915_gem_active *active,
3873 unsigned int (*flag)(unsigned int id))
3874{
Chris Wilson12555012016-08-16 09:50:40 +01003875 struct drm_i915_gem_request *request;
3876
3877 request = rcu_dereference(active->request);
3878 if (!request || i915_gem_request_completed(request))
3879 return 0;
3880
	3881	 /* This is racy. See __i915_gem_active_get_rcu() for a detailed
3882 * discussion of how to handle the race correctly, but for reporting
3883 * the busy state we err on the side of potentially reporting the
3884 * wrong engine as being busy (but we guarantee that the result
3885 * is at least self-consistent).
3886 *
3887 * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
3888 * whilst we are inspecting it, even under the RCU read lock as we are.
3889 * This means that there is a small window for the engine and/or the
3890 * seqno to have been overwritten. The seqno will always be in the
3891 * future compared to the intended, and so we know that if that
3892 * seqno is idle (on whatever engine) our request is idle and the
3893 * return 0 above is correct.
3894 *
3895 * The issue is that if the engine is switched, it is just as likely
3896 * to report that it is busy (but since the switch happened, we know
3897 * the request should be idle). So there is a small chance that a busy
3898 * result is actually the wrong engine.
3899 *
3900 * So why don't we care?
3901 *
3902 * For starters, the busy ioctl is a heuristic that is by definition
3903 * racy. Even with perfect serialisation in the driver, the hardware
3904 * state is constantly advancing - the state we report to the user
3905 * is stale.
3906 *
3907 * The critical information for the busy-ioctl is whether the object
3908 * is idle as userspace relies on that to detect whether its next
3909 * access will stall, or if it has missed submitting commands to
3910 * the hardware allowing the GPU to stall. We never generate a
3911 * false-positive for idleness, thus busy-ioctl is reliable at the
3912 * most fundamental level, and we maintain the guarantee that a
3913 * busy object left to itself will eventually become idle (and stay
3914 * idle!).
3915 *
3916 * We allow ourselves the leeway of potentially misreporting the busy
3917 * state because that is an optimisation heuristic that is constantly
3918 * in flux. Being quickly able to detect the busy/idle state is much
3919 * more important than accurate logging of exactly which engines were
3920 * busy.
3921 *
3922 * For accuracy in reporting the engine, we could use
3923 *
3924 * result = 0;
3925 * request = __i915_gem_active_get_rcu(active);
3926 * if (request) {
3927 * if (!i915_gem_request_completed(request))
3928 * result = flag(request->engine->exec_id);
3929 * i915_gem_request_put(request);
3930 * }
3931 *
3932 * but that still remains susceptible to both hardware and userspace
3933 * races. So we accept making the result of that race slightly worse,
3934 * given the rarity of the race and its low impact on the result.
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003935 */
Chris Wilson12555012016-08-16 09:50:40 +01003936 return flag(READ_ONCE(request->engine->exec_id));
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003937}
3938
Chris Wilsonedf6b762016-08-09 09:23:33 +01003939static __always_inline unsigned int
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003940busy_check_reader(const struct i915_gem_active *active)
3941{
3942 return __busy_set_if_active(active, __busy_read_flag);
3943}
3944
Chris Wilsonedf6b762016-08-09 09:23:33 +01003945static __always_inline unsigned int
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003946busy_check_writer(const struct i915_gem_active *active)
3947{
3948 return __busy_set_if_active(active, __busy_write_id);
Eric Anholt673a3942008-07-30 12:06:12 -07003949}
3950
3951int
Eric Anholt673a3942008-07-30 12:06:12 -07003952i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003953 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003954{
3955 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003956 struct drm_i915_gem_object *obj;
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003957 unsigned long active;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003958
Chris Wilson03ac0642016-07-20 13:31:51 +01003959 obj = i915_gem_object_lookup(file, args->handle);
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003960 if (!obj)
3961 return -ENOENT;
Daniel Vetter30dfebf2012-06-01 15:21:23 +02003962
Chris Wilson426960b2016-01-15 16:51:46 +00003963 args->busy = 0;
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003964 active = __I915_BO_ACTIVE(obj);
3965 if (active) {
3966 int idx;
Chris Wilson426960b2016-01-15 16:51:46 +00003967
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003968 /* Yes, the lookups are intentionally racy.
3969 *
3970 * First, we cannot simply rely on __I915_BO_ACTIVE. We have
3971 * to regard the value as stale and as our ABI guarantees
3972 * forward progress, we confirm the status of each active
3973 * request with the hardware.
3974 *
3975 * Even though we guard the pointer lookup by RCU, that only
3976 * guarantees that the pointer and its contents remain
	3977	 * dereferenceable and does *not* mean that the request we
3978 * have is the same as the one being tracked by the object.
3979 *
3980 * Consider that we lookup the request just as it is being
3981 * retired and freed. We take a local copy of the pointer,
3982 * but before we add its engine into the busy set, the other
3983 * thread reallocates it and assigns it to a task on another
Chris Wilson12555012016-08-16 09:50:40 +01003984 * engine with a fresh and incomplete seqno. Guarding against
3985 * that requires careful serialisation and reference counting,
	3986	 * i.e. using __i915_gem_active_get_request_rcu(). We don't;
3987 * instead we expect that if the result is busy, which engines
3988 * are busy is not completely reliable - we only guarantee
3989 * that the object was busy.
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003990 */
3991 rcu_read_lock();
Chris Wilson426960b2016-01-15 16:51:46 +00003992
Chris Wilson3fdc13c2016-08-05 10:14:18 +01003993 for_each_active(active, idx)
3994 args->busy |= busy_check_reader(&obj->last_read[idx]);
3995
3996 /* For ABI sanity, we only care that the write engine is in
Chris Wilson70cb4722016-08-09 18:08:25 +01003997 * the set of read engines. This should be ensured by the
3998 * ordering of setting last_read/last_write in
3999 * i915_vma_move_to_active(), and then in reverse in retire.
4000 * However, for good measure, we always report the last_write
4001 * request as a busy read as well as being a busy write.
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004002 *
4003 * We don't care that the set of active read/write engines
4004 * may change during construction of the result, as it is
4005 * equally liable to change before userspace can inspect
4006 * the result.
4007 */
4008 args->busy |= busy_check_writer(&obj->last_write);
4009
4010 rcu_read_unlock();
Chris Wilson426960b2016-01-15 16:51:46 +00004011 }
Eric Anholt673a3942008-07-30 12:06:12 -07004012
Chris Wilson3fdc13c2016-08-05 10:14:18 +01004013 i915_gem_object_put_unlocked(obj);
4014 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004015}
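
/* Illustrative decode on the userspace side (fd and bo_handle are made
 * up for the example; the bit split mirrors __busy_read_flag() and
 * __busy_write_id() above):
 *
 *	struct drm_i915_gem_busy busy = { .handle = bo_handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	bool idle = busy.busy == 0;
 *	unsigned int last_writer = busy.busy & 0xffff;	 engine exec_id
 *	unsigned int read_mask = busy.busy >> 16;	 one bit per engine
 *
 * As stressed above, a zero result is reliable; a non-zero result may
 * name an engine that has since finished.
 */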
4016
4017int
4018i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4019 struct drm_file *file_priv)
4020{
Akshay Joshi0206e352011-08-16 15:34:10 -04004021 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004022}
4023
Chris Wilson3ef94da2009-09-14 16:50:29 +01004024int
4025i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4026 struct drm_file *file_priv)
4027{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004028 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson3ef94da2009-09-14 16:50:29 +01004029 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004030 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004031 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004032
4033 switch (args->madv) {
4034 case I915_MADV_DONTNEED:
4035 case I915_MADV_WILLNEED:
4036 break;
4037 default:
4038 return -EINVAL;
4039 }
4040
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004041 ret = i915_mutex_lock_interruptible(dev);
4042 if (ret)
4043 return ret;
4044
Chris Wilson03ac0642016-07-20 13:31:51 +01004045 obj = i915_gem_object_lookup(file_priv, args->handle);
4046 if (!obj) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004047 ret = -ENOENT;
4048 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004049 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01004050
Daniel Vetter656bfa32014-11-20 09:26:30 +01004051 if (obj->pages &&
Chris Wilson3e510a82016-08-05 10:14:23 +01004052 i915_gem_object_is_tiled(obj) &&
Daniel Vetter656bfa32014-11-20 09:26:30 +01004053 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4054 if (obj->madv == I915_MADV_WILLNEED)
4055 i915_gem_object_unpin_pages(obj);
4056 if (args->madv == I915_MADV_WILLNEED)
4057 i915_gem_object_pin_pages(obj);
4058 }
4059
Chris Wilson05394f32010-11-08 19:18:58 +00004060 if (obj->madv != __I915_MADV_PURGED)
4061 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004062
Chris Wilson6c085a72012-08-20 11:40:46 +02004063 /* if the object is no longer attached, discard its backing storage */
Daniel Vetterbe6a0372015-03-18 10:46:04 +01004064 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01004065 i915_gem_object_truncate(obj);
4066
Chris Wilson05394f32010-11-08 19:18:58 +00004067 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004068
Chris Wilsonf8c417c2016-07-20 13:31:53 +01004069 i915_gem_object_put(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004070unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004071 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004072 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004073}
4074
Chris Wilson37e680a2012-06-07 15:38:42 +01004075void i915_gem_object_init(struct drm_i915_gem_object *obj,
4076 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004077{
Chris Wilsonb4716182015-04-27 13:41:17 +01004078 int i;
4079
Ben Widawsky35c20a62013-05-31 11:28:48 -07004080 INIT_LIST_HEAD(&obj->global_list);
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00004081 for (i = 0; i < I915_NUM_ENGINES; i++)
Chris Wilsonfa545cb2016-08-04 07:52:35 +01004082 init_request_active(&obj->last_read[i],
4083 i915_gem_object_retire__read);
4084 init_request_active(&obj->last_write,
4085 i915_gem_object_retire__write);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02004086 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004087 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson8d9d5742015-04-07 16:20:38 +01004088 INIT_LIST_HEAD(&obj->batch_pool_link);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004089
Chris Wilson37e680a2012-06-07 15:38:42 +01004090 obj->ops = ops;
4091
Chris Wilson50349242016-08-18 17:17:04 +01004092 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
Chris Wilson0327d6b2012-08-11 15:41:06 +01004093 obj->madv = I915_MADV_WILLNEED;
Chris Wilson0327d6b2012-08-11 15:41:06 +01004094
Dave Gordonf19ec8c2016-07-04 11:34:37 +01004095 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004096}
4097
Chris Wilson37e680a2012-06-07 15:38:42 +01004098static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
Chris Wilsonde472662016-01-22 18:32:31 +00004099 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
Chris Wilson37e680a2012-06-07 15:38:42 +01004100 .get_pages = i915_gem_object_get_pages_gtt,
4101 .put_pages = i915_gem_object_put_pages_gtt,
4102};
4103
Dave Gordond37cd8a2016-04-22 19:14:32 +01004104struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004105 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004106{
Daniel Vetterc397b902010-04-09 19:05:07 +00004107 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004108 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004109 gfp_t mask;
Chris Wilsonfe3db792016-04-25 13:32:13 +01004110 int ret;
Daniel Vetterc397b902010-04-09 19:05:07 +00004111
Chris Wilson42dcedd2012-11-15 11:32:30 +00004112 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004113 if (obj == NULL)
Chris Wilsonfe3db792016-04-25 13:32:13 +01004114 return ERR_PTR(-ENOMEM);
Daniel Vetterc397b902010-04-09 19:05:07 +00004115
Chris Wilsonfe3db792016-04-25 13:32:13 +01004116 ret = drm_gem_object_init(dev, &obj->base, size);
4117 if (ret)
4118 goto fail;
Daniel Vetterc397b902010-04-09 19:05:07 +00004119
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004120 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4121 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4122 /* 965gm cannot relocate objects above 4GiB. */
4123 mask &= ~__GFP_HIGHMEM;
4124 mask |= __GFP_DMA32;
4125 }
4126
Al Viro93c76a32015-12-04 23:45:44 -05004127 mapping = obj->base.filp->f_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004128 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004129
Chris Wilson37e680a2012-06-07 15:38:42 +01004130 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004131
Daniel Vetterc397b902010-04-09 19:05:07 +00004132 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4133 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4134
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004135 if (HAS_LLC(dev)) {
4136 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004137 * cache) for about a 10% performance improvement
4138 * compared to uncached. Graphics requests other than
4139 * display scanout are coherent with the CPU in
4140 * accessing this cache. This means in this mode we
4141 * don't need to clflush on the CPU side, and on the
4142 * GPU side we only need to flush internal caches to
4143 * get data visible to the CPU.
4144 *
4145 * However, we maintain the display planes as UC, and so
4146 * need to rebind when first used as such.
4147 */
4148 obj->cache_level = I915_CACHE_LLC;
4149 } else
4150 obj->cache_level = I915_CACHE_NONE;
4151
Daniel Vetterd861e332013-07-24 23:25:03 +02004152 trace_i915_gem_object_create(obj);
4153
Chris Wilson05394f32010-11-08 19:18:58 +00004154 return obj;
Chris Wilsonfe3db792016-04-25 13:32:13 +01004155
4156fail:
4157 i915_gem_object_free(obj);
4158
4159 return ERR_PTR(ret);
Daniel Vetterac52bc52010-04-09 19:05:06 +00004160}
4161
Chris Wilson340fbd82014-05-22 09:16:52 +01004162static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4163{
4164 /* If we are the last user of the backing storage (be it shmemfs
4165 * pages or stolen etc), we know that the pages are going to be
4166 * immediately released. In this case, we can then skip copying
4167 * back the contents from the GPU.
4168 */
4169
4170 if (obj->madv != I915_MADV_WILLNEED)
4171 return false;
4172
4173 if (obj->base.filp == NULL)
4174 return true;
4175
4176 /* At first glance, this looks racy, but then again so would be
4177 * userspace racing mmap against close. However, the first external
4178 * reference to the filp can only be obtained through the
4179 * i915_gem_mmap_ioctl() which safeguards us against the user
4180 * acquiring such a reference whilst we are in the middle of
4181 * freeing the object.
4182 */
4183 return atomic_long_read(&obj->base.filp->f_count) == 1;
4184}
4185
Chris Wilson1488fc02012-04-24 15:47:31 +01004186void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004187{
Chris Wilson1488fc02012-04-24 15:47:31 +01004188 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004189 struct drm_device *dev = obj->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004190 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004191 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004192
Paulo Zanonif65c9162013-11-27 18:20:34 -02004193 intel_runtime_pm_get(dev_priv);
4194
Chris Wilson26e12f892011-03-20 11:20:19 +00004195 trace_i915_gem_object_destroy(obj);
4196
Chris Wilsonb1f788c2016-08-04 07:52:45 +01004197 /* All file-owned VMA should have been released by this point through
4198 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4199 * However, the object may also be bound into the global GTT (e.g.
4200 * older GPUs without per-process support, or for direct access through
4201 * the GTT either for the user or for scanout). Those VMA still need to
4202 * unbound now.
4203 */
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00004204 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
Chris Wilson3272db52016-08-04 16:32:32 +01004205 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
Chris Wilsonb1f788c2016-08-04 07:52:45 +01004206 GEM_BUG_ON(i915_vma_is_active(vma));
Chris Wilson3272db52016-08-04 16:32:32 +01004207 vma->flags &= ~I915_VMA_PIN_MASK;
Chris Wilsonb1f788c2016-08-04 07:52:45 +01004208 i915_vma_close(vma);
Chris Wilson1488fc02012-04-24 15:47:31 +01004209 }
Chris Wilson15717de2016-08-04 07:52:26 +01004210 GEM_BUG_ON(obj->bind_count);
Chris Wilson1488fc02012-04-24 15:47:31 +01004211
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004212	/* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4213 * before progressing. */
4214 if (obj->stolen)
4215 i915_gem_object_unpin_pages(obj);
4216
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01004217 WARN_ON(atomic_read(&obj->frontbuffer_bits));
Daniel Vettera071fa02014-06-18 23:28:09 +02004218
Daniel Vetter656bfa32014-11-20 09:26:30 +01004219 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4220 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
Chris Wilson3e510a82016-08-05 10:14:23 +01004221 i915_gem_object_is_tiled(obj))
Daniel Vetter656bfa32014-11-20 09:26:30 +01004222 i915_gem_object_unpin_pages(obj);
4223
Ben Widawsky401c29f2013-05-31 11:28:47 -07004224 if (WARN_ON(obj->pages_pin_count))
4225 obj->pages_pin_count = 0;
Chris Wilson340fbd82014-05-22 09:16:52 +01004226 if (discard_backing_storage(obj))
Chris Wilson55372522014-03-25 13:23:06 +00004227 obj->madv = I915_MADV_DONTNEED;
Chris Wilson37e680a2012-06-07 15:38:42 +01004228 i915_gem_object_put_pages(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004229
Chris Wilson9da3da62012-06-01 15:20:22 +01004230 BUG_ON(obj->pages);
4231
Chris Wilson2f745ad2012-09-04 21:02:58 +01004232 if (obj->base.import_attach)
4233 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004234
Chris Wilson5cc9ed42014-05-16 14:22:37 +01004235 if (obj->ops->release)
4236 obj->ops->release(obj);
4237
Chris Wilson05394f32010-11-08 19:18:58 +00004238 drm_gem_object_release(&obj->base);
4239 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004240
Chris Wilson05394f32010-11-08 19:18:58 +00004241 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004242 i915_gem_object_free(obj);
Paulo Zanonif65c9162013-11-27 18:20:34 -02004243
4244 intel_runtime_pm_put(dev_priv);
Chris Wilsonbe726152010-07-23 23:18:50 +01004245}
4246
Chris Wilsondcff85c2016-08-05 10:14:11 +01004247int i915_gem_suspend(struct drm_device *dev)
Chris Wilsone3efda42014-04-09 09:19:41 +01004248{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004249 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsondcff85c2016-08-05 10:14:11 +01004250 int ret;
Chris Wilsone3efda42014-04-09 09:19:41 +01004251
Chris Wilson54b4f682016-07-21 21:16:19 +01004252 intel_suspend_gt_powersave(dev_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004253
Chris Wilson45c5f202013-10-16 11:50:01 +01004254 mutex_lock(&dev->struct_mutex);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004255
4256 /* We have to flush all the executing contexts to main memory so
	4257	 * that they can be saved in the hibernation image. To ensure the last
4258 * context image is coherent, we have to switch away from it. That
4259 * leaves the dev_priv->kernel_context still active when
4260 * we actually suspend, and its image in memory may not match the GPU
4261 * state. Fortunately, the kernel_context is disposable and we do
4262 * not rely on its state.
4263 */
4264 ret = i915_gem_switch_to_kernel_context(dev_priv);
4265 if (ret)
4266 goto err;
4267
Chris Wilson22dd3bb2016-09-09 14:11:50 +01004268 ret = i915_gem_wait_for_idle(dev_priv,
4269 I915_WAIT_INTERRUPTIBLE |
4270 I915_WAIT_LOCKED);
Chris Wilsonf7403342013-09-13 23:57:04 +01004271 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004272 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004273
Chris Wilsonc0336662016-05-06 15:40:21 +01004274 i915_gem_retire_requests(dev_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004275
Chris Wilsonb2e862d2016-04-28 09:56:41 +01004276 i915_gem_context_lost(dev_priv);
Chris Wilson45c5f202013-10-16 11:50:01 +01004277 mutex_unlock(&dev->struct_mutex);
4278
Chris Wilson737b1502015-01-26 18:03:03 +02004279 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
Chris Wilson67d97da2016-07-04 08:08:31 +01004280 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4281 flush_delayed_work(&dev_priv->gt.idle_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004282
Chris Wilsonbdcf1202014-11-25 11:56:33 +00004283	/* Assert that we successfully flushed all the work and
4284 * reset the GPU back to its idle, low power state.
4285 */
Chris Wilson67d97da2016-07-04 08:08:31 +01004286 WARN_ON(dev_priv->gt.awake);
Chris Wilsonbdcf1202014-11-25 11:56:33 +00004287
Eric Anholt673a3942008-07-30 12:06:12 -07004288 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004289
4290err:
4291 mutex_unlock(&dev->struct_mutex);
4292 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004293}
4294
Chris Wilson5ab57c72016-07-15 14:56:20 +01004295void i915_gem_resume(struct drm_device *dev)
4296{
4297 struct drm_i915_private *dev_priv = to_i915(dev);
4298
4299 mutex_lock(&dev->struct_mutex);
4300 i915_gem_restore_gtt_mappings(dev);
4301
4302 /* As we didn't flush the kernel context before suspend, we cannot
4303 * guarantee that the context image is complete. So let's just reset
4304 * it and start again.
4305 */
Chris Wilson821ed7d2016-09-09 14:11:53 +01004306 dev_priv->gt.resume(dev_priv);
Chris Wilson5ab57c72016-07-15 14:56:20 +01004307
4308 mutex_unlock(&dev->struct_mutex);
4309}
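
/* Ordering sketch (an assumption about the intended PM flow rather than
 * a copy of the actual platform hooks): suspend and hibernate paths are
 * expected to call
 *
 *	i915_gem_suspend(dev);	 switch to the kernel context, wait for idle
 *	 ... save state / power down / write the hibernation image ...
 *	i915_gem_resume(dev);	 restore GTT mappings, reset context state
 *
 * so that no user context image is mid-flight while the GPU state is
 * captured, matching the comment in i915_gem_suspend() above.
 */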
4310
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004311void i915_gem_init_swizzling(struct drm_device *dev)
4312{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004313 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004314
Daniel Vetter11782b02012-01-31 16:47:55 +01004315 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004316 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4317 return;
4318
4319 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4320 DISP_TILE_SURFACE_SWIZZLING);
4321
Daniel Vetter11782b02012-01-31 16:47:55 +01004322 if (IS_GEN5(dev))
4323 return;
4324
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004325 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4326 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004327 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004328 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004329 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky31a53362013-11-02 21:07:04 -07004330 else if (IS_GEN8(dev))
4331 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004332 else
4333 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004334}
Daniel Vettere21af882012-02-09 20:53:27 +01004335
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004336static void init_unused_ring(struct drm_device *dev, u32 base)
4337{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004338 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä81e7f202014-08-15 01:21:55 +03004339
4340 I915_WRITE(RING_CTL(base), 0);
4341 I915_WRITE(RING_HEAD(base), 0);
4342 I915_WRITE(RING_TAIL(base), 0);
4343 I915_WRITE(RING_START(base), 0);
4344}
4345
4346static void init_unused_rings(struct drm_device *dev)
4347{
4348 if (IS_I830(dev)) {
4349 init_unused_ring(dev, PRB1_BASE);
4350 init_unused_ring(dev, SRB0_BASE);
4351 init_unused_ring(dev, SRB1_BASE);
4352 init_unused_ring(dev, SRB2_BASE);
4353 init_unused_ring(dev, SRB3_BASE);
4354 } else if (IS_GEN2(dev)) {
4355 init_unused_ring(dev, SRB0_BASE);
4356 init_unused_ring(dev, SRB1_BASE);
4357 } else if (IS_GEN3(dev)) {
4358 init_unused_ring(dev, PRB1_BASE);
4359 init_unused_ring(dev, PRB2_BASE);
4360 }
4361}
4362
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004363int
4364i915_gem_init_hw(struct drm_device *dev)
4365{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004366 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004367 struct intel_engine_cs *engine;
Chris Wilsond200cda2016-04-28 09:56:44 +01004368 int ret;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004369
Chris Wilson5e4f5182015-02-13 14:35:59 +00004370 /* Double layer security blanket, see i915_gem_init() */
4371 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4372
Mika Kuoppala3accaf72016-04-13 17:26:43 +03004373 if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004374 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004375
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004376 if (IS_HASWELL(dev))
4377 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4378 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004379
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004380 if (HAS_PCH_NOP(dev)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004381 if (IS_IVYBRIDGE(dev)) {
4382 u32 temp = I915_READ(GEN7_MSG_CTL);
4383 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4384 I915_WRITE(GEN7_MSG_CTL, temp);
4385 } else if (INTEL_INFO(dev)->gen >= 7) {
4386 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4387 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4388 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4389 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004390 }
4391
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004392 i915_gem_init_swizzling(dev);
4393
Daniel Vetterd5abdfd2014-11-20 09:45:19 +01004394 /*
4395 * At least 830 can leave some of the unused rings
4396 * "active" (ie. head != tail) after resume which
	4397	 * will prevent c3 entry. Make sure all unused rings
4398 * are totally idle.
4399 */
4400 init_unused_rings(dev);
4401
Dave Gordoned54c1a2016-01-19 19:02:54 +00004402 BUG_ON(!dev_priv->kernel_context);
John Harrison90638cc2015-05-29 17:43:37 +01004403
John Harrison4ad2fd82015-06-18 13:11:20 +01004404 ret = i915_ppgtt_init_hw(dev);
4405 if (ret) {
4406 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4407 goto out;
4408 }
4409
4410 /* Need to do basic initialisation of all rings first: */
Dave Gordonb4ac5af2016-03-24 11:20:38 +00004411 for_each_engine(engine, dev_priv) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004412 ret = engine->init_hw(engine);
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004413 if (ret)
Chris Wilson5e4f5182015-02-13 14:35:59 +00004414 goto out;
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004415 }
Mika Kuoppala99433932013-01-22 14:12:17 +02004416
Peter Antoine0ccdacf2016-04-13 15:03:25 +01004417 intel_mocs_init_l3cc_table(dev);
4418
Alex Dai33a732f2015-08-12 15:43:36 +01004419 /* We can't enable contexts until all firmware is loaded */
Dave Gordone556f7c2016-06-07 09:14:49 +01004420 ret = intel_guc_setup(dev);
Nick Hoathe84fe802015-09-11 12:53:46 +01004421 if (ret)
4422 goto out;
4423
Chris Wilson5e4f5182015-02-13 14:35:59 +00004424out:
4425 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004426 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004427}
4428
Chris Wilson39df9192016-07-20 13:31:57 +01004429bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4430{
4431 if (INTEL_INFO(dev_priv)->gen < 6)
4432 return false;
4433
4434 /* TODO: make semaphores and Execlists play nicely together */
4435 if (i915.enable_execlists)
4436 return false;
4437
4438 if (value >= 0)
4439 return value;
4440
4441#ifdef CONFIG_INTEL_IOMMU
4442 /* Enable semaphores on SNB when IO remapping is off */
4443 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4444 return false;
4445#endif
4446
4447 return true;
4448}
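
/* Usage sketch (assumption: the module parameter is sanitised once
 * during early driver load, roughly as below, so later code can test
 * the cached value instead of repeating these checks):
 *
 *	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
 */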
4449
Chris Wilson1070a422012-04-24 15:47:41 +01004450int i915_gem_init(struct drm_device *dev)
4451{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004452 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson1070a422012-04-24 15:47:41 +01004453 int ret;
4454
Chris Wilson1070a422012-04-24 15:47:41 +01004455 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004456
Oscar Mateoa83014d2014-07-24 17:04:21 +01004457 if (!i915.enable_execlists) {
Chris Wilson821ed7d2016-09-09 14:11:53 +01004458 dev_priv->gt.resume = intel_legacy_submission_resume;
Chris Wilson7e37f882016-08-02 22:50:21 +01004459 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
Oscar Mateo454afeb2014-07-24 17:04:22 +01004460 } else {
Chris Wilson821ed7d2016-09-09 14:11:53 +01004461 dev_priv->gt.resume = intel_lr_context_resume;
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004462 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
Oscar Mateoa83014d2014-07-24 17:04:21 +01004463 }
4464
Chris Wilson5e4f5182015-02-13 14:35:59 +00004465 /* This is just a security blanket to placate dragons.
4466 * On some systems, we very sporadically observe that the first TLBs
4467 * used by the CS may be stale, despite us poking the TLB reset. If
4468 * we hold the forcewake during initialisation these problems
4469 * just magically go away.
4470 */
4471 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4472
Chris Wilson72778cb2016-05-19 16:17:16 +01004473 i915_gem_init_userptr(dev_priv);
Chris Wilsonf6b9d5c2016-08-04 07:52:23 +01004474
4475 ret = i915_gem_init_ggtt(dev_priv);
4476 if (ret)
4477 goto out_unlock;
Jesse Barnesd62b4892013-03-08 10:45:53 -08004478
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004479 ret = i915_gem_context_init(dev);
Jani Nikula7bcc3772014-12-05 14:17:42 +02004480 if (ret)
4481 goto out_unlock;
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004482
Tvrtko Ursulin8b3e2d32016-07-13 16:03:37 +01004483 ret = intel_engines_init(dev);
Daniel Vetter35a57ff2014-11-20 00:33:07 +01004484 if (ret)
Jani Nikula7bcc3772014-12-05 14:17:42 +02004485 goto out_unlock;
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004486
4487 ret = i915_gem_init_hw(dev);
Chris Wilson60990322014-04-09 09:19:42 +01004488 if (ret == -EIO) {
Chris Wilson7e21d642016-07-27 09:07:29 +01004489 /* Allow engine initialisation to fail by marking the GPU as
Chris Wilson60990322014-04-09 09:19:42 +01004490 * wedged. But we only want to do this where the GPU is angry,
4491 * for all other failure, such as an allocation failure, bail.
4492 */
4493 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
Chris Wilson821ed7d2016-09-09 14:11:53 +01004494 i915_gem_set_wedged(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01004495 ret = 0;
Chris Wilson1070a422012-04-24 15:47:41 +01004496 }
Jani Nikula7bcc3772014-12-05 14:17:42 +02004497
4498out_unlock:
Chris Wilson5e4f5182015-02-13 14:35:59 +00004499 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Chris Wilson60990322014-04-09 09:19:42 +01004500 mutex_unlock(&dev->struct_mutex);
Chris Wilson1070a422012-04-24 15:47:41 +01004501
Chris Wilson60990322014-04-09 09:19:42 +01004502 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01004503}
4504
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004505void
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004506i915_gem_cleanup_engines(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004507{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004508 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00004509 struct intel_engine_cs *engine;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004510
Dave Gordonb4ac5af2016-03-24 11:20:38 +00004511 for_each_engine(engine, dev_priv)
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00004512 dev_priv->gt.cleanup_engine(engine);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004513}
4514
Chris Wilson64193402010-10-24 12:38:05 +01004515static void
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00004516init_engine_lists(struct intel_engine_cs *engine)
Chris Wilson64193402010-10-24 12:38:05 +01004517{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00004518 INIT_LIST_HEAD(&engine->request_list);
Chris Wilson64193402010-10-24 12:38:05 +01004519}
4520
Eric Anholt673a3942008-07-30 12:06:12 -07004521void
Imre Deak40ae4e12016-03-16 14:54:03 +02004522i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4523{
Chris Wilson91c8a322016-07-05 10:40:23 +01004524 struct drm_device *dev = &dev_priv->drm;
Chris Wilson49ef5292016-08-18 17:17:00 +01004525 int i;
Imre Deak40ae4e12016-03-16 14:54:03 +02004526
4527 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4528 !IS_CHERRYVIEW(dev_priv))
4529 dev_priv->num_fence_regs = 32;
4530 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4531 IS_I945GM(dev_priv) || IS_G33(dev_priv))
4532 dev_priv->num_fence_regs = 16;
4533 else
4534 dev_priv->num_fence_regs = 8;
4535
Chris Wilsonc0336662016-05-06 15:40:21 +01004536 if (intel_vgpu_active(dev_priv))
Imre Deak40ae4e12016-03-16 14:54:03 +02004537 dev_priv->num_fence_regs =
4538 I915_READ(vgtif_reg(avail_rs.fence_num));
4539
4540 /* Initialize fence registers to zero */
Chris Wilson49ef5292016-08-18 17:17:00 +01004541 for (i = 0; i < dev_priv->num_fence_regs; i++) {
4542 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
4543
4544 fence->i915 = dev_priv;
4545 fence->id = i;
4546 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
4547 }
Imre Deak40ae4e12016-03-16 14:54:03 +02004548 i915_gem_restore_fences(dev);
4549
4550 i915_gem_detect_bit_6_swizzle(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004551}
4552
4553void
Imre Deakd64aa092016-01-19 15:26:29 +02004554i915_gem_load_init(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004555{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004556 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004557 int i;
4558
Chris Wilsonefab6d82015-04-07 16:20:57 +01004559 dev_priv->objects =
Chris Wilson42dcedd2012-11-15 11:32:30 +00004560 kmem_cache_create("i915_gem_object",
4561 sizeof(struct drm_i915_gem_object), 0,
4562 SLAB_HWCACHE_ALIGN,
4563 NULL);
Chris Wilsone20d2ab2015-04-07 16:20:58 +01004564 dev_priv->vmas =
4565 kmem_cache_create("i915_gem_vma",
4566 sizeof(struct i915_vma), 0,
4567 SLAB_HWCACHE_ALIGN,
4568 NULL);
Chris Wilsonefab6d82015-04-07 16:20:57 +01004569 dev_priv->requests =
4570 kmem_cache_create("i915_gem_request",
4571 sizeof(struct drm_i915_gem_request), 0,
Chris Wilson0eafec62016-08-04 16:32:41 +01004572 SLAB_HWCACHE_ALIGN |
4573 SLAB_RECLAIM_ACCOUNT |
4574 SLAB_DESTROY_BY_RCU,
Chris Wilsonefab6d82015-04-07 16:20:57 +01004575 NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07004576
Ben Widawskya33afea2013-09-17 21:12:45 -07004577 INIT_LIST_HEAD(&dev_priv->context_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02004578 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4579 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004580 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00004581 for (i = 0; i < I915_NUM_ENGINES; i++)
4582 init_engine_lists(&dev_priv->engine[i]);
Chris Wilson67d97da2016-07-04 08:08:31 +01004583 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
Eric Anholt673a3942008-07-30 12:06:12 -07004584 i915_gem_retire_work_handler);
Chris Wilson67d97da2016-07-04 08:08:31 +01004585 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004586 i915_gem_idle_work_handler);
Chris Wilson1f15b762016-07-01 17:23:14 +01004587 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004588 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01004589
Chris Wilson72bfa192010-12-19 11:42:05 +00004590 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4591
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004592 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01004593
Chris Wilsonce453d82011-02-21 14:43:56 +00004594 dev_priv->mm.interruptible = true;
4595
Joonas Lahtinen6f633402016-09-01 14:58:21 +03004596 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
4597
Chris Wilsonb5add952016-08-04 16:32:36 +01004598 spin_lock_init(&dev_priv->fb_tracking.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07004599}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004600
Imre Deakd64aa092016-01-19 15:26:29 +02004601void i915_gem_load_cleanup(struct drm_device *dev)
4602{
4603 struct drm_i915_private *dev_priv = to_i915(dev);
4604
4605 kmem_cache_destroy(dev_priv->requests);
4606 kmem_cache_destroy(dev_priv->vmas);
4607 kmem_cache_destroy(dev_priv->objects);
Chris Wilson0eafec62016-08-04 16:32:41 +01004608
4609 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
4610 rcu_barrier();
Imre Deakd64aa092016-01-19 15:26:29 +02004611}
4612
Chris Wilsonec7ce652016-09-21 14:51:07 +01004613int i915_gem_freeze(struct drm_i915_private *dev_priv)
4614{
4615 intel_runtime_pm_get(dev_priv);
4616
4617 mutex_lock(&dev_priv->drm.struct_mutex);
4618 i915_gem_shrink_all(dev_priv);
4619 mutex_unlock(&dev_priv->drm.struct_mutex);
4620
4621 intel_runtime_pm_put(dev_priv);
4622
4623 return 0;
Eric Anholtb9624422009-06-03 07:27:35 +00004624}
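/*
 * Hedged note: i915_gem_freeze() and i915_gem_freeze_late() below are
 * expected to be called from the driver's dev_pm_ops freeze callbacks
 * (i915_drv.c in this era), roughly:
 *
 *	static int i915_pm_freeze(struct device *kdev)
 *	{
 *		...
 *		return i915_gem_freeze(kdev_to_i915(kdev));
 *	}
 *
 * The kdev_to_i915() helper name is an assumption here.
 */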
Chris Wilson1c255952010-09-26 11:03:27 +01004625
Chris Wilson461fb992016-05-14 07:26:33 +01004626int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4627{
4628 struct drm_i915_gem_object *obj;
Chris Wilson7aab2d52016-09-09 20:02:18 +01004629 struct list_head *phases[] = {
4630 &dev_priv->mm.unbound_list,
4631 &dev_priv->mm.bound_list,
4632 NULL
4633 }, **p;
Chris Wilson461fb992016-05-14 07:26:33 +01004634
4635 /* Called just before we write the hibernation image.
4636 *
4637 * We need to update the domain tracking to reflect that the CPU
4638	 * will be accessing all the pages to create and restore from the
4639	 * hibernation image, and so upon restoration those pages will be in the
4640 * CPU domain.
4641 *
4642 * To make sure the hibernation image contains the latest state,
4643 * we update that state just before writing out the image.
Chris Wilson7aab2d52016-09-09 20:02:18 +01004644 *
4645 * To try and reduce the hibernation image, we manually shrink
4646 * the objects as well.
Chris Wilson461fb992016-05-14 07:26:33 +01004647 */
4648
Chris Wilsonec7ce652016-09-21 14:51:07 +01004649 mutex_lock(&dev_priv->drm.struct_mutex);
4650 i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
Chris Wilson461fb992016-05-14 07:26:33 +01004651
Chris Wilson7aab2d52016-09-09 20:02:18 +01004652 for (p = phases; *p; p++) {
4653 list_for_each_entry(obj, *p, global_list) {
4654 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4655 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4656 }
Chris Wilson461fb992016-05-14 07:26:33 +01004657 }
Chris Wilsonec7ce652016-09-21 14:51:07 +01004658 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilson461fb992016-05-14 07:26:33 +01004659
4660 return 0;
4661}
4662
Eric Anholtb9624422009-06-03 07:27:35 +00004663void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4664{
4665 struct drm_i915_file_private *file_priv = file->driver_priv;
Chris Wilson15f7bbc2016-07-26 12:01:52 +01004666 struct drm_i915_gem_request *request;
Eric Anholtb9624422009-06-03 07:27:35 +00004667
4668 /* Clean up our request list when the client is going away, so that
4669 * later retire_requests won't dereference our soon-to-be-gone
4670 * file_priv.
4671 */
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004672 spin_lock(&file_priv->mm.lock);
Chris Wilson15f7bbc2016-07-26 12:01:52 +01004673 list_for_each_entry(request, &file_priv->mm.request_list, client_list)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004674 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01004675 spin_unlock(&file_priv->mm.lock);
Chris Wilson31169712009-09-14 16:50:28 +01004676
Chris Wilson2e1b8732015-04-27 13:41:22 +01004677 if (!list_empty(&file_priv->rps.link)) {
Chris Wilson8d3afd72015-05-21 21:01:47 +01004678 spin_lock(&to_i915(dev)->rps.client_lock);
Chris Wilson2e1b8732015-04-27 13:41:22 +01004679 list_del(&file_priv->rps.link);
Chris Wilson8d3afd72015-05-21 21:01:47 +01004680 spin_unlock(&to_i915(dev)->rps.client_lock);
Chris Wilson1854d5c2015-04-07 16:20:32 +01004681 }
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004682}
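/*
 * Hedged sketch of the other side of the lock above: when a request
 * retires it unlinks itself from the client under the same spinlock, so
 * clearing ->file_priv here is sufficient to stop the retire path from
 * following a stale pointer (based on the request code of this era):
 *
 *	spin_lock(&file_priv->mm.lock);
 *	list_del(&request->client_list);
 *	request->file_priv = NULL;
 *	spin_unlock(&file_priv->mm.lock);
 */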
4683
4684int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4685{
4686 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08004687 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004688
4689 DRM_DEBUG_DRIVER("\n");
4690
4691 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4692 if (!file_priv)
4693 return -ENOMEM;
4694
4695 file->driver_priv = file_priv;
Dave Gordonf19ec8c2016-07-04 11:34:37 +01004696 file_priv->dev_priv = to_i915(dev);
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02004697 file_priv->file = file;
Chris Wilson2e1b8732015-04-27 13:41:22 +01004698 INIT_LIST_HEAD(&file_priv->rps.link);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004699
4700 spin_lock_init(&file_priv->mm.lock);
4701 INIT_LIST_HEAD(&file_priv->mm.request_list);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004702
Chris Wilsonc80ff162016-07-27 09:07:27 +01004703 file_priv->bsd_engine = -1;
Tvrtko Ursulinde1add32016-01-15 15:12:50 +00004704
Ben Widawskye422b882013-12-06 14:10:58 -08004705 ret = i915_gem_context_open(dev, file);
4706 if (ret)
4707 kfree(file_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004708
Ben Widawskye422b882013-12-06 14:10:58 -08004709 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004710}
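/*
 * Hedged note: bsd_engine == -1 means this client has not yet been bound
 * to a VCS engine.  On its first BSD submission the execbuffer path picks
 * one by toggling the device-wide dispatch index initialised in
 * i915_gem_load_init(), roughly (exact call assumed):
 *
 *	file_priv->bsd_engine =
 *		atomic_fetch_xor(1, &dev_priv->mm.bsd_engine_dispatch_index);
 */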
4711
Daniel Vetterb680c372014-09-19 18:27:27 +02004712/**
4713 * i915_gem_track_fb - update frontbuffer tracking
Geliang Tangd9072a32015-09-15 05:58:44 -07004714 * @old: current GEM buffer for the frontbuffer slots
4715 * @new: new GEM buffer for the frontbuffer slots
4716 * @frontbuffer_bits: bitmask of frontbuffer slots
Daniel Vetterb680c372014-09-19 18:27:27 +02004717 *
4718 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4719 * from @old and setting them in @new. Both @old and @new can be NULL.
4720 */
Daniel Vettera071fa02014-06-18 23:28:09 +02004721void i915_gem_track_fb(struct drm_i915_gem_object *old,
4722 struct drm_i915_gem_object *new,
4723 unsigned frontbuffer_bits)
4724{
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01004725	/* Control of individual bits within the mask is guarded by
4726 * the owning plane->mutex, i.e. we can never see concurrent
4727 * manipulation of individual bits. But since the bitfield as a whole
4728 * is updated using RMW, we need to use atomics in order to update
4729 * the bits.
4730 */
4731 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
4732 sizeof(atomic_t) * BITS_PER_BYTE);
4733
Daniel Vettera071fa02014-06-18 23:28:09 +02004734 if (old) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01004735 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
4736 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02004737 }
4738
4739 if (new) {
Chris Wilsonfaf5bf02016-08-04 16:32:37 +01004740 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
4741 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
Daniel Vettera071fa02014-06-18 23:28:09 +02004742 }
4743}
4744
Dave Gordon033908a2015-12-10 18:51:23 +00004745/* Like i915_gem_object_get_page(), but mark the returned page dirty */
4746struct page *
4747i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
4748{
4749 struct page *page;
4750
4751	/* Only objects backed by struct pages have per-page dirty tracking */
Chris Wilsonb9bcd142016-06-20 15:05:51 +01004752 if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
Dave Gordon033908a2015-12-10 18:51:23 +00004753 return NULL;
4754
4755 page = i915_gem_object_get_page(obj, n);
4756 set_page_dirty(page);
4757 return page;
4758}
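/*
 * Hedged usage sketch: paths that CPU-write into an object's backing store
 * (relocation fixups, the command parser) use this rather than
 * i915_gem_object_get_page() so the written page is flushed back to shmemfs
 * if the object is later swapped out:
 *
 *	page = i915_gem_object_get_dirty_page(obj, offset >> PAGE_SHIFT);
 *	vaddr = kmap_atomic(page);
 *	memcpy(vaddr + offset_in_page(offset), src, len);
 *	kunmap_atomic(vaddr);
 */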
4759
Dave Gordonea702992015-07-09 19:29:02 +01004760/* Allocate a new GEM object and fill it with the supplied data */
4761struct drm_i915_gem_object *
4762i915_gem_object_create_from_data(struct drm_device *dev,
4763 const void *data, size_t size)
4764{
4765 struct drm_i915_gem_object *obj;
4766 struct sg_table *sg;
4767 size_t bytes;
4768 int ret;
4769
Dave Gordond37cd8a2016-04-22 19:14:32 +01004770 obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
Chris Wilsonfe3db792016-04-25 13:32:13 +01004771 if (IS_ERR(obj))
Dave Gordonea702992015-07-09 19:29:02 +01004772 return obj;
4773
4774 ret = i915_gem_object_set_to_cpu_domain(obj, true);
4775 if (ret)
4776 goto fail;
4777
4778 ret = i915_gem_object_get_pages(obj);
4779 if (ret)
4780 goto fail;
4781
4782 i915_gem_object_pin_pages(obj);
4783 sg = obj->pages;
4784 bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
Dave Gordon9e7d18c2015-12-10 18:51:24 +00004785 obj->dirty = 1; /* Backing store is now out of date */
Dave Gordonea702992015-07-09 19:29:02 +01004786 i915_gem_object_unpin_pages(obj);
4787
4788 if (WARN_ON(bytes != size)) {
4789		DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
4790 ret = -EFAULT;
4791 goto fail;
4792 }
4793
4794 return obj;
4795
4796fail:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01004797 i915_gem_object_put(obj);
Dave Gordonea702992015-07-09 19:29:02 +01004798 return ERR_PTR(ret);
4799}
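/*
 * Hedged usage sketch (the firmware loaders are the expected consumers in
 * this era):
 *
 *	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * The returned object is in the CPU domain with its pages marked dirty,
 * so the copied data is preserved across a swap-out/swap-in cycle.
 */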