/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
                                                   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable);

static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
                                             struct shrink_control *sc);
static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
                                            struct shrink_control *sc);
static int i915_gem_shrinker_oom(struct notifier_block *nb,
                                 unsigned long event,
                                 void *ptr);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);

static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
{
        return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return true;

        return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
        if (obj->tiling_mode)
                i915_gem_release_mmap(obj);

        /* As we do not have an associated fence register, we will force
         * a tiling change if we ever need to acquire one.
         */
        obj->fence_dirty = false;
        obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     size_t size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
        int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
                   i915_terminally_wedged(error))
        if (EXIT_COND)
                return 0;

        /*
         * Only wait 10 seconds for the gpu reset to complete to avoid hanging
         * userspace. If it takes that long something really bad is going on and
         * we should simply try to bail out and fail as gracefully as possible.
         */
        ret = wait_event_interruptible_timeout(error->reset_queue,
                                               EXIT_COND,
                                               10*HZ);
        if (ret == 0) {
                DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
                return -EIO;
        } else if (ret < 0) {
                return ret;
        }
#undef EXIT_COND

        return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        WARN_ON(i915_verify_lists(dev));
        return 0;
}

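/*
 * Illustrative caller pattern (a sketch only, mirroring the ioctl handlers
 * later in this file): take the lock interruptibly, do the locked work and
 * drop struct_mutex on every exit path.
 *
 *      ret = i915_mutex_lock_interruptible(dev);
 *      if (ret)
 *              return ret;
 *      ... look up and touch GEM objects ...
 *      mutex_unlock(&dev->struct_mutex);
 */
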
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
        return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_init *args = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (args->gtt_start >= args->gtt_end ||
            (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
                return -EINVAL;

        /* GEM with user mode setting was never supported on ilk and later. */
        if (INTEL_INFO(dev)->gen >= 5)
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
                                  args->gtt_end);
        dev_priv->gtt.mappable_end = args->gtt_end;
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
        struct drm_i915_gem_object *obj;
        size_t pinned;

        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                if (i915_gem_obj_is_pinned(obj))
                        pinned += i915_gem_obj_ggtt_size(obj);
        mutex_unlock(&dev->struct_mutex);

        args->aper_size = dev_priv->gtt.base.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}

static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
{
        drm_dma_handle_t *phys = obj->phys_handle;

        if (!phys)
                return;

        if (obj->madv == I915_MADV_WILLNEED) {
                struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
                char *vaddr = phys->vaddr;
                int i;

                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page = shmem_read_mapping_page(mapping, i);
                        if (!IS_ERR(page)) {
                                char *dst = kmap_atomic(page);
                                memcpy(dst, vaddr, PAGE_SIZE);
                                drm_clflush_virt_range(dst, PAGE_SIZE);
                                kunmap_atomic(dst);

                                set_page_dirty(page);
                                mark_page_accessed(page);
                                page_cache_release(page);
                        }
                        vaddr += PAGE_SIZE;
                }
                i915_gem_chipset_flush(obj->base.dev);
        }

#ifdef CONFIG_X86
        set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
#endif
        drm_pci_free(obj->base.dev, phys);
        obj->phys_handle = NULL;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                            int align)
{
        drm_dma_handle_t *phys;
        struct address_space *mapping;
        char *vaddr;
        int i;

        if (obj->phys_handle) {
                if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
                        return -EBUSY;

                return 0;
        }

        if (obj->madv != I915_MADV_WILLNEED)
                return -EFAULT;

        if (obj->base.filp == NULL)
                return -EINVAL;

        /* create a new object */
        phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
        if (!phys)
                return -ENOMEM;

        vaddr = phys->vaddr;
#ifdef CONFIG_X86
        set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
#endif
        mapping = file_inode(obj->base.filp)->i_mapping;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                char *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page)) {
#ifdef CONFIG_X86
                        set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
#endif
                        drm_pci_free(obj->base.dev, phys);
                        return PTR_ERR(page);
                }

                src = kmap_atomic(page);
                memcpy(vaddr, src, PAGE_SIZE);
                kunmap_atomic(src);

                mark_page_accessed(page);
                page_cache_release(page);

                vaddr += PAGE_SIZE;
        }

        obj->phys_handle = phys;
        return 0;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file_priv)
{
        struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = to_user_ptr(args->data_ptr);

        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;

                /* The physical object once assigned is fixed for the lifetime
                 * of the obj, so we can safely drop the lock and continue
                 * to access vaddr.
                 */
                mutex_unlock(&dev->struct_mutex);
                unwritten = copy_from_user(vaddr, user_data, args->size);
                mutex_lock(&dev->struct_mutex);
                if (unwritten)
                        return -EFAULT;
        }

        i915_gem_chipset_flush(dev);
        return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
                uint32_t *handle_p)
{
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        /* Allocate the new object */
        obj = i915_gem_alloc_object(dev, size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
        if (ret)
                return ret;

        *handle_p = handle;
        return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
                               args->size, &args->handle);
}

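/*
 * Worked example of the pitch/size math above (illustrative numbers only):
 * a 1366x768 dumb buffer at 32 bpp needs 1366 * 4 = 5464 bytes per row,
 * which ALIGN() rounds up to 5504, the next multiple of 64; args->size is
 * then 5504 * 768 bytes, and i915_gem_create() rounds the total up to a
 * page multiple.
 */
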
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_create *args = data;

        return i915_gem_create(file, dev,
                               args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
                        int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_to_user(cpu_vaddr + cpu_offset,
                                     gpu_vaddr + swizzled_gpu_offset,
                                     this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
                          const char __user *cpu_vaddr,
                          int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
                                       cpu_vaddr + cpu_offset,
                                       this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

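/*
 * Worked example for the helpers above (a sketch, not extra behaviour):
 * swizzled_gpu_offset = gpu_offset ^ 64 swaps the two 64-byte halves of
 * every 128-byte block, so a 96-byte copy starting at gpu_offset 0 is
 * split at each cacheline boundary and reads from GPU offsets
 *
 *      cpu 0..63  <- gpu 64..127      (0 ^ 64 == 64)
 *      cpu 64..95 <- gpu 0..31        (64 ^ 64 == 0)
 *
 * Chunking on cacheline_end guarantees the XOR is constant within each
 * __copy_to_user()/__copy_from_user() call.
 */
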
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
                                    int *needs_clflush)
{
        int ret;

        *needs_clflush = 0;

        if (!obj->base.filp)
                return -EINVAL;

        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
                /* If we're not in the cpu read domain, set ourselves into the
                 * gtt read domain and manually flush cachelines (if required).
                 * This optimizes for the case when the gpu will dirty the data
                 * anyway again before the next pread happens. */
                *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
                                                        obj->cache_level);
                ret = i915_gem_object_wait_rendering(obj, true);
                if (ret)
                        return ret;

                i915_gem_object_retire(obj);
        }

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_pin_pages(obj);

        return ret;
}

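/*
 * Usage sketch (illustrative, matching i915_gem_shmem_pread() below): a
 * successful prepare must always be paired with an unpin once the caller
 * has finished reading the backing pages.
 *
 *      ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 *      if (ret)
 *              return ret;
 *      ... copy out of obj->pages, clflushing first if needs_clflush ...
 *      i915_gem_object_unpin_pages(obj);
 */
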
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_to_user_inatomic(user_data,
                                      vaddr + shmem_page_offset,
                                      page_length);
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
                             bool swizzled)
{
        if (unlikely(swizzled)) {
                unsigned long start = (unsigned long) addr;
                unsigned long end = (unsigned long) addr + length;

                /* For swizzling simply ensure that we always flush both
                 * channels. Lame, but simple and it works. Swizzled
                 * pwrite/pread is far from a hotpath - current userspace
                 * doesn't use it at all. */
                start = round_down(start, 128);
                end = round_up(end, 128);

                drm_clflush_virt_range((void *)start, end - start);
        } else {
                drm_clflush_virt_range(addr, length);
        }
}

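/*
 * Example of the rounding above (numbers for illustration only): flushing
 * 16 bytes at offset 0x90 within a swizzled page widens to the range
 * [0x80, 0x100), so both 64-byte halves of the enclosing 128-byte block -
 * i.e. both memory channels - get flushed.
 */
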
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (needs_clflush)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);

        if (page_do_bit17_swizzling)
                ret = __copy_to_user_swizzled(user_data,
                                              vaddr, shmem_page_offset,
                                              page_length);
        else
                ret = __copy_to_user(user_data,
                                     vaddr + shmem_page_offset,
                                     page_length);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args,
                     struct drm_file *file)
{
        char __user *user_data;
        ssize_t remain;
        loff_t offset;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int prefaulted = 0;
        int needs_clflush = 0;
        struct sg_page_iter sg_iter;

        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
        if (ret)
                return ret;

        offset = args->offset;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (remain <= 0)
                        break;

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);
                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pread_fast(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);
                if (ret == 0)
                        goto next_page;

                mutex_unlock(&dev->struct_mutex);

                if (likely(!i915.prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
                         * data up to the first fault. Hence ignore any errors
                         * and just continue. */
                        (void)ret;
                        prefaulted = 1;
                }

                ret = shmem_pread_slow(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);

                mutex_lock(&dev->struct_mutex);

                if (ret)
                        goto out;

next_page:
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out:
        i915_gem_object_unpin_pages(obj);

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret = 0;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_WRITE,
                       to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Bounds check source. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        /* prime objects have no backing filp to GEM pread/pwrite
         * pages from.
         */
        if (!obj->base.filp) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);

        ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        void __iomem *vaddr_atomic;
        void *vaddr;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        /* We can use the cpu mem copy function because this is X86. */
        vaddr = (void __force *)vaddr_atomic + page_offset;
        unwritten = __copy_from_user_inatomic_nocache(vaddr,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                         struct drm_i915_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length, ret;

        ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
        if (ret)
                goto out;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto out_unpin;

        ret = i915_gem_object_put_fence(obj);
        if (ret)
                goto out_unpin;

        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;

        offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = offset & PAGE_MASK;
                page_offset = offset_in_page(offset);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
                if (fast_user_write(dev_priv->gtt.mappable, page_base,
                                    page_offset, user_data, page_length)) {
                        ret = -EFAULT;
                        goto out_unpin;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out_unpin:
        i915_gem_object_ggtt_unpin(obj);
out:
        return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
                                        user_data, page_length);
        if (needs_clflush_after)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        if (page_do_bit17_swizzling)
                ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
                                                user_data,
                                                page_length);
        else
                ret = __copy_from_user(vaddr + shmem_page_offset,
                                       user_data,
                                       page_length);
        if (needs_clflush_after)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
                      struct drm_i915_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file)
{
        ssize_t remain;
        loff_t offset;
        char __user *user_data;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
        int needs_clflush_after = 0;
        int needs_clflush_before = 0;
        struct sg_page_iter sg_iter;

        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                /* If we're not in the cpu write domain, set ourselves into the
                 * gtt write domain and manually flush cachelines (if required).
                 * This optimizes for the case when the gpu will use the data
                 * right away and we therefore have to clflush anyway. */
                needs_clflush_after = cpu_write_needs_clflush(obj);
                ret = i915_gem_object_wait_rendering(obj, false);
                if (ret)
                        return ret;

                i915_gem_object_retire(obj);
        }
        /* Same trick applies to invalidate partially written cachelines read
         * before writing. */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
                needs_clflush_before =
                        !cpu_cache_is_coherent(dev, obj->cache_level);

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_pin_pages(obj);

        offset = args->offset;
        obj->dirty = 1;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
                struct page *page = sg_page_iter_page(&sg_iter);
                int partial_cacheline_write;

                if (remain <= 0)
                        break;

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                /* If we don't overwrite a cacheline completely we need to be
                 * careful to have up-to-date data by first clflushing. Don't
                 * overcomplicate things and flush the entire page. */
                partial_cacheline_write = needs_clflush_before &&
                        ((shmem_page_offset | page_length)
                                & (boot_cpu_data.x86_clflush_size - 1));

                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);
                if (ret == 0)
                        goto next_page;

                hit_slowpath = 1;
                mutex_unlock(&dev->struct_mutex);
                ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);

                mutex_lock(&dev->struct_mutex);

                if (ret)
                        goto out;

next_page:
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out:
        i915_gem_object_unpin_pages(obj);

        if (hit_slowpath) {
                /*
                 * Fixup: Flush cpu caches in case we didn't flush the dirty
                 * cachelines in-line while writing and the object moved
                 * out of the cpu write domain while we've dropped the lock.
                 */
                if (!needs_clflush_after &&
                    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                        if (i915_gem_clflush_object(obj, obj->pin_display))
                                i915_gem_chipset_flush(dev);
                }
        }

        if (needs_clflush_after)
                i915_gem_chipset_flush(dev);

        return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_READ,
                       to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        if (likely(!i915.prefault_disable)) {
                ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
                                                   args->size);
                if (ret)
                        return -EFAULT;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Bounds check destination. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        /* prime objects have no backing filp to GEM pread/pwrite
         * pages from.
         */
        if (!obj->base.filp) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj->phys_handle) {
                ret = i915_gem_phys_pwrite(obj, args, file);
                goto out;
        }

        if (obj->tiling_mode == I915_TILING_NONE &&
            obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
            cpu_write_needs_clflush(obj)) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case. */
        }

        if (ret == -EFAULT || ret == -ENOSPC)
                ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
                     bool interruptible)
{
        if (i915_reset_in_progress(error)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these. */
                if (!interruptible)
                        return -EIO;

                /* Recovery complete, but the reset failed ... */
                if (i915_terminally_wedged(error))
                        return -EIO;

                return -EAGAIN;
        }

        return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
        int ret;

        BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

        ret = 0;
        if (seqno == ring->outstanding_lazy_seqno)
                ret = i915_add_request(ring, NULL);

        return ret;
}

static void fake_irq(unsigned long data)
{
        wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
                       struct intel_engine_cs *ring)
{
        return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
        if (file_priv == NULL)
                return true;

        return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

Chris Wilsonb3612372012-08-24 09:35:08 +01001131/**
1132 * __wait_seqno - wait until execution of seqno has finished
1133 * @ring: the ring expected to report seqno
1134 * @seqno: duh!
Daniel Vetterf69061b2012-12-06 09:01:42 +01001135 * @reset_counter: reset sequence associated with the given seqno
Chris Wilsonb3612372012-08-24 09:35:08 +01001136 * @interruptible: do an interruptible wait (normally yes)
1137 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1138 *
Daniel Vetterf69061b2012-12-06 09:01:42 +01001139 * Note: It is of utmost importance that the passed in seqno and reset_counter
1140 * values have been read by the caller in an smp safe manner. Where read-side
1141 * locks are involved, it is sufficient to read the reset_counter before
1142 * unlocking the lock that protects the seqno. For lockless tricks, the
1143 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1144 * inserted.
1145 *
Chris Wilsonb3612372012-08-24 09:35:08 +01001146 * Returns 0 if the seqno was found within the alloted time. Else returns the
1147 * errno with remaining time filled in timeout argument.
1148 */
Oscar Mateoa4872ba2014-05-22 14:13:33 +01001149static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
Daniel Vetterf69061b2012-12-06 09:01:42 +01001150 unsigned reset_counter,
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001151 bool interruptible,
1152 struct timespec *timeout,
1153 struct drm_i915_file_private *file_priv)
Chris Wilsonb3612372012-08-24 09:35:08 +01001154{
Damien Lespiau3d13ef22014-02-07 19:12:47 +00001155 struct drm_device *dev = ring->dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03001156 struct drm_i915_private *dev_priv = dev->dev_private;
Mika Kuoppala168c3f22013-12-12 17:54:42 +02001157 const bool irq_test_in_progress =
1158 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
Chris Wilson094f9a52013-09-25 17:34:55 +01001159 struct timespec before, now;
1160 DEFINE_WAIT(wait);
Mika Kuoppala47e9766d2013-12-10 17:02:43 +02001161 unsigned long timeout_expire;
Chris Wilsonb3612372012-08-24 09:35:08 +01001162 int ret;
1163
Jesse Barnes9df7575f2014-06-20 09:29:20 -07001164 WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
Paulo Zanonic67a4702013-08-19 13:18:09 -03001165
Chris Wilsonb3612372012-08-24 09:35:08 +01001166 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1167 return 0;
1168
Mika Kuoppala47e9766d2013-12-10 17:02:43 +02001169 timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
Chris Wilsonb3612372012-08-24 09:35:08 +01001170
Chris Wilsonec5cc0f2014-06-12 10:28:55 +01001171 if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001172 gen6_rps_boost(dev_priv);
1173 if (file_priv)
1174 mod_delayed_work(dev_priv->wq,
1175 &file_priv->mm.idle_work,
1176 msecs_to_jiffies(100));
1177 }
1178
Mika Kuoppala168c3f22013-12-12 17:54:42 +02001179 if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
Chris Wilsonb3612372012-08-24 09:35:08 +01001180 return -ENODEV;
1181
Chris Wilson094f9a52013-09-25 17:34:55 +01001182 /* Record current time in case interrupted by signal, or wedged */
1183 trace_i915_gem_request_wait_begin(ring, seqno);
Chris Wilsonb3612372012-08-24 09:35:08 +01001184 getrawmonotonic(&before);
Chris Wilson094f9a52013-09-25 17:34:55 +01001185 for (;;) {
1186 struct timer_list timer;
Chris Wilsonb3612372012-08-24 09:35:08 +01001187
Chris Wilson094f9a52013-09-25 17:34:55 +01001188 prepare_to_wait(&ring->irq_queue, &wait,
1189 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
Chris Wilsonb3612372012-08-24 09:35:08 +01001190
Daniel Vetterf69061b2012-12-06 09:01:42 +01001191 /* We need to check whether any gpu reset happened in between
1192 * the caller grabbing the seqno and now ... */
Chris Wilson094f9a52013-09-25 17:34:55 +01001193 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1194 /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1195 * is truely gone. */
1196 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1197 if (ret == 0)
1198 ret = -EAGAIN;
1199 break;
1200 }
Daniel Vetterf69061b2012-12-06 09:01:42 +01001201
Chris Wilson094f9a52013-09-25 17:34:55 +01001202 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1203 ret = 0;
1204 break;
1205 }
Chris Wilsonb3612372012-08-24 09:35:08 +01001206
Chris Wilson094f9a52013-09-25 17:34:55 +01001207 if (interruptible && signal_pending(current)) {
1208 ret = -ERESTARTSYS;
1209 break;
1210 }
1211
Mika Kuoppala47e9766d2013-12-10 17:02:43 +02001212 if (timeout && time_after_eq(jiffies, timeout_expire)) {
Chris Wilson094f9a52013-09-25 17:34:55 +01001213 ret = -ETIME;
1214 break;
1215 }
1216
1217 timer.function = NULL;
1218 if (timeout || missed_irq(dev_priv, ring)) {
Mika Kuoppala47e9766d2013-12-10 17:02:43 +02001219 unsigned long expire;
1220
Chris Wilson094f9a52013-09-25 17:34:55 +01001221 setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
Mika Kuoppala47e9766d2013-12-10 17:02:43 +02001222 expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
Chris Wilson094f9a52013-09-25 17:34:55 +01001223 mod_timer(&timer, expire);
1224 }
1225
Chris Wilson5035c272013-10-04 09:58:46 +01001226 io_schedule();
Chris Wilson094f9a52013-09-25 17:34:55 +01001227
Chris Wilson094f9a52013-09-25 17:34:55 +01001228 if (timer.function) {
1229 del_singleshot_timer_sync(&timer);
1230 destroy_timer_on_stack(&timer);
1231 }
1232 }
Chris Wilsonb3612372012-08-24 09:35:08 +01001233 getrawmonotonic(&now);
Chris Wilson094f9a52013-09-25 17:34:55 +01001234 trace_i915_gem_request_wait_end(ring, seqno);
Chris Wilsonb3612372012-08-24 09:35:08 +01001235
Mika Kuoppala168c3f22013-12-12 17:54:42 +02001236 if (!irq_test_in_progress)
1237 ring->irq_put(ring);
Chris Wilson094f9a52013-09-25 17:34:55 +01001238
1239 finish_wait(&ring->irq_queue, &wait);
Chris Wilsonb3612372012-08-24 09:35:08 +01001240
1241 if (timeout) {
1242 struct timespec sleep_time = timespec_sub(now, before);
1243 *timeout = timespec_sub(*timeout, sleep_time);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03001244 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1245 set_normalized_timespec(timeout, 0, 0);
Chris Wilsonb3612372012-08-24 09:35:08 +01001246 }
1247
Chris Wilson094f9a52013-09-25 17:34:55 +01001248 return ret;
Chris Wilsonb3612372012-08-24 09:35:08 +01001249}
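/*
 * Worked example of the timeout bookkeeping above (an illustrative
 * note, not driver code): if the caller passed in *timeout = 100ms and
 * the wait slept for 30ms, timespec_sub() leaves *timeout = 70ms for a
 * subsequent wait; if the wait overran and slept for 120ms, the result
 * is negative, fails timespec_valid(), and is normalised back to zero
 * so userspace never observes a negative remainder.
 */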
1250
1251/**
1252 * Waits for a sequence number to be signaled, and cleans up the
1253 * request and object lists appropriately for that event.
1254 */
1255int
Oscar Mateoa4872ba2014-05-22 14:13:33 +01001256i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
Chris Wilsonb3612372012-08-24 09:35:08 +01001257{
1258 struct drm_device *dev = ring->dev;
1259 struct drm_i915_private *dev_priv = dev->dev_private;
1260 bool interruptible = dev_priv->mm.interruptible;
1261 int ret;
1262
1263 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1264 BUG_ON(seqno == 0);
1265
Daniel Vetter33196de2012-11-14 17:14:05 +01001266 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
Chris Wilsonb3612372012-08-24 09:35:08 +01001267 if (ret)
1268 return ret;
1269
1270 ret = i915_gem_check_olr(ring, seqno);
1271 if (ret)
1272 return ret;
1273
Daniel Vetterf69061b2012-12-06 09:01:42 +01001274 return __wait_seqno(ring, seqno,
1275 atomic_read(&dev_priv->gpu_error.reset_counter),
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001276 interruptible, NULL, NULL);
Chris Wilsonb3612372012-08-24 09:35:08 +01001277}
1278
Chris Wilsond26e3af2013-06-29 22:05:26 +01001279static int
1280i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01001281 struct intel_engine_cs *ring)
Chris Wilsond26e3af2013-06-29 22:05:26 +01001282{
Chris Wilsonc8725f32014-03-17 12:21:55 +00001283 if (!obj->active)
1284 return 0;
Chris Wilsond26e3af2013-06-29 22:05:26 +01001285
1286 /* Manually manage the write flush as we may not yet have
1287 * retired the buffer.
1288 *
1289 * Note that the last_write_seqno is always the earlier of
1290 * the two (read/write) seqnos, so if we have successfully waited,
1291 * we know we have passed the last write.
1292 */
1293 obj->last_write_seqno = 0;
Chris Wilsond26e3af2013-06-29 22:05:26 +01001294
1295 return 0;
1296}
1297
Chris Wilsonb3612372012-08-24 09:35:08 +01001298/**
1299 * Ensures that all rendering to the object has completed and the object is
1300 * safe to unbind from the GTT or access from the CPU.
1301 */
1302static __must_check int
1303i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1304 bool readonly)
1305{
Oscar Mateoa4872ba2014-05-22 14:13:33 +01001306 struct intel_engine_cs *ring = obj->ring;
Chris Wilsonb3612372012-08-24 09:35:08 +01001307 u32 seqno;
1308 int ret;
1309
1310 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1311 if (seqno == 0)
1312 return 0;
1313
1314 ret = i915_wait_seqno(ring, seqno);
1315 if (ret)
1316 return ret;
1317
Chris Wilsond26e3af2013-06-29 22:05:26 +01001318 return i915_gem_object_wait_rendering__tail(obj, ring);
Chris Wilsonb3612372012-08-24 09:35:08 +01001319}
1320
Chris Wilson3236f572012-08-24 09:35:09 +01001321/* A nonblocking variant of the above wait. This is a highly dangerous routine
1322 * as the object state may change during this call.
1323 */
1324static __must_check int
1325i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
Chris Wilson6e4930f2014-02-07 18:37:06 -02001326 struct drm_i915_file_private *file_priv,
Chris Wilson3236f572012-08-24 09:35:09 +01001327 bool readonly)
1328{
1329 struct drm_device *dev = obj->base.dev;
1330 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01001331 struct intel_engine_cs *ring = obj->ring;
Daniel Vetterf69061b2012-12-06 09:01:42 +01001332 unsigned reset_counter;
Chris Wilson3236f572012-08-24 09:35:09 +01001333 u32 seqno;
1334 int ret;
1335
1336 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1337 BUG_ON(!dev_priv->mm.interruptible);
1338
1339 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1340 if (seqno == 0)
1341 return 0;
1342
Daniel Vetter33196de2012-11-14 17:14:05 +01001343 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
Chris Wilson3236f572012-08-24 09:35:09 +01001344 if (ret)
1345 return ret;
1346
1347 ret = i915_gem_check_olr(ring, seqno);
1348 if (ret)
1349 return ret;
1350
Daniel Vetterf69061b2012-12-06 09:01:42 +01001351 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson3236f572012-08-24 09:35:09 +01001352 mutex_unlock(&dev->struct_mutex);
Chris Wilson6e4930f2014-02-07 18:37:06 -02001353 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
Chris Wilson3236f572012-08-24 09:35:09 +01001354 mutex_lock(&dev->struct_mutex);
Chris Wilsond26e3af2013-06-29 22:05:26 +01001355 if (ret)
1356 return ret;
Chris Wilson3236f572012-08-24 09:35:09 +01001357
Chris Wilsond26e3af2013-06-29 22:05:26 +01001358 return i915_gem_object_wait_rendering__tail(obj, ring);
Chris Wilson3236f572012-08-24 09:35:09 +01001359}
1360
Eric Anholt673a3942008-07-30 12:06:12 -07001361/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001362 * Called when user space prepares to use an object with the CPU, either
1363 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -07001364 */
1365int
1366i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001367 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001368{
1369 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001370 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001371 uint32_t read_domains = args->read_domains;
1372 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001373 int ret;
1374
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001375 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +01001376 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001377 return -EINVAL;
1378
Chris Wilson21d509e2009-06-06 09:46:02 +01001379 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001380 return -EINVAL;
1381
1382 /* Having something in the write domain implies it's in the read
1383 * domain, and only that read domain. Enforce that in the request.
1384 */
1385 if (write_domain != 0 && read_domains != write_domain)
1386 return -EINVAL;
1387
Chris Wilson76c1dec2010-09-25 11:22:51 +01001388 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001389 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001390 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001391
Chris Wilson05394f32010-11-08 19:18:58 +00001392 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001393 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001394 ret = -ENOENT;
1395 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +01001396 }
Jesse Barnes652c3932009-08-17 13:31:43 -07001397
Chris Wilson3236f572012-08-24 09:35:09 +01001398 /* Try to flush the object off the GPU without holding the lock.
1399 * We will repeat the flush holding the lock in the normal manner
1400 * to catch cases where we are gazumped.
1401 */
Chris Wilson6e4930f2014-02-07 18:37:06 -02001402 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1403 file->driver_priv,
1404 !write_domain);
Chris Wilson3236f572012-08-24 09:35:09 +01001405 if (ret)
1406 goto unref;
1407
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001408 if (read_domains & I915_GEM_DOMAIN_GTT) {
1409 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001410
1411 /* Silently promote "you're not bound, there was nothing to do"
1412 * to success, since the client was just asking us to
1413 * make sure everything was done.
1414 */
1415 if (ret == -EINVAL)
1416 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001417 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001418 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001419 }
1420
Chris Wilson3236f572012-08-24 09:35:09 +01001421unref:
Chris Wilson05394f32010-11-08 19:18:58 +00001422 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001423unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001424 mutex_unlock(&dev->struct_mutex);
1425 return ret;
1426}
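/*
 * Illustrative userspace sketch (not driver code) of driving this
 * ioctl to make an object coherent for CPU reads after GPU rendering.
 * "fd" and "handle" are assumed to exist; drmIoctl() is libdrm's
 * EINTR-restarting ioctl wrapper.
 */
#if 0
	struct drm_i915_gem_set_domain sd = {
		.handle = handle,
		.read_domains = I915_GEM_DOMAIN_CPU,
		.write_domain = 0,		/* read-only access */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
		/* the wait above is interruptible, so this can fail */
		perror("set_domain");
#endif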
1427
1428/**
1429 * Called when user space has done writes to this buffer
1430 */
1431int
1432i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001433 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001434{
1435 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001436 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001437 int ret = 0;
1438
Chris Wilson76c1dec2010-09-25 11:22:51 +01001439 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001440 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001441 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001442
Chris Wilson05394f32010-11-08 19:18:58 +00001443 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001444 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001445 ret = -ENOENT;
1446 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001447 }
1448
Eric Anholt673a3942008-07-30 12:06:12 -07001449 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson2c225692013-08-09 12:26:45 +01001450 if (obj->pin_display)
1451 i915_gem_object_flush_cpu_write_domain(obj, true);
Eric Anholte47c68e2008-11-14 13:35:19 -08001452
Chris Wilson05394f32010-11-08 19:18:58 +00001453 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001454unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001455 mutex_unlock(&dev->struct_mutex);
1456 return ret;
1457}
1458
1459/**
1460 * Maps the contents of an object, returning the address it is mapped
1461 * into.
1462 *
1463 * While the mapping holds a reference on the contents of the object, it doesn't
1464 * imply a ref on the object itself.
1465 */
1466int
1467i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001468 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001469{
1470 struct drm_i915_gem_mmap *args = data;
1471 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001472 unsigned long addr;
1473
Chris Wilson05394f32010-11-08 19:18:58 +00001474 obj = drm_gem_object_lookup(dev, file, args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07001475 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001476 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001477
Daniel Vetter1286ff72012-05-10 15:25:09 +02001478 /* prime objects have no backing filp to GEM mmap
1479 * pages from.
1480 */
1481 if (!obj->filp) {
1482 drm_gem_object_unreference_unlocked(obj);
1483 return -EINVAL;
1484 }
1485
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001486 addr = vm_mmap(obj->filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001487 PROT_READ | PROT_WRITE, MAP_SHARED,
1488 args->offset);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001489 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001490 if (IS_ERR((void *)addr))
1491 return addr;
1492
1493 args->addr_ptr = (uint64_t) addr;
1494
1495 return 0;
1496}
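/*
 * Illustrative userspace sketch (not driver code): obtaining a
 * CPU-coherent shmem mapping of a GEM object through this ioctl.
 * "fd", "handle" and "size" are assumed to come from object creation.
 */
#if 0
	struct drm_i915_gem_mmap mm = {
		.handle = handle,
		.offset = 0,
		.size = size,
	};
	void *ptr = NULL;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mm) == 0)
		ptr = (void *)(uintptr_t)mm.addr_ptr;
	/* note: no GTT involvement; this maps the shmem backing store */
#endif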
1497
Jesse Barnesde151cf2008-11-12 10:03:55 -08001498/**
1499 * i915_gem_fault - fault a page into the GTT
1500 * @vma: VMA in question
 1501 * @vmf: fault info
1502 *
1503 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1504 * from userspace. The fault handler takes care of binding the object to
1505 * the GTT (if needed), allocating and programming a fence register (again,
1506 * only if needed based on whether the old reg is still valid or the object
1507 * is tiled) and inserting a new PTE into the faulting process.
1508 *
1509 * Note that the faulting process may involve evicting existing objects
1510 * from the GTT and/or fence registers to make room. So performance may
1511 * suffer if the GTT working set is large or there are few fence registers
1512 * left.
1513 */
1514int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1515{
Chris Wilson05394f32010-11-08 19:18:58 +00001516 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1517 struct drm_device *dev = obj->base.dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03001518 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001519 pgoff_t page_offset;
1520 unsigned long pfn;
1521 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001522 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001523
Paulo Zanonif65c9162013-11-27 18:20:34 -02001524 intel_runtime_pm_get(dev_priv);
1525
Jesse Barnesde151cf2008-11-12 10:03:55 -08001526 /* We don't use vmf->pgoff since that has the fake offset */
1527 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1528 PAGE_SHIFT;
1529
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001530 ret = i915_mutex_lock_interruptible(dev);
1531 if (ret)
1532 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001533
Chris Wilsondb53a302011-02-03 11:57:46 +00001534 trace_i915_gem_object_fault(obj, page_offset, true, write);
1535
Chris Wilson6e4930f2014-02-07 18:37:06 -02001536 /* Try to flush the object off the GPU first without holding the lock.
1537 * Upon reacquiring the lock, we will perform our sanity checks and then
1538 * repeat the flush holding the lock in the normal manner to catch cases
1539 * where we are gazumped.
1540 */
1541 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1542 if (ret)
1543 goto unlock;
1544
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001545 /* Access to snoopable pages through the GTT is incoherent. */
1546 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
Chris Wilsonddeff6e2014-05-28 16:16:41 +01001547 ret = -EFAULT;
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001548 goto unlock;
1549 }
1550
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001551 /* Now bind it into the GTT if needed */
Daniel Vetter1ec9e262014-02-14 14:01:11 +01001552 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001553 if (ret)
1554 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001555
Chris Wilsonc9839302012-11-20 10:45:17 +00001556 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1557 if (ret)
1558 goto unpin;
1559
1560 ret = i915_gem_object_get_fence(obj);
1561 if (ret)
1562 goto unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001563
Chris Wilsonb90b91d2014-06-10 12:14:40 +01001564 /* Finally, remap it using the new GTT offset */
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001565 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1566 pfn >>= PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001567
Chris Wilsonb90b91d2014-06-10 12:14:40 +01001568 if (!obj->fault_mappable) {
Ville Syrjäläbeff0d02014-06-17 21:03:00 +03001569 unsigned long size = min_t(unsigned long,
1570 vma->vm_end - vma->vm_start,
1571 obj->base.size);
Chris Wilsonb90b91d2014-06-10 12:14:40 +01001572 int i;
1573
Ville Syrjäläbeff0d02014-06-17 21:03:00 +03001574 for (i = 0; i < size >> PAGE_SHIFT; i++) {
Chris Wilsonb90b91d2014-06-10 12:14:40 +01001575 ret = vm_insert_pfn(vma,
1576 (unsigned long)vma->vm_start + i * PAGE_SIZE,
1577 pfn + i);
1578 if (ret)
1579 break;
1580 }
1581
1582 obj->fault_mappable = true;
1583 } else
1584 ret = vm_insert_pfn(vma,
1585 (unsigned long)vmf->virtual_address,
1586 pfn + page_offset);
Chris Wilsonc9839302012-11-20 10:45:17 +00001587unpin:
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08001588 i915_gem_object_ggtt_unpin(obj);
Chris Wilsonc7150892009-09-23 00:43:56 +01001589unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001590 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001591out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001592 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001593 case -EIO:
Daniel Vettera9340cc2012-07-04 22:18:42 +02001594 /* If this -EIO is due to a gpu hang, give the reset code a
1595 * chance to clean up the mess. Otherwise return the proper
1596 * SIGBUS. */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001597 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
1598 ret = VM_FAULT_SIGBUS;
1599 break;
1600 }
Chris Wilson045e7692010-11-07 09:18:22 +00001601 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001602 /*
1603 * EAGAIN means the gpu is hung and we'll wait for the error
1604 * handler to reset everything when re-faulting in
1605 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001606 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001607 case 0:
1608 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001609 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001610 case -EBUSY:
1611 /*
1612 * EBUSY is ok: this just means that another thread
1613 * already did the job.
1614 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001615 ret = VM_FAULT_NOPAGE;
1616 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001617 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001618 ret = VM_FAULT_OOM;
1619 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001620 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00001621 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001622 ret = VM_FAULT_SIGBUS;
1623 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001624 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001625 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02001626 ret = VM_FAULT_SIGBUS;
1627 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001628 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02001629
1630 intel_runtime_pm_put(dev_priv);
1631 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001632}
1633
1634/**
Chris Wilson901782b2009-07-10 08:18:50 +01001635 * i915_gem_release_mmap - remove physical page mappings
1636 * @obj: obj in question
1637 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001638 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001639 * relinquish ownership of the pages back to the system.
1640 *
1641 * It is vital that we remove the page mapping if we have mapped a tiled
1642 * object through the GTT and then lose the fence register due to
1643 * resource pressure. Similarly if the object has been moved out of the
1644 * aperture, then pages mapped into userspace must be revoked. Removing the
1645 * mapping will then trigger a page fault on the next user access, allowing
1646 * fixup by i915_gem_fault().
1647 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001648void
Chris Wilson05394f32010-11-08 19:18:58 +00001649i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001650{
Chris Wilson6299f992010-11-24 12:23:44 +00001651 if (!obj->fault_mappable)
1652 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001653
David Herrmann6796cb12014-01-03 14:24:19 +01001654 drm_vma_node_unmap(&obj->base.vma_node,
1655 obj->base.dev->anon_inode->i_mapping);
Chris Wilson6299f992010-11-24 12:23:44 +00001656 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001657}
1658
Chris Wilson6254b202014-06-16 08:57:44 +01001659void
1660i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1661{
1662 struct drm_i915_gem_object *obj;
1663
1664 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1665 i915_gem_release_mmap(obj);
1666}
1667
Imre Deak0fa87792013-01-07 21:47:35 +02001668uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001669i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001670{
Chris Wilsone28f8712011-07-18 13:11:49 -07001671 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001672
1673 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001674 tiling_mode == I915_TILING_NONE)
1675 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001676
1677 /* Previous chips need a power-of-two fence region when tiling */
1678 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001679 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001680 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001681 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001682
Chris Wilsone28f8712011-07-18 13:11:49 -07001683 while (gtt_size < size)
1684 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001685
Chris Wilsone28f8712011-07-18 13:11:49 -07001686 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001687}
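/*
 * Worked example (illustrative): a 700KiB tiled object on gen3 starts
 * from the 1MiB minimum, which already covers it, so the fence region
 * is 1MiB; on gen2 it starts at 512KiB and doubles once to 1MiB. A
 * 1.5MiB tiled object doubles to 2MiB on either. Untiled objects and
 * gen4+ simply use the object size.
 */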
1688
Jesse Barnesde151cf2008-11-12 10:03:55 -08001689/**
1690 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1691 * @dev: drm device
 * @size: object size
 * @tiling_mode: tiling mode of the object
 * @fenced: whether the object will be accessed through a fence register
1692 *
1693 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001694 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001695 */
Imre Deakd8651102013-01-07 21:47:33 +02001696uint32_t
1697i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1698 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001699{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001700 /*
1701 * Minimum alignment is 4k (GTT page size), but might be greater
1702 * if a fence register is needed for the object.
1703 */
Imre Deakd8651102013-01-07 21:47:33 +02001704 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001705 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001706 return 4096;
1707
1708 /*
1709 * Previous chips need to be aligned to the size of the smallest
1710 * fence register that can contain the object.
1711 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001712 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001713}
1714
Chris Wilsond8cb5082012-08-11 15:41:03 +01001715static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1716{
1717 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1718 int ret;
1719
David Herrmann0de23972013-07-24 21:07:52 +02001720 if (drm_vma_node_has_offset(&obj->base.vma_node))
Chris Wilsond8cb5082012-08-11 15:41:03 +01001721 return 0;
1722
Daniel Vetterda494d72012-12-20 15:11:16 +01001723 dev_priv->mm.shrinker_no_lock_stealing = true;
1724
Chris Wilsond8cb5082012-08-11 15:41:03 +01001725 ret = drm_gem_create_mmap_offset(&obj->base);
1726 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001727 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001728
1729 /* Badly fragmented mmap space? The only way we can recover
1730 * space is by destroying unwanted objects. We can't randomly release
1731 * mmap_offsets as userspace expects them to be persistent for the
1732 * lifetime of the objects. The closest we can do is to release the
 1733 * offsets on purgeable objects by truncating them and marking them
 1734 * purged, which prevents userspace from ever using those objects again.
1735 */
1736 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1737 ret = drm_gem_create_mmap_offset(&obj->base);
1738 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001739 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001740
1741 i915_gem_shrink_all(dev_priv);
Daniel Vetterda494d72012-12-20 15:11:16 +01001742 ret = drm_gem_create_mmap_offset(&obj->base);
1743out:
1744 dev_priv->mm.shrinker_no_lock_stealing = false;
1745
1746 return ret;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001747}
1748
1749static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1750{
Chris Wilsond8cb5082012-08-11 15:41:03 +01001751 drm_gem_free_mmap_offset(&obj->base);
1752}
1753
Jesse Barnesde151cf2008-11-12 10:03:55 -08001754int
Dave Airlieff72145b2011-02-07 12:16:14 +10001755i915_gem_mmap_gtt(struct drm_file *file,
1756 struct drm_device *dev,
1757 uint32_t handle,
1758 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001759{
Chris Wilsonda761a62010-10-27 17:37:08 +01001760 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001761 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001762 int ret;
1763
Chris Wilson76c1dec2010-09-25 11:22:51 +01001764 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001765 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001766 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001767
Dave Airlieff72145b2011-02-07 12:16:14 +10001768 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001769 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001770 ret = -ENOENT;
1771 goto unlock;
1772 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001773
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001774 if (obj->base.size > dev_priv->gtt.mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001775 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001776 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001777 }
1778
Chris Wilson05394f32010-11-08 19:18:58 +00001779 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00001780 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00001781 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001782 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001783 }
1784
Chris Wilsond8cb5082012-08-11 15:41:03 +01001785 ret = i915_gem_object_create_mmap_offset(obj);
1786 if (ret)
1787 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001788
David Herrmann0de23972013-07-24 21:07:52 +02001789 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001790
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001791out:
Chris Wilson05394f32010-11-08 19:18:58 +00001792 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001793unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001794 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001795 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001796}
1797
Dave Airlieff72145b2011-02-07 12:16:14 +10001798/**
1799 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1800 * @dev: DRM device
1801 * @data: GTT mapping ioctl data
1802 * @file: GEM object info
1803 *
1804 * Simply returns the fake offset to userspace so it can mmap it.
1805 * The mmap call will end up in drm_gem_mmap(), which will set things
1806 * up so we can get faults in the handler above.
1807 *
1808 * The fault handler will take care of binding the object into the GTT
1809 * (since it may have been evicted to make room for something), allocating
1810 * a fence register, and mapping the appropriate aperture address into
1811 * userspace.
1812 */
1813int
1814i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1815 struct drm_file *file)
1816{
1817 struct drm_i915_gem_mmap_gtt *args = data;
1818
Dave Airlieff72145b2011-02-07 12:16:14 +10001819 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1820}
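/*
 * Illustrative userspace sketch (not driver code): the two-step GTT
 * mapping this ioctl enables. The fake offset is only meaningful when
 * passed to mmap() on the same DRM fd, where drm_gem_mmap() wires up
 * i915_gem_fault() above.
 */
#if 0
	struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
	void *ptr = MAP_FAILED;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg) == 0)
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, mg.offset);
	/* first touch faults the object into the mappable aperture */
#endif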
1821
Chris Wilson55372522014-03-25 13:23:06 +00001822static inline int
1823i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1824{
1825 return obj->madv == I915_MADV_DONTNEED;
1826}
1827
Daniel Vetter225067e2012-08-20 10:23:20 +02001828/* Immediately discard the backing storage */
1829static void
1830i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001831{
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001832 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001833
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001834 if (obj->base.filp == NULL)
1835 return;
1836
Daniel Vetter225067e2012-08-20 10:23:20 +02001837 /* Our goal here is to return as much of the memory as
 1838 * possible back to the system as we are called from OOM.
1839 * To do this we must instruct the shmfs to drop all of its
1840 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01001841 */
Chris Wilson55372522014-03-25 13:23:06 +00001842 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
Daniel Vetter225067e2012-08-20 10:23:20 +02001843 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001844}
Chris Wilsone5281cc2010-10-28 13:45:36 +01001845
Chris Wilson55372522014-03-25 13:23:06 +00001846/* Try to discard unwanted pages */
1847static void
1848i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
Daniel Vetter225067e2012-08-20 10:23:20 +02001849{
Chris Wilson55372522014-03-25 13:23:06 +00001850 struct address_space *mapping;
1851
1852 switch (obj->madv) {
1853 case I915_MADV_DONTNEED:
1854 i915_gem_object_truncate(obj);
	/* fall through */
1855 case __I915_MADV_PURGED:
1856 return;
1857 }
1858
1859 if (obj->base.filp == NULL)
1860 return;
1861
1862 mapping = file_inode(obj->base.filp)->i_mapping;
1863 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001864}
1865
Chris Wilson5cdf5882010-09-27 15:51:07 +01001866static void
Chris Wilson05394f32010-11-08 19:18:58 +00001867i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001868{
Imre Deak90797e62013-02-18 19:28:03 +02001869 struct sg_page_iter sg_iter;
1870 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02001871
Chris Wilson05394f32010-11-08 19:18:58 +00001872 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001873
Chris Wilson6c085a72012-08-20 11:40:46 +02001874 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1875 if (ret) {
1876 /* In the event of a disaster, abandon all caches and
1877 * hope for the best.
1878 */
1879 WARN_ON(ret != -EIO);
Chris Wilson2c225692013-08-09 12:26:45 +01001880 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02001881 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1882 }
1883
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001884 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001885 i915_gem_object_save_bit_17_swizzle(obj);
1886
Chris Wilson05394f32010-11-08 19:18:58 +00001887 if (obj->madv == I915_MADV_DONTNEED)
1888 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001889
Imre Deak90797e62013-02-18 19:28:03 +02001890 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
Imre Deak2db76d72013-03-26 15:14:18 +02001891 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +01001892
Chris Wilson05394f32010-11-08 19:18:58 +00001893 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01001894 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001895
Chris Wilson05394f32010-11-08 19:18:58 +00001896 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01001897 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001898
Chris Wilson9da3da62012-06-01 15:20:22 +01001899 page_cache_release(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001900 }
Chris Wilson05394f32010-11-08 19:18:58 +00001901 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001902
Chris Wilson9da3da62012-06-01 15:20:22 +01001903 sg_free_table(obj->pages);
1904 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01001905}
1906
Chris Wilsondd624af2013-01-15 12:39:35 +00001907int
Chris Wilson37e680a2012-06-07 15:38:42 +01001908i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1909{
1910 const struct drm_i915_gem_object_ops *ops = obj->ops;
1911
Chris Wilson2f745ad2012-09-04 21:02:58 +01001912 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01001913 return 0;
1914
Chris Wilsona5570172012-09-04 21:02:54 +01001915 if (obj->pages_pin_count)
1916 return -EBUSY;
1917
Ben Widawsky98438772013-07-31 17:00:12 -07001918 BUG_ON(i915_gem_obj_bound_any(obj));
Ben Widawsky3e123022013-07-31 17:00:04 -07001919
Chris Wilsona2165e32012-12-03 11:49:00 +00001920 /* ->put_pages might need to allocate memory for the bit17 swizzle
1921 * array, hence protect them from being reaped by removing them from gtt
1922 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07001923 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00001924
Chris Wilson37e680a2012-06-07 15:38:42 +01001925 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001926 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02001927
Chris Wilson55372522014-03-25 13:23:06 +00001928 i915_gem_object_invalidate(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02001929
1930 return 0;
1931}
1932
Chris Wilsond9973b42013-10-04 10:33:00 +01001933static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001934__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1935 bool purgeable_only)
Chris Wilson6c085a72012-08-20 11:40:46 +02001936{
Chris Wilsonc8725f32014-03-17 12:21:55 +00001937 struct list_head still_in_list;
1938 struct drm_i915_gem_object *obj;
Chris Wilsond9973b42013-10-04 10:33:00 +01001939 unsigned long count = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001940
Chris Wilson57094f82013-09-04 10:45:50 +01001941 /*
Chris Wilsonc8725f32014-03-17 12:21:55 +00001942 * As we may completely rewrite the (un)bound list whilst unbinding
Chris Wilson57094f82013-09-04 10:45:50 +01001943 * (due to retiring requests) we have to strictly process only
1944 * one element of the list at a time, and recheck the list
1945 * on every iteration.
Chris Wilsonc8725f32014-03-17 12:21:55 +00001946 *
1947 * In particular, we must hold a reference whilst removing the
1948 * object as we may end up waiting for and/or retiring the objects.
1949 * This might release the final reference (held by the active list)
1950 * and result in the object being freed from under us. This is
1951 * similar to the precautions the eviction code must take whilst
1952 * removing objects.
1953 *
1954 * Also note that although these lists do not hold a reference to
1955 * the object we can safely grab one here: The final object
1956 * unreferencing and the bound_list are both protected by the
1957 * dev->struct_mutex and so we won't ever be able to observe an
1958 * object on the bound_list with a reference count of 0.
Chris Wilson57094f82013-09-04 10:45:50 +01001959 */
Chris Wilsonc8725f32014-03-17 12:21:55 +00001960 INIT_LIST_HEAD(&still_in_list);
1961 while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
1962 obj = list_first_entry(&dev_priv->mm.unbound_list,
1963 typeof(*obj), global_list);
1964 list_move_tail(&obj->global_list, &still_in_list);
1965
1966 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1967 continue;
1968
1969 drm_gem_object_reference(&obj->base);
1970
1971 if (i915_gem_object_put_pages(obj) == 0)
1972 count += obj->base.size >> PAGE_SHIFT;
1973
1974 drm_gem_object_unreference(&obj->base);
1975 }
1976 list_splice(&still_in_list, &dev_priv->mm.unbound_list);
1977
1978 INIT_LIST_HEAD(&still_in_list);
Chris Wilson57094f82013-09-04 10:45:50 +01001979 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001980 struct i915_vma *vma, *v;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001981
Chris Wilson57094f82013-09-04 10:45:50 +01001982 obj = list_first_entry(&dev_priv->mm.bound_list,
1983 typeof(*obj), global_list);
Chris Wilsonc8725f32014-03-17 12:21:55 +00001984 list_move_tail(&obj->global_list, &still_in_list);
Chris Wilson57094f82013-09-04 10:45:50 +01001985
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001986 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1987 continue;
1988
Chris Wilson57094f82013-09-04 10:45:50 +01001989 drm_gem_object_reference(&obj->base);
1990
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001991 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1992 if (i915_vma_unbind(vma))
1993 break;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001994
Chris Wilson57094f82013-09-04 10:45:50 +01001995 if (i915_gem_object_put_pages(obj) == 0)
Chris Wilson6c085a72012-08-20 11:40:46 +02001996 count += obj->base.size >> PAGE_SHIFT;
Chris Wilson57094f82013-09-04 10:45:50 +01001997
1998 drm_gem_object_unreference(&obj->base);
Chris Wilson6c085a72012-08-20 11:40:46 +02001999 }
Chris Wilsonc8725f32014-03-17 12:21:55 +00002000 list_splice(&still_in_list, &dev_priv->mm.bound_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02002001
2002 return count;
2003}
2004
Chris Wilsond9973b42013-10-04 10:33:00 +01002005static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01002006i915_gem_purge(struct drm_i915_private *dev_priv, long target)
2007{
2008 return __i915_gem_shrink(dev_priv, target, true);
2009}
2010
Chris Wilsond9973b42013-10-04 10:33:00 +01002011static unsigned long
Chris Wilson6c085a72012-08-20 11:40:46 +02002012i915_gem_shrink_all(struct drm_i915_private *dev_priv)
2013{
Chris Wilson6c085a72012-08-20 11:40:46 +02002014 i915_gem_evict_everything(dev_priv->dev);
Chris Wilsonc8725f32014-03-17 12:21:55 +00002015 return __i915_gem_shrink(dev_priv, LONG_MAX, false);
Daniel Vetter225067e2012-08-20 10:23:20 +02002016}
2017
Chris Wilson37e680a2012-06-07 15:38:42 +01002018static int
Chris Wilson6c085a72012-08-20 11:40:46 +02002019i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002020{
Chris Wilson6c085a72012-08-20 11:40:46 +02002021 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002022 int page_count, i;
2023 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01002024 struct sg_table *st;
2025 struct scatterlist *sg;
Imre Deak90797e62013-02-18 19:28:03 +02002026 struct sg_page_iter sg_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07002027 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02002028 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson6c085a72012-08-20 11:40:46 +02002029 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07002030
Chris Wilson6c085a72012-08-20 11:40:46 +02002031 /* Assert that the object is not currently in any GPU domain. As it
2032 * wasn't in the GTT, there shouldn't be any way it could have been in
2033 * a GPU cache
2034 */
2035 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2036 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2037
Chris Wilson9da3da62012-06-01 15:20:22 +01002038 st = kmalloc(sizeof(*st), GFP_KERNEL);
2039 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002040 return -ENOMEM;
2041
Chris Wilson9da3da62012-06-01 15:20:22 +01002042 page_count = obj->base.size / PAGE_SIZE;
2043 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01002044 kfree(st);
2045 return -ENOMEM;
2046 }
2047
2048 /* Get the list of pages out of our struct file. They'll be pinned
2049 * at this point until we release them.
2050 *
2051 * Fail silently without starting the shrinker
2052 */
Al Viro496ad9a2013-01-23 17:07:38 -05002053 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilson6c085a72012-08-20 11:40:46 +02002054 gfp = mapping_gfp_mask(mapping);
Linus Torvaldscaf49192012-12-10 10:51:16 -08002055 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02002056 gfp &= ~(__GFP_IO | __GFP_WAIT);
Imre Deak90797e62013-02-18 19:28:03 +02002057 sg = st->sgl;
2058 st->nents = 0;
2059 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02002060 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2061 if (IS_ERR(page)) {
2062 i915_gem_purge(dev_priv, page_count);
2063 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2064 }
2065 if (IS_ERR(page)) {
2066 /* We've tried hard to allocate the memory by reaping
2067 * our own buffer, now let the real VM do its job and
2068 * go down in flames if truly OOM.
2069 */
Chris Wilson6c085a72012-08-20 11:40:46 +02002070 i915_gem_shrink_all(dev_priv);
David Herrmannf461d1b2014-05-25 14:34:10 +02002071 page = shmem_read_mapping_page(mapping, i);
Chris Wilson6c085a72012-08-20 11:40:46 +02002072 if (IS_ERR(page))
2073 goto err_pages;
Chris Wilson6c085a72012-08-20 11:40:46 +02002074 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002075#ifdef CONFIG_SWIOTLB
2076 if (swiotlb_nr_tbl()) {
2077 st->nents++;
2078 sg_set_page(sg, page, PAGE_SIZE, 0);
2079 sg = sg_next(sg);
2080 continue;
2081 }
2082#endif
Imre Deak90797e62013-02-18 19:28:03 +02002083 if (!i || page_to_pfn(page) != last_pfn + 1) {
2084 if (i)
2085 sg = sg_next(sg);
2086 st->nents++;
2087 sg_set_page(sg, page, PAGE_SIZE, 0);
2088 } else {
2089 sg->length += PAGE_SIZE;
2090 }
2091 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03002092
2093 /* Check that the i965g/gm workaround works. */
2094 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07002095 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04002096#ifdef CONFIG_SWIOTLB
2097 if (!swiotlb_nr_tbl())
2098#endif
2099 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01002100 obj->pages = st;
2101
Eric Anholt673a3942008-07-30 12:06:12 -07002102 if (i915_gem_object_needs_bit17_swizzle(obj))
2103 i915_gem_object_do_bit_17_swizzle(obj);
2104
2105 return 0;
2106
2107err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02002108 sg_mark_end(sg);
2109 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
Imre Deak2db76d72013-03-26 15:14:18 +02002110 page_cache_release(sg_page_iter_page(&sg_iter));
Chris Wilson9da3da62012-06-01 15:20:22 +01002111 sg_free_table(st);
2112 kfree(st);
Chris Wilson0820baf2014-03-25 13:23:03 +00002113
2114 /* shmemfs first checks if there is enough memory to allocate the page
2115 * and reports ENOSPC should there be insufficient, along with the usual
2116 * ENOMEM for a genuine allocation failure.
2117 *
2118 * We use ENOSPC in our driver to mean that we have run out of aperture
2119 * space and so want to translate the error from shmemfs back to our
2120 * usual understanding of ENOMEM.
2121 */
2122 if (PTR_ERR(page) == -ENOSPC)
2123 return -ENOMEM;
2124 else
2125 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07002126}
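/*
 * Illustrative note on the loop above: shmem pages that happen to be
 * physically contiguous share a single scatterlist entry. Pages with
 * pfns {100, 101, 102, 200}, say, collapse into two entries: one of
 * 3 * PAGE_SIZE starting at pfn 100 and one of PAGE_SIZE at pfn 200.
 * With swiotlb active the coalescing is skipped and every page gets
 * its own entry.
 */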
2127
Chris Wilson37e680a2012-06-07 15:38:42 +01002128/* Ensure that the associated pages are gathered from the backing storage
2129 * and pinned into our object. i915_gem_object_get_pages() may be called
2130 * multiple times before they are released by a single call to
2131 * i915_gem_object_put_pages() - once the pages are no longer referenced
2132 * either as a result of memory pressure (reaping pages under the shrinker)
2133 * or as the object is itself released.
2134 */
2135int
2136i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2137{
2138 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2139 const struct drm_i915_gem_object_ops *ops = obj->ops;
2140 int ret;
2141
Chris Wilson2f745ad2012-09-04 21:02:58 +01002142 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01002143 return 0;
2144
Chris Wilson43e28f02013-01-08 10:53:09 +00002145 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00002146 DRM_DEBUG("Attempting to obtain a purgeable object\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00002147 return -EFAULT;
Chris Wilson43e28f02013-01-08 10:53:09 +00002148 }
2149
Chris Wilsona5570172012-09-04 21:02:54 +01002150 BUG_ON(obj->pages_pin_count);
2151
Chris Wilson37e680a2012-06-07 15:38:42 +01002152 ret = ops->get_pages(obj);
2153 if (ret)
2154 return ret;
2155
Ben Widawsky35c20a62013-05-31 11:28:48 -07002156 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilson37e680a2012-06-07 15:38:42 +01002157 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002158}
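/*
 * Typical pairing (an illustrative sketch under the struct_mutex):
 * callers that need the backing store resident pin it around use, and
 * the pages are only truly released once pages_pin_count reaches zero
 * and the shrinker (or final free) calls i915_gem_object_put_pages().
 */
#if 0
	ret = i915_gem_object_get_pages(obj);
	if (ret == 0) {
		i915_gem_object_pin_pages(obj);
		/* ... access obj->pages safely ... */
		i915_gem_object_unpin_pages(obj);
	}
#endif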
2159
Ben Widawskye2d05a82013-09-24 09:57:58 -07002160static void
Chris Wilson05394f32010-11-08 19:18:58 +00002161i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002162 struct intel_engine_cs *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002163{
Chris Wilson9d7730912012-11-27 16:22:52 +00002164 u32 seqno = intel_ring_get_seqno(ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01002165
Zou Nan hai852835f2010-05-21 09:08:56 +08002166 BUG_ON(ring == NULL);
Chris Wilson02978ff2013-07-09 09:22:39 +01002167 if (obj->ring != ring && obj->last_write_seqno) {
2168 /* Keep the seqno relative to the current ring */
2169 obj->last_write_seqno = seqno;
2170 }
Chris Wilson05394f32010-11-08 19:18:58 +00002171 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07002172
2173 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00002174 if (!obj->active) {
2175 drm_gem_object_reference(&obj->base);
2176 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07002177 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01002178
Chris Wilson05394f32010-11-08 19:18:58 +00002179 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002180
Chris Wilson0201f1e2012-07-20 12:41:01 +01002181 obj->last_read_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002182}
2183
Ben Widawskye2d05a82013-09-24 09:57:58 -07002184void i915_vma_move_to_active(struct i915_vma *vma,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002185 struct intel_engine_cs *ring)
Ben Widawskye2d05a82013-09-24 09:57:58 -07002186{
2187 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2188 return i915_gem_object_move_to_active(vma->obj, ring);
2189}
2190
Chris Wilsoncaea7472010-11-12 13:53:37 +00002191static void
Chris Wilsoncaea7472010-11-12 13:53:37 +00002192i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2193{
Ben Widawskyca191b12013-07-31 17:00:14 -07002194 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002195 struct i915_address_space *vm;
2196 struct i915_vma *vma;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002197
Chris Wilson65ce3022012-07-20 12:41:02 +01002198 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002199 BUG_ON(!obj->active);
Chris Wilson65ce3022012-07-20 12:41:02 +01002200
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002201 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2202 vma = i915_gem_obj_to_vma(obj, vm);
2203 if (vma && !list_empty(&vma->mm_list))
2204 list_move_tail(&vma->mm_list, &vm->inactive_list);
2205 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002206
Daniel Vetterf99d7062014-06-19 16:01:59 +02002207 intel_fb_obj_flush(obj, true);
2208
Chris Wilson65ce3022012-07-20 12:41:02 +01002209 list_del_init(&obj->ring_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002210 obj->ring = NULL;
2211
Chris Wilson65ce3022012-07-20 12:41:02 +01002212 obj->last_read_seqno = 0;
2213 obj->last_write_seqno = 0;
2214 obj->base.write_domain = 0;
2215
2216 obj->last_fenced_seqno = 0;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002217
2218 obj->active = 0;
2219 drm_gem_object_unreference(&obj->base);
2220
2221 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08002222}
Eric Anholt673a3942008-07-30 12:06:12 -07002223
Chris Wilsonc8725f32014-03-17 12:21:55 +00002224static void
2225i915_gem_object_retire(struct drm_i915_gem_object *obj)
2226{
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002227 struct intel_engine_cs *ring = obj->ring;
Chris Wilsonc8725f32014-03-17 12:21:55 +00002228
2229 if (ring == NULL)
2230 return;
2231
2232 if (i915_seqno_passed(ring->get_seqno(ring, true),
2233 obj->last_read_seqno))
2234 i915_gem_object_move_to_inactive(obj);
2235}
2236
Chris Wilson9d7730912012-11-27 16:22:52 +00002237static int
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002238i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002239{
Chris Wilson9d7730912012-11-27 16:22:52 +00002240 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002241 struct intel_engine_cs *ring;
Chris Wilson9d7730912012-11-27 16:22:52 +00002242 int ret, i, j;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002243
Chris Wilson107f27a52012-12-10 13:56:17 +02002244 /* Carefully retire all requests without writing to the rings */
Chris Wilson9d7730912012-11-27 16:22:52 +00002245 for_each_ring(ring, dev_priv, i) {
Chris Wilson107f27a52012-12-10 13:56:17 +02002246 ret = intel_ring_idle(ring);
2247 if (ret)
2248 return ret;
Chris Wilson9d7730912012-11-27 16:22:52 +00002249 }
Chris Wilson9d7730912012-11-27 16:22:52 +00002250 i915_gem_retire_requests(dev);
Chris Wilson107f27a52012-12-10 13:56:17 +02002251
2252 /* Finally reset hw state */
Chris Wilson9d7730912012-11-27 16:22:52 +00002253 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002254 intel_ring_init_seqno(ring, seqno);
Mika Kuoppala498d2ac2012-12-04 15:12:04 +02002255
Ben Widawskyebc348b2014-04-29 14:52:28 -07002256 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2257 ring->semaphore.sync_seqno[j] = 0;
Chris Wilson9d7730912012-11-27 16:22:52 +00002258 }
2259
2260 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002261}
2262
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002263int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2264{
2265 struct drm_i915_private *dev_priv = dev->dev_private;
2266 int ret;
2267
2268 if (seqno == 0)
2269 return -EINVAL;
2270
2271 /* The seqno in the HWS page needs to be set to a value less
 2272 * than the one we will inject into the ring
 2273 */
2274 ret = i915_gem_init_seqno(dev, seqno - 1);
2275 if (ret)
2276 return ret;
2277
2278 /* Carefully set the last_seqno value so that wrap
2279 * detection still works
2280 */
2281 dev_priv->next_seqno = seqno;
2282 dev_priv->last_seqno = seqno - 1;
2283 if (dev_priv->last_seqno == 0)
2284 dev_priv->last_seqno--;
2285
2286 return 0;
2287}
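/*
 * Worked example (illustrative): i915_gem_set_seqno(dev, 1) first
 * idles and reinitialises the rings with seqno 0, then sets
 * next_seqno = 1 and last_seqno = 0; as a last_seqno of 0 is
 * reserved, it is decremented to 0xffffffff, which keeps
 * i915_seqno_passed()'s wrap detection working across the boundary.
 */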
2288
Chris Wilson9d7730912012-11-27 16:22:52 +00002289int
2290i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002291{
Chris Wilson9d7730912012-11-27 16:22:52 +00002292 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002293
Chris Wilson9d7730912012-11-27 16:22:52 +00002294 /* reserve 0 for non-seqno */
2295 if (dev_priv->next_seqno == 0) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002296 int ret = i915_gem_init_seqno(dev, 0);
Chris Wilson9d7730912012-11-27 16:22:52 +00002297 if (ret)
2298 return ret;
2299
2300 dev_priv->next_seqno = 1;
2301 }
2302
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02002303 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
Chris Wilson9d7730912012-11-27 16:22:52 +00002304 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002305}
2306
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002307int __i915_add_request(struct intel_engine_cs *ring,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002308 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002309 struct drm_i915_gem_object *obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002310 u32 *out_seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002311{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002312 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Chris Wilsonacb868d2012-09-26 13:47:30 +01002313 struct drm_i915_gem_request *request;
Oscar Mateo48e29f52014-07-24 17:04:29 +01002314 struct intel_ringbuffer *ringbuf;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002315 u32 request_ring_position, request_start;
Chris Wilson3cce4692010-10-27 16:11:02 +01002316 int ret;
2317
Oscar Mateo48e29f52014-07-24 17:04:29 +01002318 request = ring->preallocated_lazy_request;
2319 if (WARN_ON(request == NULL))
2320 return -ENOMEM;
2321
2322 if (i915.enable_execlists) {
2323 struct intel_context *ctx = request->ctx;
2324 ringbuf = ctx->engine[ring->id].ringbuf;
2325 } else
2326 ringbuf = ring->buffer;
2327
2328 request_start = intel_ring_get_tail(ringbuf);
Daniel Vettercc889e02012-06-13 20:45:19 +02002329 /*
2330 * Emit any outstanding flushes - execbuf can fail to emit the flush
2331 * after having emitted the batchbuffer command. Hence we need to fix
2332 * things up similar to emitting the lazy request. The difference here
2333 * is that the flush _must_ happen before the next request, no matter
2334 * what.
2335 */
Oscar Mateo48e29f52014-07-24 17:04:29 +01002336 if (i915.enable_execlists) {
2337 ret = logical_ring_flush_all_caches(ringbuf);
2338 if (ret)
2339 return ret;
2340 } else {
2341 ret = intel_ring_flush_all_caches(ring);
2342 if (ret)
2343 return ret;
2344 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002345
Chris Wilsona71d8d92012-02-15 11:25:36 +00002346 /* Record the position of the start of the request so that
2347 * should we detect the updated seqno part-way through the
2348 * GPU processing the request, we never over-estimate the
2349 * position of the head.
2350 */
Oscar Mateo48e29f52014-07-24 17:04:29 +01002351 request_ring_position = intel_ring_get_tail(ringbuf);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002352
Oscar Mateo48e29f52014-07-24 17:04:29 +01002353 if (i915.enable_execlists) {
2354 ret = ring->emit_request(ringbuf);
2355 if (ret)
2356 return ret;
2357 } else {
2358 ret = ring->add_request(ring);
2359 if (ret)
2360 return ret;
2361 }
Eric Anholt673a3942008-07-30 12:06:12 -07002362
Chris Wilson9d7730912012-11-27 16:22:52 +00002363 request->seqno = intel_ring_get_seqno(ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08002364 request->ring = ring;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002365 request->head = request_start;
Chris Wilsona71d8d92012-02-15 11:25:36 +00002366 request->tail = request_ring_position;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002367
2368 /* Whilst this request exists, batch_obj will be on the
2369 * active_list, and so will hold the active reference. Only when this
2370 * request is retired will the batch_obj be moved onto the
2371 * inactive_list and lose its active reference. Hence we do not need
2372 * to explicitly hold another reference here.
2373 */
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002374 request->batch_obj = obj;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002375
Oscar Mateo48e29f52014-07-24 17:04:29 +01002376 if (!i915.enable_execlists) {
2377 /* Hold a reference to the current context so that we can inspect
2378 * it later in case a hangcheck error event fires.
2379 */
2380 request->ctx = ring->last_context;
2381 if (request->ctx)
2382 i915_gem_context_reference(request->ctx);
2383 }
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002384
Eric Anholt673a3942008-07-30 12:06:12 -07002385 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08002386 list_add_tail(&request->list, &ring->request_list);
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002387 request->file_priv = NULL;
Zou Nan hai852835f2010-05-21 09:08:56 +08002388
Chris Wilsondb53a302011-02-03 11:57:46 +00002389 if (file) {
2390 struct drm_i915_file_private *file_priv = file->driver_priv;
2391
Chris Wilson1c255952010-09-26 11:03:27 +01002392 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002393 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002394 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002395 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01002396 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00002397 }
Eric Anholt673a3942008-07-30 12:06:12 -07002398
Chris Wilson9d7730912012-11-27 16:22:52 +00002399 trace_i915_gem_request_add(ring, request->seqno);
Chris Wilson18235212013-09-04 10:45:51 +01002400 ring->outstanding_lazy_seqno = 0;
Chris Wilson3c0e2342013-09-04 10:45:52 +01002401 ring->preallocated_lazy_request = NULL;
Chris Wilsondb53a302011-02-03 11:57:46 +00002402
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002403 if (!dev_priv->ums.mm_suspended) {
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002404 i915_queue_hangcheck(ring->dev);
2405
Chris Wilsonf62a0072014-02-21 17:55:39 +00002406 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2407 queue_delayed_work(dev_priv->wq,
2408 &dev_priv->mm.retire_work,
2409 round_jiffies_up_relative(HZ));
2410 intel_mark_busy(dev_priv->dev);
Ben Gamarif65d9422009-09-14 17:48:44 -04002411 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002412
Chris Wilsonacb868d2012-09-26 13:47:30 +01002413 if (out_seqno)
Chris Wilson9d7730912012-11-27 16:22:52 +00002414 *out_seqno = request->seqno;
Chris Wilson3cce4692010-10-27 16:11:02 +01002415 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002416}
2417
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002418static inline void
2419i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07002420{
Chris Wilson1c255952010-09-26 11:03:27 +01002421 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002422
Chris Wilson1c255952010-09-26 11:03:27 +01002423 if (!file_priv)
2424 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002425
Chris Wilson1c255952010-09-26 11:03:27 +01002426 spin_lock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002427 list_del(&request->client_list);
2428 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01002429 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002430}
2431
Mika Kuoppala939fd762014-01-30 19:04:44 +02002432static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
Oscar Mateo273497e2014-05-22 14:13:37 +01002433 const struct intel_context *ctx)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002434{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002435 unsigned long elapsed;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002436
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002437 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2438
2439 if (ctx->hang_stats.banned)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002440 return true;
2441
2442 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
Ville Syrjäläccc7bed2014-02-21 16:26:47 +02002443 if (!i915_gem_context_is_default(ctx)) {
Mika Kuoppala3fac8972014-01-30 16:05:48 +02002444 DRM_DEBUG("context hanging too fast, banning!\n");
Ville Syrjäläccc7bed2014-02-21 16:26:47 +02002445 return true;
Mika Kuoppala88b4aa82014-03-28 18:18:18 +02002446 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2447 if (i915_stop_ring_allow_warn(dev_priv))
2448 DRM_ERROR("gpu hanging too fast, banning!\n");
Ville Syrjäläccc7bed2014-02-21 16:26:47 +02002449 return true;
Mika Kuoppala3fac8972014-01-30 16:05:48 +02002450 }
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002451 }
2452
2453 return false;
2454}
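/*
 * Worked example of the ban heuristic above (hypothetical numbers): with a
 * ban period of 60 seconds, a non-default context whose previous guilty
 * hang was recorded 10 seconds ago satisfies elapsed <= period and is
 * banned on its next hang; the same hang arriving more than 60 seconds
 * after the last one only updates the hang statistics.
 */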
2455
Mika Kuoppala939fd762014-01-30 19:04:44 +02002456static void i915_set_reset_status(struct drm_i915_private *dev_priv,
Oscar Mateo273497e2014-05-22 14:13:37 +01002457 struct intel_context *ctx,
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002458 const bool guilty)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002459{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002460 struct i915_ctx_hang_stats *hs;
2461
2462 if (WARN_ON(!ctx))
2463 return;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002464
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002465 hs = &ctx->hang_stats;
2466
2467 if (guilty) {
Mika Kuoppala939fd762014-01-30 19:04:44 +02002468 hs->banned = i915_context_is_banned(dev_priv, ctx);
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002469 hs->batch_active++;
2470 hs->guilty_ts = get_seconds();
2471 } else {
2472 hs->batch_pending++;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002473 }
2474}
2475
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002476static void i915_gem_free_request(struct drm_i915_gem_request *request)
2477{
2478 list_del(&request->list);
2479 i915_gem_request_remove_from_client(request);
2480
2481 if (request->ctx)
2482 i915_gem_context_unreference(request->ctx);
2483
2484 kfree(request);
2485}
2486
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002487struct drm_i915_gem_request *
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002488i915_gem_find_active_request(struct intel_engine_cs *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01002489{
Chris Wilson4db080f2013-12-04 11:37:09 +00002490 struct drm_i915_gem_request *request;
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002491 u32 completed_seqno;
2492
2493 completed_seqno = ring->get_seqno(ring, false);
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002494
Chris Wilson4db080f2013-12-04 11:37:09 +00002495 list_for_each_entry(request, &ring->request_list, list) {
2496 if (i915_seqno_passed(completed_seqno, request->seqno))
2497 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002498
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002499 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00002500 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002501
2502 return NULL;
2503}
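/*
 * Worked example (made-up seqnos): if the hardware reports seqno 100 as
 * completed and the request list holds seqnos 98, 101 and 102, the entry
 * for 98 is skipped as already finished and the request with seqno 101 is
 * returned: it is the one the GPU was processing when it stopped.
 */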
2504
2505static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002506 struct intel_engine_cs *ring)
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002507{
2508 struct drm_i915_gem_request *request;
2509 bool ring_hung;
2510
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002511 request = i915_gem_find_active_request(ring);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002512
2513 if (request == NULL)
2514 return;
2515
2516 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2517
Mika Kuoppala939fd762014-01-30 19:04:44 +02002518 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002519
2520 list_for_each_entry_continue(request, &ring->request_list, list)
Mika Kuoppala939fd762014-01-30 19:04:44 +02002521 i915_set_reset_status(dev_priv, request->ctx, false);
Chris Wilson4db080f2013-12-04 11:37:09 +00002522}
2523
2524static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002525 struct intel_engine_cs *ring)
Chris Wilson4db080f2013-12-04 11:37:09 +00002526{
Chris Wilsondfaae392010-09-22 10:31:52 +01002527 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002528 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07002529
Chris Wilson05394f32010-11-08 19:18:58 +00002530 obj = list_first_entry(&ring->active_list,
2531 struct drm_i915_gem_object,
2532 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002533
Chris Wilson05394f32010-11-08 19:18:58 +00002534 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002535 }
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002536
2537 /*
2538 * We must free the requests after all the corresponding objects have
2539	 * been moved off active lists, in the same order as the normal
2540	 * retire_requests function. This is important if objects hold
2541 * implicit references on things like e.g. ppgtt address spaces through
2542 * the request.
2543 */
2544 while (!list_empty(&ring->request_list)) {
2545 struct drm_i915_gem_request *request;
2546
2547 request = list_first_entry(&ring->request_list,
2548 struct drm_i915_gem_request,
2549 list);
2550
2551 i915_gem_free_request(request);
2552 }
Chris Wilsone3efda42014-04-09 09:19:41 +01002553
2554	/* These may not have been flushed before the reset, so do so now */
2555 kfree(ring->preallocated_lazy_request);
2556 ring->preallocated_lazy_request = NULL;
2557 ring->outstanding_lazy_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002558}
2559
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002560void i915_gem_restore_fences(struct drm_device *dev)
Chris Wilson312817a2010-11-22 11:50:11 +00002561{
2562 struct drm_i915_private *dev_priv = dev->dev_private;
2563 int i;
2564
Daniel Vetter4b9de732011-10-09 21:52:02 +02002565 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00002566 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00002567
Daniel Vetter94a335d2013-07-17 14:51:28 +02002568 /*
2569 * Commit delayed tiling changes if we have an object still
2570 * attached to the fence, otherwise just clear the fence.
2571 */
2572 if (reg->obj) {
2573 i915_gem_object_update_fence(reg->obj, reg,
2574 reg->obj->tiling_mode);
2575 } else {
2576 i915_gem_write_fence(dev, i, NULL);
2577 }
Chris Wilson312817a2010-11-22 11:50:11 +00002578 }
2579}
2580
Chris Wilson069efc12010-09-30 16:53:18 +01002581void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002582{
Chris Wilsondfaae392010-09-22 10:31:52 +01002583 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002584 struct intel_engine_cs *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002585 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002586
Chris Wilson4db080f2013-12-04 11:37:09 +00002587 /*
2588 * Before we free the objects from the requests, we need to inspect
2589 * them for finding the guilty party. As the requests only borrow
2590 * their reference to the objects, the inspection must be done first.
2591 */
Chris Wilsonb4519512012-05-11 14:29:30 +01002592 for_each_ring(ring, dev_priv, i)
Chris Wilson4db080f2013-12-04 11:37:09 +00002593 i915_gem_reset_ring_status(dev_priv, ring);
2594
2595 for_each_ring(ring, dev_priv, i)
2596 i915_gem_reset_ring_cleanup(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01002597
Ben Widawskyacce9ff2013-12-06 14:11:03 -08002598 i915_gem_context_reset(dev);
2599
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002600 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002601}
2602
2603/**
2604 * This function clears the request list as sequence numbers are passed.
2605 */
Chris Wilson1cf0ba12014-05-05 09:07:33 +01002606void
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002607i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002608{
Eric Anholt673a3942008-07-30 12:06:12 -07002609 uint32_t seqno;
2610
Chris Wilsondb53a302011-02-03 11:57:46 +00002611 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01002612 return;
2613
Chris Wilsondb53a302011-02-03 11:57:46 +00002614 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002615
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01002616 seqno = ring->get_seqno(ring, true);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002617
Chris Wilsone9103032014-01-07 11:45:14 +00002618 /* Move any buffers on the active list that are no longer referenced
2619 * by the ringbuffer to the flushing/inactive lists as appropriate,
2620 * before we free the context associated with the requests.
2621 */
2622 while (!list_empty(&ring->active_list)) {
2623 struct drm_i915_gem_object *obj;
2624
2625 obj = list_first_entry(&ring->active_list,
2626 struct drm_i915_gem_object,
2627 ring_list);
2628
2629 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2630 break;
2631
2632 i915_gem_object_move_to_inactive(obj);
2633 }
2634
2635
Zou Nan hai852835f2010-05-21 09:08:56 +08002636 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002637 struct drm_i915_gem_request *request;
Oscar Mateo48e29f52014-07-24 17:04:29 +01002638 struct intel_ringbuffer *ringbuf;
Eric Anholt673a3942008-07-30 12:06:12 -07002639
Zou Nan hai852835f2010-05-21 09:08:56 +08002640 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07002641 struct drm_i915_gem_request,
2642 list);
Eric Anholt673a3942008-07-30 12:06:12 -07002643
Chris Wilsondfaae392010-09-22 10:31:52 +01002644 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07002645 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002646
Chris Wilsondb53a302011-02-03 11:57:46 +00002647 trace_i915_gem_request_retire(ring, request->seqno);
Oscar Mateo48e29f52014-07-24 17:04:29 +01002648
2649 /* This is one of the few common intersection points
2650 * between legacy ringbuffer submission and execlists:
2651 * we need to tell them apart in order to find the correct
2652		 * ringbuffer to which the request belongs.
2653 */
2654 if (i915.enable_execlists) {
2655 struct intel_context *ctx = request->ctx;
2656 ringbuf = ctx->engine[ring->id].ringbuf;
2657 } else
2658 ringbuf = ring->buffer;
2659
Chris Wilsona71d8d92012-02-15 11:25:36 +00002660 /* We know the GPU must have read the request to have
2661 * sent us the seqno + interrupt, so use the position
2662		 * of the tail of the request to update the last known position
2663 * of the GPU head.
2664 */
Oscar Mateo48e29f52014-07-24 17:04:29 +01002665 ringbuf->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002666
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002667 i915_gem_free_request(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002668 }
2669
Chris Wilsondb53a302011-02-03 11:57:46 +00002670 if (unlikely(ring->trace_irq_seqno &&
2671 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002672 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00002673 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002674 }
Chris Wilson23bc5982010-09-29 16:10:57 +01002675
Chris Wilsondb53a302011-02-03 11:57:46 +00002676 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002677}
2678
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002679bool
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002680i915_gem_retire_requests(struct drm_device *dev)
2681{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002682 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002683 struct intel_engine_cs *ring;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002684 bool idle = true;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002685 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002686
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002687 for_each_ring(ring, dev_priv, i) {
Chris Wilsonb4519512012-05-11 14:29:30 +01002688 i915_gem_retire_requests_ring(ring);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002689 idle &= list_empty(&ring->request_list);
2690 }
2691
2692 if (idle)
2693 mod_delayed_work(dev_priv->wq,
2694 &dev_priv->mm.idle_work,
2695 msecs_to_jiffies(100));
2696
2697 return idle;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002698}
2699
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002700static void
Eric Anholt673a3942008-07-30 12:06:12 -07002701i915_gem_retire_work_handler(struct work_struct *work)
2702{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002703 struct drm_i915_private *dev_priv =
2704 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2705 struct drm_device *dev = dev_priv->dev;
Chris Wilson0a587052011-01-09 21:05:44 +00002706 bool idle;
Eric Anholt673a3942008-07-30 12:06:12 -07002707
Chris Wilson891b48c2010-09-29 12:26:37 +01002708 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002709 idle = false;
2710 if (mutex_trylock(&dev->struct_mutex)) {
2711 idle = i915_gem_retire_requests(dev);
2712 mutex_unlock(&dev->struct_mutex);
2713 }
2714 if (!idle)
Chris Wilsonbcb45082012-10-05 17:02:57 +01002715 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2716 round_jiffies_up_relative(HZ));
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002717}
Chris Wilson891b48c2010-09-29 12:26:37 +01002718
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002719static void
2720i915_gem_idle_work_handler(struct work_struct *work)
2721{
2722 struct drm_i915_private *dev_priv =
2723 container_of(work, typeof(*dev_priv), mm.idle_work.work);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002724
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002725 intel_mark_idle(dev_priv->dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002726}
2727
Ben Widawsky5816d642012-04-11 11:18:19 -07002728/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002729 * Ensures that an object will eventually get non-busy by flushing any required
2730 * write domains, emitting any outstanding lazy request and retiring any
2731 * completed requests.
2732 */
2733static int
2734i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2735{
2736 int ret;
2737
2738 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002739 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002740 if (ret)
2741 return ret;
2742
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002743 i915_gem_retire_requests_ring(obj->ring);
2744 }
2745
2746 return 0;
2747}
2748
2749/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002750 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2751 * @DRM_IOCTL_ARGS: standard ioctl arguments
2752 *
2753 * Returns 0 if successful, else an error is returned with the remaining time in
2754 * the timeout parameter.
2755 * -ETIME: object is still busy after timeout
2756 * -ERESTARTSYS: signal interrupted the wait
2757 * -ENOENT: object doesn't exist
2758 * Also possible, but rare:
2759 * -EAGAIN: GPU wedged
2760 * -ENOMEM: damn
2761 * -ENODEV: Internal IRQ fail
2762 * -E?: The add request failed
2763 *
2764 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2765 * non-zero timeout parameter the wait ioctl will wait for the given number of
2766 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2767 * without holding struct_mutex the object may become re-busied before this
2768 * function completes. A similar but shorter * race condition exists in the busy
2769 * ioctl
2770 */
2771int
2772i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2773{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002774 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002775 struct drm_i915_gem_wait *args = data;
2776 struct drm_i915_gem_object *obj;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002777 struct intel_engine_cs *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002778 struct timespec timeout_stack, *timeout = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01002779 unsigned reset_counter;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002780 u32 seqno = 0;
2781 int ret = 0;
2782
Ben Widawskyeac1f142012-06-05 15:24:24 -07002783 if (args->timeout_ns >= 0) {
2784 timeout_stack = ns_to_timespec(args->timeout_ns);
2785 timeout = &timeout_stack;
2786 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002787
2788 ret = i915_mutex_lock_interruptible(dev);
2789 if (ret)
2790 return ret;
2791
2792 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2793 if (&obj->base == NULL) {
2794 mutex_unlock(&dev->struct_mutex);
2795 return -ENOENT;
2796 }
2797
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002798 /* Need to make sure the object gets inactive eventually. */
2799 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002800 if (ret)
2801 goto out;
2802
2803 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002804 seqno = obj->last_read_seqno;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002805 ring = obj->ring;
2806 }
2807
2808 if (seqno == 0)
2809 goto out;
2810
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002811	/* Do this after the OLR check to make sure we make forward progress polling
2812	 * on this IOCTL with a 0 timeout (like the busy ioctl)
2813 */
2814 if (!args->timeout_ns) {
2815 ret = -ETIME;
2816 goto out;
2817 }
2818
2819 drm_gem_object_unreference(&obj->base);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002820 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002821 mutex_unlock(&dev->struct_mutex);
2822
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002823 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03002824 if (timeout)
Ben Widawskyeac1f142012-06-05 15:24:24 -07002825 args->timeout_ns = timespec_to_ns(timeout);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002826 return ret;
2827
2828out:
2829 drm_gem_object_unreference(&obj->base);
2830 mutex_unlock(&dev->struct_mutex);
2831 return ret;
2832}
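/*
 * A minimal userspace sketch of driving the wait ioctl above (an
 * illustration, not part of the driver; assumes an open i915 DRM fd and a
 * valid GEM handle). A timeout_ns of 0 merely polls, like the busy ioctl;
 * on -ETIME the object is still busy, and on success wait.timeout_ns is
 * updated with the remaining time:
 *
 *	#include <errno.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int wait_bo(int fd, uint32_t handle, int64_t timeout_ns)
 *	{
 *		struct drm_i915_gem_wait wait = {
 *			.bo_handle = handle,
 *			.timeout_ns = timeout_ns,
 *		};
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
 *			return -errno;
 *		return 0;
 *	}
 */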
2833
2834/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002835 * i915_gem_object_sync - sync an object to a ring.
2836 *
2837 * @obj: object which may be in use on another ring.
2838 * @to: ring we wish to use the object on. May be NULL.
2839 *
2840 * This code is meant to abstract object synchronization with the GPU.
2841 * Calling with NULL implies synchronizing the object with the CPU
2842 * rather than a particular GPU ring.
2843 *
2844 * Returns 0 if successful, else propagates up the lower layer error.
2845 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002846int
2847i915_gem_object_sync(struct drm_i915_gem_object *obj,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002848 struct intel_engine_cs *to)
Ben Widawsky2911a352012-04-05 14:47:36 -07002849{
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002850 struct intel_engine_cs *from = obj->ring;
Ben Widawsky2911a352012-04-05 14:47:36 -07002851 u32 seqno;
2852 int ret, idx;
2853
2854 if (from == NULL || to == from)
2855 return 0;
2856
Ben Widawsky5816d642012-04-11 11:18:19 -07002857 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Chris Wilson0201f1e2012-07-20 12:41:01 +01002858 return i915_gem_object_wait_rendering(obj, false);
Ben Widawsky2911a352012-04-05 14:47:36 -07002859
2860 idx = intel_ring_sync_index(from, to);
2861
Chris Wilson0201f1e2012-07-20 12:41:01 +01002862 seqno = obj->last_read_seqno;
Rodrigo Vividdd4dbc2014-06-30 09:51:11 -07002863 /* Optimization: Avoid semaphore sync when we are sure we already
2864	 * waited for an object with a higher seqno. */
Ben Widawskyebc348b2014-04-29 14:52:28 -07002865 if (seqno <= from->semaphore.sync_seqno[idx])
Ben Widawsky2911a352012-04-05 14:47:36 -07002866 return 0;
2867
Ben Widawskyb4aca012012-04-25 20:50:12 -07002868 ret = i915_gem_check_olr(obj->ring, seqno);
2869 if (ret)
2870 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002871
Chris Wilsonb52b89d2013-09-25 11:43:28 +01002872 trace_i915_gem_ring_sync_to(from, to, seqno);
Ben Widawskyebc348b2014-04-29 14:52:28 -07002873 ret = to->semaphore.sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002874 if (!ret)
Mika Kuoppala7b01e262012-11-28 17:18:45 +02002875 /* We use last_read_seqno because sync_to()
2876 * might have just caused seqno wrap under
2877 * the radar.
2878 */
Ben Widawskyebc348b2014-04-29 14:52:28 -07002879 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002880
Ben Widawskye3a5a222012-04-11 11:18:20 -07002881 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002882}
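/*
 * Typical call pattern, sketched (assumes struct_mutex is held, as for all
 * callers here; the canonical user is the execbuffer reservation path):
 *
 *	ret = i915_gem_object_sync(obj, target_ring);
 *	if (ret)
 *		return ret;
 *
 * With semaphores enabled this queues a GPU-side wait on the target ring;
 * without them it degrades to a CPU wait for rendering to complete.
 */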
2883
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002884static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2885{
2886 u32 old_write_domain, old_read_domains;
2887
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002888 /* Force a pagefault for domain tracking on next user access */
2889 i915_gem_release_mmap(obj);
2890
Keith Packardb97c3d92011-06-24 21:02:59 -07002891 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2892 return;
2893
Chris Wilson97c809fd2012-10-09 19:24:38 +01002894 /* Wait for any direct GTT access to complete */
2895 mb();
2896
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002897 old_read_domains = obj->base.read_domains;
2898 old_write_domain = obj->base.write_domain;
2899
2900 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2901 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2902
2903 trace_i915_gem_object_change_domain(obj,
2904 old_read_domains,
2905 old_write_domain);
2906}
2907
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002908int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002909{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002910 struct drm_i915_gem_object *obj = vma->obj;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002911 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson43e28f02013-01-08 10:53:09 +00002912 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002913
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002914 if (list_empty(&vma->vma_link))
Eric Anholt673a3942008-07-30 12:06:12 -07002915 return 0;
2916
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002917 if (!drm_mm_node_allocated(&vma->node)) {
2918 i915_gem_vma_destroy(vma);
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002919 return 0;
2920 }
Ben Widawsky433544b2013-08-13 18:09:06 -07002921
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002922 if (vma->pin_count)
Chris Wilson31d8d652012-05-24 19:11:20 +01002923 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002924
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002925 BUG_ON(obj->pages == NULL);
2926
Chris Wilsona8198ee2011-04-13 22:04:09 +01002927 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002928 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002929 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002930	/* Continue on if we fail due to EIO; the GPU is hung, so we
2931	 * should be safe, and we need to clean up or else we might
2932 * cause memory corruption through use-after-free.
2933 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002934
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002935 if (i915_is_ggtt(vma->vm)) {
2936 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002937
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002938 /* release the fence reg _after_ flushing */
2939 ret = i915_gem_object_put_fence(obj);
2940 if (ret)
2941 return ret;
2942 }
Daniel Vetter96b47b62009-12-15 17:50:00 +01002943
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002944 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00002945
Ben Widawsky6f65e292013-12-06 14:10:56 -08002946 vma->unbind_vma(vma);
2947
Chris Wilson64bf9302014-02-25 14:23:28 +00002948 list_del_init(&vma->mm_list);
Ben Widawsky5cacaac2013-07-31 17:00:13 -07002949 if (i915_is_ggtt(vma->vm))
Chris Wilsone6a84462014-08-11 12:00:12 +02002950 obj->map_and_fenceable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07002951
Ben Widawsky2f633152013-07-17 12:19:03 -07002952 drm_mm_remove_node(&vma->node);
2953 i915_gem_vma_destroy(vma);
2954
2955 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002956 * no more VMAs exist. */
Armin Reese9490edb2014-07-11 10:20:07 -07002957 if (list_empty(&obj->vma_list)) {
2958 i915_gem_gtt_finish_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07002959 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Armin Reese9490edb2014-07-11 10:20:07 -07002960 }
Eric Anholt673a3942008-07-30 12:06:12 -07002961
Chris Wilson70903c32013-12-04 09:59:09 +00002962	/* And finally, now that the object is completely decoupled from this vma,
2963 * we can drop its hold on the backing storage and allow it to be
2964 * reaped by the shrinker.
2965 */
2966 i915_gem_object_unpin_pages(obj);
2967
Chris Wilson88241782011-01-07 17:09:48 +00002968 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002969}
2970
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002971int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002972{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002973 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01002974 struct intel_engine_cs *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002975 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002976
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002977 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002978 for_each_ring(ring, dev_priv, i) {
Chris Wilson691e6412014-04-09 09:07:36 +01002979 ret = i915_switch_context(ring, ring->default_context);
Ben Widawskyb6c74882012-08-14 14:35:14 -07002980 if (ret)
2981 return ret;
2982
Chris Wilson3e960502012-11-27 16:22:54 +00002983 ret = intel_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002984 if (ret)
2985 return ret;
2986 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002987
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002988 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002989}
2990
Chris Wilson9ce079e2012-04-17 15:31:30 +01002991static void i965_write_fence_reg(struct drm_device *dev, int reg,
2992 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002993{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002994 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak56c844e2013-01-07 21:47:34 +02002995 int fence_reg;
2996 int fence_pitch_shift;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002997
Imre Deak56c844e2013-01-07 21:47:34 +02002998 if (INTEL_INFO(dev)->gen >= 6) {
2999 fence_reg = FENCE_REG_SANDYBRIDGE_0;
3000 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
3001 } else {
3002 fence_reg = FENCE_REG_965_0;
3003 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
3004 }
3005
Chris Wilsond18b9612013-07-10 13:36:23 +01003006 fence_reg += reg * 8;
3007
3008 /* To w/a incoherency with non-atomic 64-bit register updates,
3009 * we split the 64-bit update into two 32-bit writes. In order
3010 * for a partial fence not to be evaluated between writes, we
3011	 * precede the update with a write to turn off the fence register,
3012 * and only enable the fence as the last step.
3013 *
3014 * For extra levels of paranoia, we make sure each step lands
3015 * before applying the next step.
3016 */
3017 I915_WRITE(fence_reg, 0);
3018 POSTING_READ(fence_reg);
3019
Chris Wilson9ce079e2012-04-17 15:31:30 +01003020 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003021 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilsond18b9612013-07-10 13:36:23 +01003022 uint64_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003023
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003024 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
Chris Wilson9ce079e2012-04-17 15:31:30 +01003025 0xfffff000) << 32;
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003026 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
Imre Deak56c844e2013-01-07 21:47:34 +02003027 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
Chris Wilson9ce079e2012-04-17 15:31:30 +01003028 if (obj->tiling_mode == I915_TILING_Y)
3029 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3030 val |= I965_FENCE_REG_VALID;
Daniel Vetterc6642782010-11-12 13:46:18 +00003031
Chris Wilsond18b9612013-07-10 13:36:23 +01003032 I915_WRITE(fence_reg + 4, val >> 32);
3033 POSTING_READ(fence_reg + 4);
3034
3035 I915_WRITE(fence_reg + 0, val);
3036 POSTING_READ(fence_reg);
3037 } else {
3038 I915_WRITE(fence_reg + 4, 0);
3039 POSTING_READ(fence_reg + 4);
3040 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08003041}
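/*
 * Worked example of the value built above (hypothetical object): a 1 MiB
 * Y-tiled buffer at GGTT offset 0x00200000 with a 512-byte stride on gen6+
 * yields
 *
 *	val = ((0x00200000 + 0x00100000 - 4096) & 0xfffff000) << 32
 *	    | (0x00200000 & 0xfffff000)
 *	    | ((512 / 128) - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT
 *	    | (1 << I965_FENCE_TILING_Y_SHIFT)
 *	    | I965_FENCE_REG_VALID
 *
 * written high dword first with the fence disabled in between, so that a
 * partial 64-bit value can never be sampled by the hardware.
 */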
3042
Chris Wilson9ce079e2012-04-17 15:31:30 +01003043static void i915_write_fence_reg(struct drm_device *dev, int reg,
3044 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08003045{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03003046 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01003047 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003048
Chris Wilson9ce079e2012-04-17 15:31:30 +01003049 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003050 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01003051 int pitch_val;
3052 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003053
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003054 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01003055 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003056 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3057 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3058 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
Chris Wilson9ce079e2012-04-17 15:31:30 +01003059
3060 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3061 tile_width = 128;
3062 else
3063 tile_width = 512;
3064
3065 /* Note: pitch better be a power of two tile widths */
3066 pitch_val = obj->stride / tile_width;
3067 pitch_val = ffs(pitch_val) - 1;
3068
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003069 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01003070 if (obj->tiling_mode == I915_TILING_Y)
3071 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3072 val |= I915_FENCE_SIZE_BITS(size);
3073 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3074 val |= I830_FENCE_REG_VALID;
3075 } else
3076 val = 0;
3077
3078 if (reg < 8)
3079 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003080 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01003081 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08003082
Chris Wilson9ce079e2012-04-17 15:31:30 +01003083 I915_WRITE(reg, val);
3084 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08003085}
3086
Chris Wilson9ce079e2012-04-17 15:31:30 +01003087static void i830_write_fence_reg(struct drm_device *dev, int reg,
3088 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08003089{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03003090 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003091 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003092
Chris Wilson9ce079e2012-04-17 15:31:30 +01003093 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003094 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01003095 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003096
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003097 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01003098 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003099 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3100 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3101 i915_gem_obj_ggtt_offset(obj), size);
Eric Anholte76a16d2009-05-26 17:44:56 -07003102
Chris Wilson9ce079e2012-04-17 15:31:30 +01003103 pitch_val = obj->stride / 128;
3104 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003105
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003106 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01003107 if (obj->tiling_mode == I915_TILING_Y)
3108 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3109 val |= I830_FENCE_SIZE_BITS(size);
3110 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3111 val |= I830_FENCE_REG_VALID;
3112 } else
3113 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00003114
Chris Wilson9ce079e2012-04-17 15:31:30 +01003115 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3116 POSTING_READ(FENCE_REG_830_0 + reg * 4);
3117}
3118
Chris Wilsond0a57782012-10-09 19:24:37 +01003119static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3120{
3121 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3122}
3123
Chris Wilson9ce079e2012-04-17 15:31:30 +01003124static void i915_gem_write_fence(struct drm_device *dev, int reg,
3125 struct drm_i915_gem_object *obj)
3126{
Chris Wilsond0a57782012-10-09 19:24:37 +01003127 struct drm_i915_private *dev_priv = dev->dev_private;
3128
3129 /* Ensure that all CPU reads are completed before installing a fence
3130 * and all writes before removing the fence.
3131 */
3132 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3133 mb();
3134
Daniel Vetter94a335d2013-07-17 14:51:28 +02003135 WARN(obj && (!obj->stride || !obj->tiling_mode),
3136 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3137 obj->stride, obj->tiling_mode);
3138
Chris Wilson9ce079e2012-04-17 15:31:30 +01003139 switch (INTEL_INFO(dev)->gen) {
Ben Widawsky5ab31332013-11-02 21:07:03 -07003140 case 8:
Chris Wilson9ce079e2012-04-17 15:31:30 +01003141 case 7:
Imre Deak56c844e2013-01-07 21:47:34 +02003142 case 6:
Chris Wilson9ce079e2012-04-17 15:31:30 +01003143 case 5:
3144 case 4: i965_write_fence_reg(dev, reg, obj); break;
3145 case 3: i915_write_fence_reg(dev, reg, obj); break;
3146 case 2: i830_write_fence_reg(dev, reg, obj); break;
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08003147 default: BUG();
Chris Wilson9ce079e2012-04-17 15:31:30 +01003148 }
Chris Wilsond0a57782012-10-09 19:24:37 +01003149
3150 /* And similarly be paranoid that no direct access to this region
3151	 * is reordered to occur before the fence is installed.
3152 */
3153 if (i915_gem_object_needs_mb(obj))
3154 mb();
Jesse Barnesde151cf2008-11-12 10:03:55 -08003155}
3156
Chris Wilson61050802012-04-17 15:31:31 +01003157static inline int fence_number(struct drm_i915_private *dev_priv,
3158 struct drm_i915_fence_reg *fence)
3159{
3160 return fence - dev_priv->fence_regs;
3161}
3162
3163static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3164 struct drm_i915_fence_reg *fence,
3165 bool enable)
3166{
Chris Wilson2dc8aae2013-05-22 17:08:06 +01003167 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson46a0b632013-07-10 13:36:24 +01003168 int reg = fence_number(dev_priv, fence);
Chris Wilson61050802012-04-17 15:31:31 +01003169
Chris Wilson46a0b632013-07-10 13:36:24 +01003170 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
Chris Wilson61050802012-04-17 15:31:31 +01003171
3172 if (enable) {
Chris Wilson46a0b632013-07-10 13:36:24 +01003173 obj->fence_reg = reg;
Chris Wilson61050802012-04-17 15:31:31 +01003174 fence->obj = obj;
3175 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3176 } else {
3177 obj->fence_reg = I915_FENCE_REG_NONE;
3178 fence->obj = NULL;
3179 list_del_init(&fence->lru_list);
3180 }
Daniel Vetter94a335d2013-07-17 14:51:28 +02003181 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +01003182}
3183
Chris Wilsond9e86c02010-11-10 16:40:20 +00003184static int
Chris Wilsond0a57782012-10-09 19:24:37 +01003185i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003186{
Chris Wilson1c293ea2012-04-17 15:31:27 +01003187 if (obj->last_fenced_seqno) {
Chris Wilson86d5bc32012-07-20 12:41:04 +01003188 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01003189 if (ret)
3190 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003191
3192 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003193 }
3194
3195 return 0;
3196}
3197
3198int
3199i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3200{
Chris Wilson61050802012-04-17 15:31:31 +01003201 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003202 struct drm_i915_fence_reg *fence;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003203 int ret;
3204
Chris Wilsond0a57782012-10-09 19:24:37 +01003205 ret = i915_gem_object_wait_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003206 if (ret)
3207 return ret;
3208
Chris Wilson61050802012-04-17 15:31:31 +01003209 if (obj->fence_reg == I915_FENCE_REG_NONE)
3210 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01003211
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003212 fence = &dev_priv->fence_regs[obj->fence_reg];
3213
Daniel Vetteraff10b302014-02-14 14:06:05 +01003214 if (WARN_ON(fence->pin_count))
3215 return -EBUSY;
3216
Chris Wilson61050802012-04-17 15:31:31 +01003217 i915_gem_object_fence_lost(obj);
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003218 i915_gem_object_update_fence(obj, fence, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003219
3220 return 0;
3221}
3222
3223static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01003224i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01003225{
Daniel Vetterae3db242010-02-19 11:51:58 +01003226 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01003227 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003228 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01003229
3230 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003231 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003232 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3233 reg = &dev_priv->fence_regs[i];
3234 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003235 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003236
Chris Wilson1690e1e2011-12-14 13:57:08 +01003237 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003238 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003239 }
3240
Chris Wilsond9e86c02010-11-10 16:40:20 +00003241 if (avail == NULL)
Chris Wilson5dce5b932014-01-20 10:17:36 +00003242 goto deadlock;
Daniel Vetterae3db242010-02-19 11:51:58 +01003243
3244 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003245 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01003246 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01003247 continue;
3248
Chris Wilson8fe301a2012-04-17 15:31:28 +01003249 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003250 }
3251
Chris Wilson5dce5b932014-01-20 10:17:36 +00003252deadlock:
3253 /* Wait for completion of pending flips which consume fences */
3254 if (intel_has_pending_fb_unpin(dev))
3255 return ERR_PTR(-EAGAIN);
3256
3257 return ERR_PTR(-EDEADLK);
Daniel Vetterae3db242010-02-19 11:51:58 +01003258}
3259
Jesse Barnesde151cf2008-11-12 10:03:55 -08003260/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003261 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08003262 * @obj: object to map through a fence reg
3263 *
3264 * When mapping objects through the GTT, userspace wants to be able to write
3265 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003266 * This function walks the fence regs looking for a free one for @obj,
3267 * stealing one if it can't find any.
3268 *
3269 * It then sets up the reg based on the object's properties: address, pitch
3270 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003271 *
3272 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003273 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003274int
Chris Wilson06d98132012-04-17 15:31:24 +01003275i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08003276{
Chris Wilson05394f32010-11-08 19:18:58 +00003277 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08003278 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01003279 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003280 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003281 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003282
Chris Wilson14415742012-04-17 15:31:33 +01003283 /* Have we updated the tiling parameters upon the object and so
3284 * will need to serialise the write to the associated fence register?
3285 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003286 if (obj->fence_dirty) {
Chris Wilsond0a57782012-10-09 19:24:37 +01003287 ret = i915_gem_object_wait_fence(obj);
Chris Wilson14415742012-04-17 15:31:33 +01003288 if (ret)
3289 return ret;
3290 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003291
Chris Wilsond9e86c02010-11-10 16:40:20 +00003292 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00003293 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3294 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003295 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01003296 list_move_tail(&reg->lru_list,
3297 &dev_priv->mm.fence_list);
3298 return 0;
3299 }
3300 } else if (enable) {
Chris Wilsone6a84462014-08-11 12:00:12 +02003301 if (WARN_ON(!obj->map_and_fenceable))
3302 return -EINVAL;
3303
Chris Wilson14415742012-04-17 15:31:33 +01003304 reg = i915_find_fence_reg(dev);
Chris Wilson5dce5b932014-01-20 10:17:36 +00003305 if (IS_ERR(reg))
3306 return PTR_ERR(reg);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003307
Chris Wilson14415742012-04-17 15:31:33 +01003308 if (reg->obj) {
3309 struct drm_i915_gem_object *old = reg->obj;
3310
Chris Wilsond0a57782012-10-09 19:24:37 +01003311 ret = i915_gem_object_wait_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003312 if (ret)
3313 return ret;
3314
Chris Wilson14415742012-04-17 15:31:33 +01003315 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003316 }
Chris Wilson14415742012-04-17 15:31:33 +01003317 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07003318 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07003319
Chris Wilson14415742012-04-17 15:31:33 +01003320 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson14415742012-04-17 15:31:33 +01003321
Chris Wilson9ce079e2012-04-17 15:31:30 +01003322 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003323}
3324
Chris Wilson42d6ab42012-07-26 11:49:32 +01003325static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3326 struct drm_mm_node *gtt_space,
3327 unsigned long cache_level)
3328{
3329 struct drm_mm_node *other;
3330
3331 /* On non-LLC machines we have to be careful when putting differing
3332 * types of snoopable memory together to avoid the prefetcher
Damien Lespiau4239ca72012-12-03 16:26:16 +00003333 * crossing memory domains and dying.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003334 */
3335 if (HAS_LLC(dev))
3336 return true;
3337
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003338 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003339 return true;
3340
3341 if (list_empty(&gtt_space->node_list))
3342 return true;
3343
3344 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3345 if (other->allocated && !other->hole_follows && other->color != cache_level)
3346 return false;
3347
3348 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3349 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3350 return false;
3351
3352 return true;
3353}
3354
3355static void i915_gem_verify_gtt(struct drm_device *dev)
3356{
3357#if WATCH_GTT
3358 struct drm_i915_private *dev_priv = dev->dev_private;
3359 struct drm_i915_gem_object *obj;
3360 int err = 0;
3361
Ben Widawsky35c20a62013-05-31 11:28:48 -07003362 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
Chris Wilson42d6ab42012-07-26 11:49:32 +01003363 if (obj->gtt_space == NULL) {
3364 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3365 err++;
3366 continue;
3367 }
3368
3369 if (obj->cache_level != obj->gtt_space->color) {
3370 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003371 i915_gem_obj_ggtt_offset(obj),
3372 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003373 obj->cache_level,
3374 obj->gtt_space->color);
3375 err++;
3376 continue;
3377 }
3378
3379 if (!i915_gem_valid_gtt_space(dev,
3380 obj->gtt_space,
3381 obj->cache_level)) {
3382 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003383 i915_gem_obj_ggtt_offset(obj),
3384 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003385 obj->cache_level);
3386 err++;
3387 continue;
3388 }
3389 }
3390
3391 WARN_ON(err);
3392#endif
3393}
3394
Jesse Barnesde151cf2008-11-12 10:03:55 -08003395/**
Eric Anholt673a3942008-07-30 12:06:12 -07003396 * Finds free space in the GTT aperture and binds the object there.
3397 */
Daniel Vetter262de142014-02-14 14:01:20 +01003398static struct i915_vma *
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003399i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3400 struct i915_address_space *vm,
3401 unsigned alignment,
Chris Wilsond23db882014-05-23 08:48:08 +02003402 uint64_t flags)
Eric Anholt673a3942008-07-30 12:06:12 -07003403{
Chris Wilson05394f32010-11-08 19:18:58 +00003404 struct drm_device *dev = obj->base.dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03003405 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter5e783302010-11-14 22:32:36 +01003406 u32 size, fence_size, fence_alignment, unfenced_alignment;
Chris Wilsond23db882014-05-23 08:48:08 +02003407 unsigned long start =
3408 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3409 unsigned long end =
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003410 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
Ben Widawsky2f633152013-07-17 12:19:03 -07003411 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003412 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003413
Chris Wilsone28f8712011-07-18 13:11:49 -07003414 fence_size = i915_gem_get_gtt_size(dev,
3415 obj->base.size,
3416 obj->tiling_mode);
3417 fence_alignment = i915_gem_get_gtt_alignment(dev,
3418 obj->base.size,
Imre Deakd8651102013-01-07 21:47:33 +02003419 obj->tiling_mode, true);
Chris Wilsone28f8712011-07-18 13:11:49 -07003420 unfenced_alignment =
Imre Deakd8651102013-01-07 21:47:33 +02003421 i915_gem_get_gtt_alignment(dev,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003422 obj->base.size,
3423 obj->tiling_mode, false);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003424
Eric Anholt673a3942008-07-30 12:06:12 -07003425 if (alignment == 0)
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003426 alignment = flags & PIN_MAPPABLE ? fence_alignment :
Daniel Vetter5e783302010-11-14 22:32:36 +01003427 unfenced_alignment;
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003428 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003429 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
Daniel Vetter262de142014-02-14 14:01:20 +01003430 return ERR_PTR(-EINVAL);
Eric Anholt673a3942008-07-30 12:06:12 -07003431 }
3432
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003433 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003434
Chris Wilson654fc602010-05-27 13:18:21 +01003435 /* If the object is bigger than the entire aperture, reject it early
3436 * before evicting everything in a vain attempt to find space.
3437 */
Chris Wilsond23db882014-05-23 08:48:08 +02003438 if (obj->base.size > end) {
3439 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
Chris Wilsona36689c2013-05-21 16:58:49 +01003440 obj->base.size,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003441 flags & PIN_MAPPABLE ? "mappable" : "total",
Chris Wilsond23db882014-05-23 08:48:08 +02003442 end);
Daniel Vetter262de142014-02-14 14:01:20 +01003443 return ERR_PTR(-E2BIG);
Chris Wilson654fc602010-05-27 13:18:21 +01003444 }
3445
Chris Wilson37e680a2012-06-07 15:38:42 +01003446 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003447 if (ret)
Daniel Vetter262de142014-02-14 14:01:20 +01003448 return ERR_PTR(ret);
Chris Wilson6c085a72012-08-20 11:40:46 +02003449
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003450 i915_gem_object_pin_pages(obj);
3451
Ben Widawskyaccfef22013-08-14 11:38:35 +02003452 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
Daniel Vetter262de142014-02-14 14:01:20 +01003453 if (IS_ERR(vma))
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003454 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003455
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003456search_free:
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003457 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003458 size, alignment,
Chris Wilsond23db882014-05-23 08:48:08 +02003459 obj->cache_level,
3460 start, end,
Lauri Kasanen62347f92014-04-02 20:03:57 +03003461 DRM_MM_SEARCH_DEFAULT,
3462 DRM_MM_CREATE_DEFAULT);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003463 if (ret) {
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07003464 ret = i915_gem_evict_something(dev, vm, size, alignment,
Chris Wilsond23db882014-05-23 08:48:08 +02003465 obj->cache_level,
3466 start, end,
3467 flags);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003468 if (ret == 0)
3469 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003470
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003471 goto err_free_vma;
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003472 }
Ben Widawsky2f633152013-07-17 12:19:03 -07003473 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003474 obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003475 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003476 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003477 }
3478
Daniel Vetter74163902012-02-15 23:50:21 +01003479 ret = i915_gem_gtt_prepare_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003480 if (ret)
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003481 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003482
Ben Widawsky35c20a62013-05-31 11:28:48 -07003483 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -07003484 list_add_tail(&vma->mm_list, &vm->inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003485
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003486 if (i915_is_ggtt(vm)) {
3487 bool mappable, fenceable;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003488
Daniel Vetter49987092013-08-14 10:21:23 +02003489 fenceable = (vma->node.size == fence_size &&
3490 (vma->node.start & (fence_alignment - 1)) == 0);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003491
Daniel Vetter49987092013-08-14 10:21:23 +02003492 mappable = (vma->node.start + obj->base.size <=
3493 dev_priv->gtt.mappable_end);
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003494
Ben Widawsky5cacaac2013-07-31 17:00:13 -07003495 obj->map_and_fenceable = mappable && fenceable;
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003496 }
Daniel Vetter75e9e912010-11-04 17:11:09 +01003497
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003498 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003499
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003500 trace_i915_vma_bind(vma, flags);
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003501 vma->bind_vma(vma, obj->cache_level,
3502 flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3503
Chris Wilson42d6ab42012-07-26 11:49:32 +01003504 i915_gem_verify_gtt(dev);
Daniel Vetter262de142014-02-14 14:01:20 +01003505 return vma;
Ben Widawsky2f633152013-07-17 12:19:03 -07003506
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003507err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003508 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003509err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003510 i915_gem_vma_destroy(vma);
Daniel Vetter262de142014-02-14 14:01:20 +01003511 vma = ERR_PTR(ret);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003512err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003513 i915_gem_object_unpin_pages(obj);
Daniel Vetter262de142014-02-14 14:01:20 +01003514 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003515}
3516
Chris Wilson000433b2013-08-08 14:41:09 +01003517bool
Chris Wilson2c225692013-08-09 12:26:45 +01003518i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3519 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003520{
Eric Anholt673a3942008-07-30 12:06:12 -07003521 /* If we don't have a page list set up, then we're not pinned
3522 * to GPU, and we can ignore the cache flush because it'll happen
3523 * again at bind time.
3524 */
Chris Wilson05394f32010-11-08 19:18:58 +00003525 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003526 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003527
Imre Deak769ce462013-02-13 21:56:05 +02003528 /*
3529 * Stolen memory is always coherent with the GPU as it is explicitly
3530 * marked as wc by the system, or the system is cache-coherent.
3531 */
3532 if (obj->stolen)
Chris Wilson000433b2013-08-08 14:41:09 +01003533 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003534
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003535 /* If the GPU is snooping the contents of the CPU cache,
3536 * we do not need to manually clear the CPU cache lines. However,
3537 * the caches are only snooped when the render cache is
3538 * flushed/invalidated. As we always have to emit invalidations
3539 * and flushes when moving into and out of the RENDER domain, correct
3540 * snooping behaviour occurs naturally as the result of our domain
3541 * tracking.
3542 */
Chris Wilson2c225692013-08-09 12:26:45 +01003543 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
Chris Wilson000433b2013-08-08 14:41:09 +01003544 return false;
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003545
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003546 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003547 drm_clflush_sg(obj->pages);
Chris Wilson000433b2013-08-08 14:41:09 +01003548
3549 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003550}
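/* For example, a snooped (LLC-coherent) object skips the clflush above
 * unless force is set; the display path forces it because scanout reads
 * bypass the snooping.
 */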
3551
3552/** Flushes the GTT write domain for the object if it's dirty. */
3553static void
Chris Wilson05394f32010-11-08 19:18:58 +00003554i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003555{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003556 uint32_t old_write_domain;
3557
Chris Wilson05394f32010-11-08 19:18:58 +00003558 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003559 return;
3560
Chris Wilson63256ec2011-01-04 18:42:07 +00003561 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003562 * to it immediately go to main memory as far as we know, so there's
3563	 * no chipset flush, nor do they land in the render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003564 *
3565 * However, we do have to enforce the order so that all writes through
3566 * the GTT land before any writes to the device, such as updates to
3567 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08003568 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003569 wmb();
3570
Chris Wilson05394f32010-11-08 19:18:58 +00003571 old_write_domain = obj->base.write_domain;
3572 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003573
Daniel Vetterf99d7062014-06-19 16:01:59 +02003574 intel_fb_obj_flush(obj, false);
3575
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003576 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003577 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003578 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003579}
3580
3581/** Flushes the CPU write domain for the object if it's dirty. */
3582static void
Chris Wilson2c225692013-08-09 12:26:45 +01003583i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3584 bool force)
Eric Anholte47c68e2008-11-14 13:35:19 -08003585{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003586 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08003587
Chris Wilson05394f32010-11-08 19:18:58 +00003588 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003589 return;
3590
Chris Wilson000433b2013-08-08 14:41:09 +01003591 if (i915_gem_clflush_object(obj, force))
3592 i915_gem_chipset_flush(obj->base.dev);
3593
Chris Wilson05394f32010-11-08 19:18:58 +00003594 old_write_domain = obj->base.write_domain;
3595 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003596
Daniel Vetterf99d7062014-06-19 16:01:59 +02003597 intel_fb_obj_flush(obj, false);
3598
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003599 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003600 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003601 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003602}
3603
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003604/**
3605 * Moves a single object to the GTT read, and possibly write, domain.
3606 *
3607 * This function returns when the move is complete, including waiting on
3608 * flushes to occur.
3609 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003610int
Chris Wilson20217462010-11-23 15:26:33 +00003611i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003612{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03003613 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsondc8cd1e2014-08-09 17:37:22 +01003614 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003615 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003616 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003617
Eric Anholt02354392008-11-26 13:58:13 -08003618 /* Not valid to be called on unbound objects. */
Chris Wilsondc8cd1e2014-08-09 17:37:22 +01003619 if (vma == NULL)
Eric Anholt02354392008-11-26 13:58:13 -08003620 return -EINVAL;
3621
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003622 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3623 return 0;
3624
Chris Wilson0201f1e2012-07-20 12:41:01 +01003625 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003626 if (ret)
3627 return ret;
3628
Chris Wilsonc8725f32014-03-17 12:21:55 +00003629 i915_gem_object_retire(obj);
Chris Wilson2c225692013-08-09 12:26:45 +01003630 i915_gem_object_flush_cpu_write_domain(obj, false);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003631
Chris Wilsond0a57782012-10-09 19:24:37 +01003632 /* Serialise direct access to this object with the barriers for
3633 * coherent writes from the GPU, by effectively invalidating the
3634 * GTT domain upon first access.
3635 */
3636 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3637 mb();
3638
Chris Wilson05394f32010-11-08 19:18:58 +00003639 old_write_domain = obj->base.write_domain;
3640 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003641
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003642 /* It should now be out of any other write domains, and we can update
3643 * the domain values for our changes.
3644 */
Chris Wilson05394f32010-11-08 19:18:58 +00003645 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3646 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003647 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003648 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3649 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
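		/* Subsequent writes land via the GTT; mark the object dirty
		 * so the pages are written back to storage when released.
		 */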
3650 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003651 }
3652
Daniel Vetterf99d7062014-06-19 16:01:59 +02003653 if (write)
3654 intel_fb_obj_invalidate(obj, NULL);
3655
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003656 trace_i915_gem_object_change_domain(obj,
3657 old_read_domains,
3658 old_write_domain);
3659
Chris Wilson8325a092012-04-24 15:52:35 +01003660 /* And bump the LRU for this access */
Chris Wilsondc8cd1e2014-08-09 17:37:22 +01003661 if (i915_gem_object_is_inactive(obj))
3662 list_move_tail(&vma->mm_list,
3663 &dev_priv->gtt.base.inactive_list);
Chris Wilson8325a092012-04-24 15:52:35 +01003664
Eric Anholte47c68e2008-11-14 13:35:19 -08003665 return 0;
3666}
3667
Chris Wilsone4ffd172011-04-04 09:44:39 +01003668int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3669 enum i915_cache_level cache_level)
3670{
Daniel Vetter7bddb012012-02-09 17:15:47 +01003671 struct drm_device *dev = obj->base.dev;
Chris Wilsondf6f7832014-03-21 07:40:56 +00003672 struct i915_vma *vma, *next;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003673 int ret;
3674
3675 if (obj->cache_level == cache_level)
3676 return 0;
3677
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003678 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003679		DRM_DEBUG("cannot change the cache level of pinned objects\n");
3680 return -EBUSY;
3681 }
3682
Chris Wilsondf6f7832014-03-21 07:40:56 +00003683 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003684 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003685 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003686 if (ret)
3687 return ret;
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003688 }
Chris Wilson42d6ab42012-07-26 11:49:32 +01003689 }
3690
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003691 if (i915_gem_obj_bound_any(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003692 ret = i915_gem_object_finish_gpu(obj);
3693 if (ret)
3694 return ret;
3695
3696 i915_gem_object_finish_gtt(obj);
3697
3698 /* Before SandyBridge, you could not use tiling or fence
3699 * registers with snooped memory, so relinquish any fences
3700 * currently pointing to our region in the aperture.
3701 */
Chris Wilson42d6ab42012-07-26 11:49:32 +01003702 if (INTEL_INFO(dev)->gen < 6) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003703 ret = i915_gem_object_put_fence(obj);
3704 if (ret)
3705 return ret;
3706 }
3707
Ben Widawsky6f65e292013-12-06 14:10:56 -08003708 list_for_each_entry(vma, &obj->vma_list, vma_link)
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003709 if (drm_mm_node_allocated(&vma->node))
3710 vma->bind_vma(vma, cache_level,
3711 obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003712 }
3713
Chris Wilson2c225692013-08-09 12:26:45 +01003714 list_for_each_entry(vma, &obj->vma_list, vma_link)
3715 vma->node.color = cache_level;
3716 obj->cache_level = cache_level;
3717
3718 if (cpu_write_needs_clflush(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003719 u32 old_read_domains, old_write_domain;
3720
3721 /* If we're coming from LLC cached, then we haven't
3722 * actually been tracking whether the data is in the
3723 * CPU cache or not, since we only allow one bit set
3724 * in obj->write_domain and have been skipping the clflushes.
3725 * Just set it to the CPU cache for now.
3726 */
Chris Wilsonc8725f32014-03-17 12:21:55 +00003727 i915_gem_object_retire(obj);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003728 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003729
3730 old_read_domains = obj->base.read_domains;
3731 old_write_domain = obj->base.write_domain;
3732
3733 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3734 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3735
3736 trace_i915_gem_object_change_domain(obj,
3737 old_read_domains,
3738 old_write_domain);
3739 }
3740
Chris Wilson42d6ab42012-07-26 11:49:32 +01003741 i915_gem_verify_gtt(dev);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003742 return 0;
3743}
3744
Ben Widawsky199adf42012-09-21 17:01:20 -07003745int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3746 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003747{
Ben Widawsky199adf42012-09-21 17:01:20 -07003748 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003749 struct drm_i915_gem_object *obj;
3750 int ret;
3751
3752 ret = i915_mutex_lock_interruptible(dev);
3753 if (ret)
3754 return ret;
3755
3756 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3757 if (&obj->base == NULL) {
3758 ret = -ENOENT;
3759 goto unlock;
3760 }
3761
Chris Wilson651d7942013-08-08 14:41:10 +01003762 switch (obj->cache_level) {
3763 case I915_CACHE_LLC:
3764 case I915_CACHE_L3_LLC:
3765 args->caching = I915_CACHING_CACHED;
3766 break;
3767
Chris Wilson4257d3b2013-08-08 14:41:11 +01003768 case I915_CACHE_WT:
3769 args->caching = I915_CACHING_DISPLAY;
3770 break;
3771
Chris Wilson651d7942013-08-08 14:41:10 +01003772 default:
3773 args->caching = I915_CACHING_NONE;
3774 break;
3775 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003776
3777 drm_gem_object_unreference(&obj->base);
3778unlock:
3779 mutex_unlock(&dev->struct_mutex);
3780 return ret;
3781}
3782
Ben Widawsky199adf42012-09-21 17:01:20 -07003783int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3784 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003785{
Ben Widawsky199adf42012-09-21 17:01:20 -07003786 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003787 struct drm_i915_gem_object *obj;
3788 enum i915_cache_level level;
3789 int ret;
3790
Ben Widawsky199adf42012-09-21 17:01:20 -07003791 switch (args->caching) {
3792 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003793 level = I915_CACHE_NONE;
3794 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003795 case I915_CACHING_CACHED:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003796 level = I915_CACHE_LLC;
3797 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003798 case I915_CACHING_DISPLAY:
3799 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3800 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003801 default:
3802 return -EINVAL;
3803 }
3804
Ben Widawsky3bc29132012-09-26 16:15:20 -07003805 ret = i915_mutex_lock_interruptible(dev);
3806 if (ret)
3807 return ret;
3808
Chris Wilsone6994ae2012-07-10 10:27:08 +01003809 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3810 if (&obj->base == NULL) {
3811 ret = -ENOENT;
3812 goto unlock;
3813 }
3814
3815 ret = i915_gem_object_set_cache_level(obj, level);
3816
3817 drm_gem_object_unreference(&obj->base);
3818unlock:
3819 mutex_unlock(&dev->struct_mutex);
3820 return ret;
3821}
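/*
 * Illustrative userspace sketch (not kernel code, a hedged sketch only):
 * how a client might request LLC caching for a buffer via this ioctl.
 * Assumes libdrm's drmIoctl(), an already-open DRM fd and a GEM handle
 * "bo_handle" obtained elsewhere.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = bo_handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
 *		perror("I915_GEM_SET_CACHING");
 */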
3822
Chris Wilsoncc98b412013-08-09 12:25:09 +01003823static bool is_pin_display(struct drm_i915_gem_object *obj)
3824{
Oscar Mateo19656432014-05-16 14:20:43 +01003825 struct i915_vma *vma;
3826
Oscar Mateo19656432014-05-16 14:20:43 +01003827 vma = i915_gem_obj_to_ggtt(obj);
3828 if (!vma)
3829 return false;
3830
Chris Wilsoncc98b412013-08-09 12:25:09 +01003831 /* There are 3 sources that pin objects:
3832 * 1. The display engine (scanouts, sprites, cursors);
3833 * 2. Reservations for execbuffer;
3834 * 3. The user.
3835 *
3836 * We can ignore reservations as we hold the struct_mutex and
3837 * are only called outside of the reservation path. The user
3838 * can only increment pin_count once, and so if after
3839 * subtracting the potential reference by the user, any pin_count
3840 * remains, it must be due to another use by the display engine.
3841 */
Oscar Mateo19656432014-05-16 14:20:43 +01003842 return vma->pin_count - !!obj->user_pin_count;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003843}
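/* Worked example: a scanout buffer pinned by the display engine and once
 * by the user has vma->pin_count == 2 and user_pin_count == 1, so the
 * expression above evaluates to 2 - 1 = 1: still pinned for display.
 */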
3844
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003845/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003846 * Prepare buffer for display plane (scanout, cursors, etc).
3847 * Can be called from an uninterruptible phase (modesetting) and allows
3848 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003849 */
3850int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003851i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3852 u32 alignment,
Oscar Mateoa4872ba2014-05-22 14:13:33 +01003853 struct intel_engine_cs *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003854{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003855 u32 old_read_domains, old_write_domain;
Oscar Mateo19656432014-05-16 14:20:43 +01003856 bool was_pin_display;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003857 int ret;
3858
Chris Wilson0be73282010-12-06 14:36:27 +00003859 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07003860 ret = i915_gem_object_sync(obj, pipelined);
3861 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003862 return ret;
3863 }
3864
Chris Wilsoncc98b412013-08-09 12:25:09 +01003865 /* Mark the pin_display early so that we account for the
3866 * display coherency whilst setting up the cache domains.
3867 */
Oscar Mateo19656432014-05-16 14:20:43 +01003868 was_pin_display = obj->pin_display;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003869 obj->pin_display = true;
3870
Eric Anholta7ef0642011-03-29 16:59:54 -07003871 /* The display engine is not coherent with the LLC cache on gen6. As
3872 * a result, we make sure that the pinning that is about to occur is
3873	 * done with uncached PTEs. This is the lowest common denominator for all
3874 * chipsets.
3875 *
3876 * However for gen6+, we could do better by using the GFDT bit instead
3877 * of uncaching, which would allow us to flush all the LLC-cached data
3878 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3879 */
Chris Wilson651d7942013-08-08 14:41:10 +01003880 ret = i915_gem_object_set_cache_level(obj,
3881 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Eric Anholta7ef0642011-03-29 16:59:54 -07003882 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003883 goto err_unpin_display;
Eric Anholta7ef0642011-03-29 16:59:54 -07003884
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003885 /* As the user may map the buffer once pinned in the display plane
3886 * (e.g. libkms for the bootup splash), we have to ensure that we
3887 * always use map_and_fenceable for all scanout buffers.
3888 */
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003889 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003890 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003891 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003892
Chris Wilson2c225692013-08-09 12:26:45 +01003893 i915_gem_object_flush_cpu_write_domain(obj, true);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003894
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003895 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003896 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003897
3898 /* It should now be out of any other write domains, and we can update
3899 * the domain values for our changes.
3900 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003901 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003902 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003903
3904 trace_i915_gem_object_change_domain(obj,
3905 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003906 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003907
3908 return 0;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003909
3910err_unpin_display:
Oscar Mateo19656432014-05-16 14:20:43 +01003911 WARN_ON(was_pin_display != is_pin_display(obj));
3912 obj->pin_display = was_pin_display;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003913 return ret;
3914}
3915
3916void
3917i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3918{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003919 i915_gem_object_ggtt_unpin(obj);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003920 obj->pin_display = is_pin_display(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003921}
3922
Chris Wilson85345512010-11-13 09:49:11 +00003923int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003924i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003925{
Chris Wilson88241782011-01-07 17:09:48 +00003926 int ret;
3927
Chris Wilsona8198ee2011-04-13 22:04:09 +01003928 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003929 return 0;
3930
Chris Wilson0201f1e2012-07-20 12:41:01 +01003931 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsonc501ae72011-12-14 13:57:23 +01003932 if (ret)
3933 return ret;
3934
Chris Wilsona8198ee2011-04-13 22:04:09 +01003935 /* Ensure that we invalidate the GPU's caches and TLBs. */
3936 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003937 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003938}
3939
Eric Anholte47c68e2008-11-14 13:35:19 -08003940/**
3941 * Moves a single object to the CPU read, and possibly write, domain.
3942 *
3943 * This function returns when the move is complete, including waiting on
3944 * flushes to occur.
3945 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003946int
Chris Wilson919926a2010-11-12 13:42:53 +00003947i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003948{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003949 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003950 int ret;
3951
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003952 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3953 return 0;
3954
Chris Wilson0201f1e2012-07-20 12:41:01 +01003955 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003956 if (ret)
3957 return ret;
3958
Chris Wilsonc8725f32014-03-17 12:21:55 +00003959 i915_gem_object_retire(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003960 i915_gem_object_flush_gtt_write_domain(obj);
3961
Chris Wilson05394f32010-11-08 19:18:58 +00003962 old_write_domain = obj->base.write_domain;
3963 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003964
Eric Anholte47c68e2008-11-14 13:35:19 -08003965 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003966 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003967 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003968
Chris Wilson05394f32010-11-08 19:18:58 +00003969 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003970 }
3971
3972 /* It should now be out of any other write domains, and we can update
3973 * the domain values for our changes.
3974 */
Chris Wilson05394f32010-11-08 19:18:58 +00003975 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003976
3977 /* If we're writing through the CPU, then the GPU read domains will
3978 * need to be invalidated at next use.
3979 */
3980 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003981 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3982 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003983 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003984
Daniel Vetterf99d7062014-06-19 16:01:59 +02003985 if (write)
3986 intel_fb_obj_invalidate(obj, NULL);
3987
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003988 trace_i915_gem_object_change_domain(obj,
3989 old_read_domains,
3990 old_write_domain);
3991
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003992 return 0;
3993}
3994
Eric Anholt673a3942008-07-30 12:06:12 -07003995/* Throttle our rendering by waiting until the ring has completed the requests
3996 * we emitted more than 20 msec ago.
3997 *
Eric Anholtb9624422009-06-03 07:27:35 +00003998 * Note that if we were to use the current jiffies each time around the loop,
3999 * we wouldn't escape the function with any frames outstanding if the time to
4000 * render a frame was over 20ms.
4001 *
Eric Anholt673a3942008-07-30 12:06:12 -07004002 * This should get us reasonable parallelism between CPU and GPU but also
4003 * relatively low latency when blocking on a particular request to finish.
4004 */
4005static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004006i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004007{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004008 struct drm_i915_private *dev_priv = dev->dev_private;
4009 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00004010 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004011 struct drm_i915_gem_request *request;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01004012 struct intel_engine_cs *ring = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01004013 unsigned reset_counter;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004014 u32 seqno = 0;
4015 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004016
Daniel Vetter308887a2012-11-14 17:14:06 +01004017 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4018 if (ret)
4019 return ret;
4020
4021 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
4022 if (ret)
4023 return ret;
Chris Wilsone110e8d2011-01-26 15:39:14 +00004024
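	/* Scan this client's requests oldest-first, remembering the newest
	 * request emitted more than 20ms ago; that is the one throttled on.
	 */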
Chris Wilson1c255952010-09-26 11:03:27 +01004025 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004026 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00004027 if (time_after_eq(request->emitted_jiffies, recent_enough))
4028 break;
4029
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004030 ring = request->ring;
4031 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00004032 }
Daniel Vetterf69061b2012-12-06 09:01:42 +01004033 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson1c255952010-09-26 11:03:27 +01004034 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004035
4036 if (seqno == 0)
4037 return 0;
4038
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004039 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004040 if (ret == 0)
4041 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00004042
Eric Anholt673a3942008-07-30 12:06:12 -07004043 return ret;
4044}
4045
Chris Wilsond23db882014-05-23 08:48:08 +02004046static bool
4047i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4048{
4049 struct drm_i915_gem_object *obj = vma->obj;
4050
4051 if (alignment &&
4052 vma->node.start & (alignment - 1))
4053 return true;
4054
4055 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4056 return true;
4057
4058 if (flags & PIN_OFFSET_BIAS &&
4059 vma->node.start < (flags & PIN_OFFSET_MASK))
4060 return true;
4061
4062 return false;
4063}
4064
Eric Anholt673a3942008-07-30 12:06:12 -07004065int
Chris Wilson05394f32010-11-08 19:18:58 +00004066i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07004067 struct i915_address_space *vm,
Chris Wilson05394f32010-11-08 19:18:58 +00004068 uint32_t alignment,
Chris Wilsond23db882014-05-23 08:48:08 +02004069 uint64_t flags)
Eric Anholt673a3942008-07-30 12:06:12 -07004070{
Ben Widawsky6e7186a2014-05-06 22:21:36 -07004071 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004072 struct i915_vma *vma;
Eric Anholt673a3942008-07-30 12:06:12 -07004073 int ret;
4074
Ben Widawsky6e7186a2014-05-06 22:21:36 -07004075 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4076 return -ENODEV;
4077
Daniel Vetterbf3d1492014-02-14 14:01:12 +01004078 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
Daniel Vetter1ec9e262014-02-14 14:01:11 +01004079 return -EINVAL;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004080
4081 vma = i915_gem_obj_to_vma(obj, vm);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004082 if (vma) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004083 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4084 return -EBUSY;
4085
Chris Wilsond23db882014-05-23 08:48:08 +02004086 if (i915_vma_misplaced(vma, alignment, flags)) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004087 WARN(vma->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01004088 "bo is already pinned with incorrect alignment:"
Ben Widawskyf343c5f2013-07-05 14:41:04 -07004089 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
Daniel Vetter75e9e912010-11-04 17:11:09 +01004090 " obj->map_and_fenceable=%d\n",
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004091 i915_gem_obj_offset(obj, vm), alignment,
Chris Wilsond23db882014-05-23 08:48:08 +02004092 !!(flags & PIN_MAPPABLE),
Chris Wilson05394f32010-11-08 19:18:58 +00004093 obj->map_and_fenceable);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004094 ret = i915_vma_unbind(vma);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01004095 if (ret)
4096 return ret;
Daniel Vetter8ea99c92014-02-14 14:01:21 +01004097
4098 vma = NULL;
Chris Wilsonac0c6b52010-05-27 13:18:18 +01004099 }
4100 }
4101
Daniel Vetter8ea99c92014-02-14 14:01:21 +01004102 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
Daniel Vetter262de142014-02-14 14:01:20 +01004103 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
4104 if (IS_ERR(vma))
4105 return PTR_ERR(vma);
Chris Wilson22c344e2009-02-11 14:26:45 +00004106 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05004107
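	/* The caller asked for a global GTT binding; create one now if the
	 * object is not already mapped into the global GTT.
	 */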
Daniel Vetter8ea99c92014-02-14 14:01:21 +01004108 if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
4109 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
Daniel Vetter74898d72012-02-15 23:50:22 +01004110
Daniel Vetter8ea99c92014-02-14 14:01:21 +01004111 vma->pin_count++;
Daniel Vetter1ec9e262014-02-14 14:01:11 +01004112 if (flags & PIN_MAPPABLE)
4113		obj->pin_mappable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07004114
4115 return 0;
4116}
4117
4118void
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004119i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07004120{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004121 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004122
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004123 BUG_ON(!vma);
4124 BUG_ON(vma->pin_count == 0);
4125 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
4126
4127 if (--vma->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00004128 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07004129}
4130
Daniel Vetterd8ffa602014-05-13 12:11:26 +02004131bool
4132i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
4133{
4134 if (obj->fence_reg != I915_FENCE_REG_NONE) {
4135 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4136 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
4137
4138 WARN_ON(!ggtt_vma ||
4139 dev_priv->fence_regs[obj->fence_reg].pin_count >
4140 ggtt_vma->pin_count);
4141 dev_priv->fence_regs[obj->fence_reg].pin_count++;
4142 return true;
4143 } else
4144 return false;
4145}
4146
4147void
4148i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
4149{
4150 if (obj->fence_reg != I915_FENCE_REG_NONE) {
4151 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4152 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
4153 dev_priv->fence_regs[obj->fence_reg].pin_count--;
4154 }
4155}
4156
Eric Anholt673a3942008-07-30 12:06:12 -07004157int
4158i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00004159 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004160{
4161 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004162 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07004163 int ret;
4164
Daniel Vetter02f6bcc2013-12-18 16:30:22 +01004165 if (INTEL_INFO(dev)->gen >= 6)
4166 return -ENODEV;
4167
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004168 ret = i915_mutex_lock_interruptible(dev);
4169 if (ret)
4170 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004171
Chris Wilson05394f32010-11-08 19:18:58 +00004172 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004173 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004174 ret = -ENOENT;
4175 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004176 }
Eric Anholt673a3942008-07-30 12:06:12 -07004177
Chris Wilson05394f32010-11-08 19:18:58 +00004178 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00004179 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00004180 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004181 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004182 }
4183
Chris Wilson05394f32010-11-08 19:18:58 +00004184 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00004185 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
Jesse Barnes79e53942008-11-07 14:24:08 -08004186 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004187 ret = -EINVAL;
4188 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08004189 }
4190
Daniel Vetteraa5f8022013-10-10 14:46:37 +02004191 if (obj->user_pin_count == ULONG_MAX) {
4192 ret = -EBUSY;
4193 goto out;
4194 }
4195
Chris Wilson93be8782013-01-02 10:31:22 +00004196 if (obj->user_pin_count == 0) {
Daniel Vetter1ec9e262014-02-14 14:01:11 +01004197 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004198 if (ret)
4199 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07004200 }
4201
Chris Wilson93be8782013-01-02 10:31:22 +00004202 obj->user_pin_count++;
4203 obj->pin_filp = file;
4204
Ben Widawskyf343c5f2013-07-05 14:41:04 -07004205 args->offset = i915_gem_obj_ggtt_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004206out:
Chris Wilson05394f32010-11-08 19:18:58 +00004207 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004208unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004209 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004210 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004211}
4212
4213int
4214i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00004215 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004216{
4217 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004218 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004219 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004220
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004221 ret = i915_mutex_lock_interruptible(dev);
4222 if (ret)
4223 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004224
Chris Wilson05394f32010-11-08 19:18:58 +00004225 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004226 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004227 ret = -ENOENT;
4228 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004229 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01004230
Chris Wilson05394f32010-11-08 19:18:58 +00004231 if (obj->pin_filp != file) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00004232		DRM_DEBUG("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
Jesse Barnes79e53942008-11-07 14:24:08 -08004233 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004234 ret = -EINVAL;
4235 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08004236 }
Chris Wilson05394f32010-11-08 19:18:58 +00004237 obj->user_pin_count--;
4238 if (obj->user_pin_count == 0) {
4239 obj->pin_filp = NULL;
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004240 i915_gem_object_ggtt_unpin(obj);
Jesse Barnes79e53942008-11-07 14:24:08 -08004241 }
Eric Anholt673a3942008-07-30 12:06:12 -07004242
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004243out:
Chris Wilson05394f32010-11-08 19:18:58 +00004244 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004245unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004246 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004247 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004248}
4249
4250int
4251i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00004252 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004253{
4254 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004255 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01004256 int ret;
4257
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004258 ret = i915_mutex_lock_interruptible(dev);
4259 if (ret)
4260 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004261
Chris Wilson05394f32010-11-08 19:18:58 +00004262 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004263 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004264 ret = -ENOENT;
4265 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004266 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08004267
Chris Wilson0be555b2010-08-04 15:36:30 +01004268 /* Count all active objects as busy, even if they are currently not used
4269 * by the gpu. Users of this interface expect objects to eventually
4270 * become non-busy without any further actions, therefore emit any
4271 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08004272 */
Daniel Vetter30dfebf2012-06-01 15:21:23 +02004273 ret = i915_gem_object_flush_active(obj);
4274
Chris Wilson05394f32010-11-08 19:18:58 +00004275 args->busy = obj->active;
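	/* Encode the last ring used in the upper 16 bits so userspace can
	 * tell which engine the object is busy on.
	 */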
Chris Wilsone9808ed2012-07-04 12:25:08 +01004276 if (obj->ring) {
4277 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4278 args->busy |= intel_ring_flag(obj->ring) << 16;
4279 }
Eric Anholt673a3942008-07-30 12:06:12 -07004280
Chris Wilson05394f32010-11-08 19:18:58 +00004281 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004282unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004283 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004284 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004285}
4286
4287int
4288i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4289 struct drm_file *file_priv)
4290{
Akshay Joshi0206e352011-08-16 15:34:10 -04004291 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004292}
4293
Chris Wilson3ef94da2009-09-14 16:50:29 +01004294int
4295i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4296 struct drm_file *file_priv)
4297{
4298 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004299 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004300 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004301
4302 switch (args->madv) {
4303 case I915_MADV_DONTNEED:
4304 case I915_MADV_WILLNEED:
4305 break;
4306 default:
4307 return -EINVAL;
4308 }
4309
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004310 ret = i915_mutex_lock_interruptible(dev);
4311 if (ret)
4312 return ret;
4313
Chris Wilson05394f32010-11-08 19:18:58 +00004314 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004315 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004316 ret = -ENOENT;
4317 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004318 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01004319
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004320 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004321 ret = -EINVAL;
4322 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004323 }
4324
Chris Wilson05394f32010-11-08 19:18:58 +00004325 if (obj->madv != __I915_MADV_PURGED)
4326 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004327
Chris Wilson6c085a72012-08-20 11:40:46 +02004328 /* if the object is no longer attached, discard its backing storage */
4329 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01004330 i915_gem_object_truncate(obj);
4331
Chris Wilson05394f32010-11-08 19:18:58 +00004332 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004333
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004334out:
Chris Wilson05394f32010-11-08 19:18:58 +00004335 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004336unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004337 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004338 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004339}
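/*
 * Illustrative userspace sketch (not kernel code, a hedged sketch only):
 * marking a buffer purgeable while idle and checking on reuse whether the
 * kernel reclaimed it. Assumes libdrm's drmIoctl(), an open DRM fd and a
 * GEM handle "bo_handle"; recreate_contents() is a hypothetical helper.
 *
 *	struct drm_i915_gem_madvise arg = {
 *		.handle = bo_handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *
 *	arg.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	if (!arg.retained)
 *		recreate_contents(bo_handle);
 */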
4340
Chris Wilson37e680a2012-06-07 15:38:42 +01004341void i915_gem_object_init(struct drm_i915_gem_object *obj,
4342 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004343{
Ben Widawsky35c20a62013-05-31 11:28:48 -07004344 INIT_LIST_HEAD(&obj->global_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004345 INIT_LIST_HEAD(&obj->ring_list);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02004346 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004347 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004348
Chris Wilson37e680a2012-06-07 15:38:42 +01004349 obj->ops = ops;
4350
Chris Wilson0327d6b2012-08-11 15:41:06 +01004351 obj->fence_reg = I915_FENCE_REG_NONE;
4352 obj->madv = I915_MADV_WILLNEED;
Chris Wilson0327d6b2012-08-11 15:41:06 +01004353
4354 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4355}
4356
Chris Wilson37e680a2012-06-07 15:38:42 +01004357static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4358 .get_pages = i915_gem_object_get_pages_gtt,
4359 .put_pages = i915_gem_object_put_pages_gtt,
4360};
4361
Chris Wilson05394f32010-11-08 19:18:58 +00004362struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4363 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004364{
Daniel Vetterc397b902010-04-09 19:05:07 +00004365 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004366 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004367 gfp_t mask;
Daniel Vetterc397b902010-04-09 19:05:07 +00004368
Chris Wilson42dcedd2012-11-15 11:32:30 +00004369 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004370 if (obj == NULL)
4371 return NULL;
4372
4373 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
Chris Wilson42dcedd2012-11-15 11:32:30 +00004374 i915_gem_object_free(obj);
Daniel Vetterc397b902010-04-09 19:05:07 +00004375 return NULL;
4376 }
4377
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004378 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4379 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4380 /* 965gm cannot relocate objects above 4GiB. */
4381 mask &= ~__GFP_HIGHMEM;
4382 mask |= __GFP_DMA32;
4383 }
4384
Al Viro496ad9a2013-01-23 17:07:38 -05004385 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004386 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004387
Chris Wilson37e680a2012-06-07 15:38:42 +01004388 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004389
Daniel Vetterc397b902010-04-09 19:05:07 +00004390 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4391 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4392
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004393 if (HAS_LLC(dev)) {
4394 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004395 * cache) for about a 10% performance improvement
4396 * compared to uncached. Graphics requests other than
4397 * display scanout are coherent with the CPU in
4398 * accessing this cache. This means in this mode we
4399 * don't need to clflush on the CPU side, and on the
4400 * GPU side we only need to flush internal caches to
4401 * get data visible to the CPU.
4402 *
4403 * However, we maintain the display planes as UC, and so
4404 * need to rebind when first used as such.
4405 */
4406 obj->cache_level = I915_CACHE_LLC;
4407 } else
4408 obj->cache_level = I915_CACHE_NONE;
4409
Daniel Vetterd861e332013-07-24 23:25:03 +02004410 trace_i915_gem_object_create(obj);
4411
Chris Wilson05394f32010-11-08 19:18:58 +00004412 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004413}
4414
Chris Wilson340fbd82014-05-22 09:16:52 +01004415static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4416{
4417 /* If we are the last user of the backing storage (be it shmemfs
4418 * pages or stolen etc), we know that the pages are going to be
4419 * immediately released. In this case, we can then skip copying
4420 * back the contents from the GPU.
4421 */
4422
4423 if (obj->madv != I915_MADV_WILLNEED)
4424 return false;
4425
4426 if (obj->base.filp == NULL)
4427 return true;
4428
4429	/* At first glance, this looks racy, but then again so would
4430	 * userspace racing mmap against close. However, the first external
4431 * reference to the filp can only be obtained through the
4432 * i915_gem_mmap_ioctl() which safeguards us against the user
4433 * acquiring such a reference whilst we are in the middle of
4434 * freeing the object.
4435 */
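	/* For example, an object freed without ever being mmapped has
	 * f_count == 1 here, so its pages can be discarded rather than
	 * written back.
	 */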
4436 return atomic_long_read(&obj->base.filp->f_count) == 1;
4437}
4438
Chris Wilson1488fc02012-04-24 15:47:31 +01004439void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004440{
Chris Wilson1488fc02012-04-24 15:47:31 +01004441 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004442 struct drm_device *dev = obj->base.dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004443 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004444 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004445
Paulo Zanonif65c9162013-11-27 18:20:34 -02004446 intel_runtime_pm_get(dev_priv);
4447
Chris Wilson26e12f892011-03-20 11:20:19 +00004448 trace_i915_gem_object_destroy(obj);
4449
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004450 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004451 int ret;
4452
4453 vma->pin_count = 0;
4454 ret = i915_vma_unbind(vma);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004455 if (WARN_ON(ret == -ERESTARTSYS)) {
4456 bool was_interruptible;
Chris Wilson1488fc02012-04-24 15:47:31 +01004457
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004458 was_interruptible = dev_priv->mm.interruptible;
4459 dev_priv->mm.interruptible = false;
Chris Wilson1488fc02012-04-24 15:47:31 +01004460
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004461 WARN_ON(i915_vma_unbind(vma));
Chris Wilson1488fc02012-04-24 15:47:31 +01004462
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004463 dev_priv->mm.interruptible = was_interruptible;
4464 }
Chris Wilson1488fc02012-04-24 15:47:31 +01004465 }
4466
Chris Wilson00731152014-05-21 12:42:56 +01004467 i915_gem_object_detach_phys(obj);
4468
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004469	/* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4470 * before progressing. */
4471 if (obj->stolen)
4472 i915_gem_object_unpin_pages(obj);
4473
Daniel Vettera071fa02014-06-18 23:28:09 +02004474 WARN_ON(obj->frontbuffer_bits);
4475
Ben Widawsky401c29f2013-05-31 11:28:47 -07004476 if (WARN_ON(obj->pages_pin_count))
4477 obj->pages_pin_count = 0;
Chris Wilson340fbd82014-05-22 09:16:52 +01004478 if (discard_backing_storage(obj))
Chris Wilson55372522014-03-25 13:23:06 +00004479 obj->madv = I915_MADV_DONTNEED;
Chris Wilson37e680a2012-06-07 15:38:42 +01004480 i915_gem_object_put_pages(obj);
Chris Wilsond8cb5082012-08-11 15:41:03 +01004481 i915_gem_object_free_mmap_offset(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004482
Chris Wilson9da3da62012-06-01 15:20:22 +01004483 BUG_ON(obj->pages);
4484
Chris Wilson2f745ad2012-09-04 21:02:58 +01004485 if (obj->base.import_attach)
4486 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004487
Chris Wilson5cc9ed42014-05-16 14:22:37 +01004488 if (obj->ops->release)
4489 obj->ops->release(obj);
4490
Chris Wilson05394f32010-11-08 19:18:58 +00004491 drm_gem_object_release(&obj->base);
4492 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004493
Chris Wilson05394f32010-11-08 19:18:58 +00004494 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004495 i915_gem_object_free(obj);
Paulo Zanonif65c9162013-11-27 18:20:34 -02004496
4497 intel_runtime_pm_put(dev_priv);
Chris Wilsonbe726152010-07-23 23:18:50 +01004498}
4499
Daniel Vettere656a6c2013-08-14 14:14:04 +02004500struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
Ben Widawsky2f633152013-07-17 12:19:03 -07004501 struct i915_address_space *vm)
4502{
Daniel Vettere656a6c2013-08-14 14:14:04 +02004503 struct i915_vma *vma;
4504 list_for_each_entry(vma, &obj->vma_list, vma_link)
4505 if (vma->vm == vm)
4506 return vma;
4507
4508 return NULL;
4509}
4510
Ben Widawsky2f633152013-07-17 12:19:03 -07004511void i915_gem_vma_destroy(struct i915_vma *vma)
4512{
Michel Thierryb9d06dd2014-08-06 15:04:44 +02004513 struct i915_address_space *vm = NULL;
Ben Widawsky2f633152013-07-17 12:19:03 -07004514 WARN_ON(vma->node.allocated);
Chris Wilsonaaa05662013-08-20 12:56:40 +01004515
4516 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4517 if (!list_empty(&vma->exec_list))
4518 return;
4519
Michel Thierryb9d06dd2014-08-06 15:04:44 +02004520 vm = vma->vm;
Michel Thierryb9d06dd2014-08-06 15:04:44 +02004521
Daniel Vetter841cd772014-08-06 15:04:48 +02004522 if (!i915_is_ggtt(vm))
4523 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
Michel Thierryb9d06dd2014-08-06 15:04:44 +02004524
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004525 list_del(&vma->vma_link);
Daniel Vetterb93dab62013-08-26 11:23:47 +02004526
Ben Widawsky2f633152013-07-17 12:19:03 -07004527 kfree(vma);
4528}
4529
Chris Wilsone3efda42014-04-09 09:19:41 +01004530static void
4531i915_gem_stop_ringbuffers(struct drm_device *dev)
4532{
4533 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01004534 struct intel_engine_cs *ring;
Chris Wilsone3efda42014-04-09 09:19:41 +01004535 int i;
4536
4537 for_each_ring(ring, dev_priv, i)
Oscar Mateoa83014d2014-07-24 17:04:21 +01004538 dev_priv->gt.stop_ring(ring);
Chris Wilsone3efda42014-04-09 09:19:41 +01004539}
4540
Jesse Barnes5669fca2009-02-17 15:13:31 -08004541int
Chris Wilson45c5f202013-10-16 11:50:01 +01004542i915_gem_suspend(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004543{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004544 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson45c5f202013-10-16 11:50:01 +01004545 int ret = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004546
Chris Wilson45c5f202013-10-16 11:50:01 +01004547 mutex_lock(&dev->struct_mutex);
Chris Wilsonf7403342013-09-13 23:57:04 +01004548 if (dev_priv->ums.mm_suspended)
Chris Wilson45c5f202013-10-16 11:50:01 +01004549 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07004550
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004551 ret = i915_gpu_idle(dev);
Chris Wilsonf7403342013-09-13 23:57:04 +01004552 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004553 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004554
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004555 i915_gem_retire_requests(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004556
Chris Wilson29105cc2010-01-07 10:39:13 +00004557 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01004558 if (!drm_core_check_feature(dev, DRIVER_MODESET))
Chris Wilson6c085a72012-08-20 11:40:46 +02004559 i915_gem_evict_everything(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004560
Chris Wilson29105cc2010-01-07 10:39:13 +00004561 i915_kernel_lost_context(dev);
Chris Wilsone3efda42014-04-09 09:19:41 +01004562 i915_gem_stop_ringbuffers(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004563
Chris Wilson45c5f202013-10-16 11:50:01 +01004564 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4565 * We need to replace this with a semaphore, or something.
4566 * And not confound ums.mm_suspended!
4567 */
4568 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4569 DRIVER_MODESET);
4570 mutex_unlock(&dev->struct_mutex);
4571
4572 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004573 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
Deepak S274fa1c2014-08-05 07:51:20 -07004574 flush_delayed_work(&dev_priv->mm.idle_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004575
Eric Anholt673a3942008-07-30 12:06:12 -07004576 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004577
4578err:
4579 mutex_unlock(&dev->struct_mutex);
4580 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004581}
4582
Oscar Mateoa4872ba2014-05-22 14:13:33 +01004583int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
Ben Widawskyb9524a12012-05-25 16:56:24 -07004584{
Ben Widawskyc3787e22013-09-17 21:12:44 -07004585 struct drm_device *dev = ring->dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004586 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004587 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4588 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
Ben Widawskyc3787e22013-09-17 21:12:44 -07004589 int i, ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004590
Ben Widawsky040d2ba2013-09-19 11:01:40 -07004591 if (!HAS_L3_DPF(dev) || !remap_info)
Ben Widawskyc3787e22013-09-17 21:12:44 -07004592 return 0;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004593
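	/* Each of the GEN7_L3LOG_SIZE/4 registers takes 3 dwords on the
	 * ring: the LRI opcode, the register offset and the value.
	 */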
Ben Widawskyc3787e22013-09-17 21:12:44 -07004594 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4595 if (ret)
4596 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004597
Ben Widawskyc3787e22013-09-17 21:12:44 -07004598 /*
4599 * Note: We do not worry about the concurrent register cacheline hang
4600 * here because no other code should access these registers other than
4601 * at initialization time.
4602 */
Ben Widawskyb9524a12012-05-25 16:56:24 -07004603 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
Ben Widawskyc3787e22013-09-17 21:12:44 -07004604 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4605 intel_ring_emit(ring, reg_base + i);
4606 intel_ring_emit(ring, remap_info[i/4]);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004607 }
4608
Ben Widawskyc3787e22013-09-17 21:12:44 -07004609 intel_ring_advance(ring);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004610
Ben Widawskyc3787e22013-09-17 21:12:44 -07004611 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004612}
4613
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004614void i915_gem_init_swizzling(struct drm_device *dev)
4615{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004616 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004617
Daniel Vetter11782b02012-01-31 16:47:55 +01004618 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004619 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4620 return;
4621
4622 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4623 DISP_TILE_SURFACE_SWIZZLING);
4624
Daniel Vetter11782b02012-01-31 16:47:55 +01004625 if (IS_GEN5(dev))
4626 return;
4627
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004628 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4629 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004630 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004631 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004632 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky31a53362013-11-02 21:07:04 -07004633 else if (IS_GEN8(dev))
4634 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004635 else
4636 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004637}
Daniel Vettere21af882012-02-09 20:53:27 +01004638
Chris Wilson67b1b572012-07-05 23:49:40 +01004639static bool
4640intel_enable_blt(struct drm_device *dev)
4641{
4642 if (!HAS_BLT(dev))
4643 return false;
4644
4645 /* The blitter was dysfunctional on early prototypes */
4646 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4647 DRM_INFO("BLT not supported on this pre-production hardware;"
4648 " graphics performance will be degraded.\n");
4649 return false;
4650 }
4651
4652 return true;
4653}
4654
int i915_gem_init_rings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;

        if (HAS_BSD(dev)) {
                ret = intel_init_bsd_ring_buffer(dev);
                if (ret)
                        goto cleanup_render_ring;
        }

        if (intel_enable_blt(dev)) {
                ret = intel_init_blt_ring_buffer(dev);
                if (ret)
                        goto cleanup_bsd_ring;
        }

        if (HAS_VEBOX(dev)) {
                ret = intel_init_vebox_ring_buffer(dev);
                if (ret)
                        goto cleanup_blt_ring;
        }

        if (HAS_BSD2(dev)) {
                ret = intel_init_bsd2_ring_buffer(dev);
                if (ret)
                        goto cleanup_vebox_ring;
        }

        ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
        if (ret)
                goto cleanup_bsd2_ring;

        return 0;

cleanup_bsd2_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

        return ret;
}

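/*
 * Bring the GPU back to a known state: apply the early workarounds, set up
 * swizzling, (re)initialise the engines through the gt vfuncs, remap the L3
 * slices, and re-enable contexts and PPGTT. Called both at driver load and
 * again after reset/resume.
 */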
int
i915_gem_init_hw(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret, i;

        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;

        if (dev_priv->ellc_size)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

        if (IS_HASWELL(dev))
                I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
                           LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

        if (HAS_PCH_NOP(dev)) {
                if (IS_IVYBRIDGE(dev)) {
                        u32 temp = I915_READ(GEN7_MSG_CTL);
                        temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
                        I915_WRITE(GEN7_MSG_CTL, temp);
                } else if (INTEL_INFO(dev)->gen >= 7) {
                        u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
                        temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
                        I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
                }
        }

        i915_gem_init_swizzling(dev);

        ret = dev_priv->gt.init_rings(dev);
        if (ret)
                return ret;

        for (i = 0; i < NUM_L3_SLICES(dev); i++)
                i915_gem_l3_remap(&dev_priv->ring[RCS], i);

        /*
         * XXX: Contexts should only be initialized once. Doing a switch to
         * the default context however is something we'd like to do after
         * reset or thaw (the latter may not actually be necessary for HW,
         * but goes with our code better). Context switching requires rings
         * (for the do_switch), but must happen before PPGTT is enabled, so
         * don't move this.
         */
        ret = i915_gem_context_enable(dev_priv);
        if (ret && ret != -EIO) {
                DRM_ERROR("Context enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);

                return ret;
        }

        ret = i915_ppgtt_init_hw(dev);
        if (ret && ret != -EIO) {
                DRM_ERROR("PPGTT enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
        }

        return ret;
}

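/*
 * One-time GEM setup at driver load: choose between the legacy ringbuffer
 * and execlists submission backends, set up the global GTT and contexts,
 * then perform the first hardware init. An -EIO from the hardware init is
 * absorbed by wedging the GPU rather than failing the load.
 */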
int i915_gem_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        i915.enable_execlists = intel_sanitize_enable_execlists(dev,
                        i915.enable_execlists);

        mutex_lock(&dev->struct_mutex);

        if (IS_VALLEYVIEW(dev)) {
                /* VLV A0 (potential hack): the BIOS isn't actually waking us */
                I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
                if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
                              VLV_GTLC_ALLOWWAKEACK), 10))
                        DRM_DEBUG_DRIVER("allow wake ack timed out\n");
        }

        if (!i915.enable_execlists) {
                dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
                dev_priv->gt.init_rings = i915_gem_init_rings;
                dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
                dev_priv->gt.stop_ring = intel_stop_ring_buffer;
        } else {
                dev_priv->gt.do_execbuf = intel_execlists_submission;
                dev_priv->gt.init_rings = intel_logical_rings_init;
                dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
                dev_priv->gt.stop_ring = intel_logical_ring_stop;
        }

        ret = i915_gem_init_userptr(dev);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        i915_gem_init_global_gtt(dev);

        ret = i915_gem_context_init(dev);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        ret = i915_gem_init_hw(dev);
        if (ret == -EIO) {
                /* Allow ring initialisation to fail by marking the GPU as
                 * wedged. But we only want to do this where the GPU is angry;
                 * for all other failures, such as an allocation failure, bail.
                 */
                DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
                atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
                ret = 0;
        }
        mutex_unlock(&dev->struct_mutex);

        /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev_priv->dri1.allow_batchbuffer = 1;
        return ret;
}

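/* Tear down every engine through the backend-specific cleanup vfunc. */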
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                dev_priv->gt.cleanup_ring(ring);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;

        if (i915_reset_in_progress(&dev_priv->gpu_error)) {
                DRM_ERROR("Reenabling wedged hardware, good luck\n");
                atomic_set(&dev_priv->gpu_error.reset_counter, 0);
        }

        mutex_lock(&dev->struct_mutex);
        dev_priv->ums.mm_suspended = 0;

        ret = i915_gem_init_hw(dev);
        if (ret != 0) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));

        ret = drm_irq_install(dev, dev->pdev->irq);
        if (ret)
                goto cleanup_ringbuffer;
        mutex_unlock(&dev->struct_mutex);

        return 0;

cleanup_ringbuffer:
        i915_gem_cleanup_ringbuffer(dev);
        dev_priv->ums.mm_suspended = 1;
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;

        mutex_lock(&dev->struct_mutex);
        drm_irq_uninstall(dev);
        mutex_unlock(&dev->struct_mutex);

        return i915_gem_suspend(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ret = i915_gem_suspend(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_engine_cs *ring)
{
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
}

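/*
 * Common setup for a new address space. The global GTT is skipped for
 * drm_mm_init() here because its drm_mm is expected to be initialised
 * separately during global-GTT setup; only per-process address spaces get
 * their allocator created at this point.
 */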
void i915_init_vm(struct drm_i915_private *dev_priv,
                  struct i915_address_space *vm)
{
        if (!i915_is_ggtt(vm))
                drm_mm_init(&vm->mm, vm->start, vm->total);
        vm->dev = dev_priv->dev;
        INIT_LIST_HEAD(&vm->active_list);
        INIT_LIST_HEAD(&vm->inactive_list);
        INIT_LIST_HEAD(&vm->global_link);
        list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

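/*
 * Once-per-device GEM state construction: the object slab cache, the
 * VM/object/fence lists, the retire and idle workers, fence-register
 * bookkeeping, and registration of the shrinker and OOM notifier.
 */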
void
i915_gem_load(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        dev_priv->slab =
                kmem_cache_create("i915_gem_object",
                                  sizeof(struct drm_i915_gem_object), 0,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);

        INIT_LIST_HEAD(&dev_priv->vm_list);
        i915_init_vm(dev_priv, &dev_priv->gtt.base);

        INIT_LIST_HEAD(&dev_priv->context_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        for (i = 0; i < I915_NUM_RINGS; i++)
                init_ring_lists(&dev_priv->ring[i]);
        for (i = 0; i < I915_MAX_NUM_FENCES; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
                          i915_gem_idle_work_handler);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
        if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
                I915_WRITE(MI_ARB_STATE,
                           _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
        }

        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

        /* Old X drivers will take 0-2 for front, back, depth buffers */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev_priv->fence_reg_start = 3;

        if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
                dev_priv->num_fence_regs = 32;
        else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;

        /* Initialize fence registers to zero */
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        i915_gem_restore_fences(dev);

        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);

        dev_priv->mm.interruptible = true;

        dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
        dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&dev_priv->mm.shrinker);

        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        register_oom_notifier(&dev_priv->mm.oom_notifier);

        mutex_init(&dev_priv->fb_tracking.lock);
}

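/*
 * Called when a client's file handle is closed: stop its idle worker and
 * orphan any requests still on its list so that later retirement doesn't
 * dereference the freed file_priv.
 */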
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        cancel_delayed_work_sync(&file_priv->mm.idle_work);

        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
        spin_lock(&file_priv->mm.lock);
        while (!list_empty(&file_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;

                request = list_first_entry(&file_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           client_list);
                list_del(&request->client_list);
                request->file_priv = NULL;
        }
        spin_unlock(&file_priv->mm.lock);
}

static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
        struct drm_i915_file_private *file_priv =
                container_of(work, typeof(*file_priv), mm.idle_work.work);

        atomic_set(&file_priv->rps_wait_boost, false);
}

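/* Allocate and initialise the per-client GEM state for a new file handle. */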
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;
        int ret;

        DRM_DEBUG_DRIVER("\n");

        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                return -ENOMEM;

        file->driver_priv = file_priv;
        file_priv->dev_priv = dev->dev_private;
        file_priv->file = file;

        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);
        INIT_DELAYED_WORK(&file_priv->mm.idle_work,
                          i915_gem_file_idle_work_handler);

        ret = i915_gem_context_open(dev, file);
        if (ret)
                kfree(file_priv);

        return ret;
}

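/*
 * Transfer frontbuffer tracking bits from one object to another when the
 * scanout buffer changes, e.g. across a page flip; either object may be
 * NULL when scanout is being enabled or disabled.
 */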
void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits)
{
        if (old) {
                WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
                WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
                old->frontbuffer_bits &= ~frontbuffer_bits;
        }

        if (new) {
                WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
                WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
                new->frontbuffer_bits |= frontbuffer_bits;
        }
}

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
        if (!mutex_is_locked(mutex))
                return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
        return mutex->owner == task;
#else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
        return false;
#endif
}

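/*
 * Try to take struct_mutex for the shrinker. If the lock is already held
 * by this task (the shrinker can recurse from our own allocations), proceed
 * without taking it and report via *unlock that it must not be dropped on
 * the way out.
 */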
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return false;

                if (to_i915(dev)->mm.shrinker_no_lock_stealing)
                        return false;

                *unlock = false;
        } else
                *unlock = true;

        return true;
}

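/* Count the object's VMAs that actually have space allocated in a VM. */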
static int num_vma_bound(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        int count = 0;

        list_for_each_entry(vma, &obj->vma_list, vma_link)
                if (drm_mm_node_allocated(&vma->node))
                        count++;

        return count;
}

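/*
 * shrinker->count_objects: report how many pages could plausibly be
 * released, i.e. unbound objects whose pages are unpinned, plus bound
 * objects whose only page pins come from their bindings.
 */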
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        unsigned long count;
        bool unlock;

        if (!i915_gem_shrinker_lock(dev, &unlock))
                return 0;

        count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
                        count += obj->base.size >> PAGE_SHIFT;

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (!i915_gem_obj_is_pinned(obj) &&
                    obj->pages_pin_count == num_vma_bound(obj))
                        count += obj->base.size >> PAGE_SHIFT;
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return count;
}

/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
                                  struct i915_address_space *vm)
{
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;

        WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

        list_for_each_entry(vma, &o->vma_list, vma_link) {
                if (vma->vm == vm)
                        return vma->node.start;
        }
        WARN(1, "%s vma for this object not found.\n",
             i915_is_ggtt(vm) ? "global" : "ppgtt");
        return -1;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
                        struct i915_address_space *vm)
{
        struct i915_vma *vma;

        list_for_each_entry(vma, &o->vma_list, vma_link)
                if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
                        return true;

        return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
        struct i915_vma *vma;

        list_for_each_entry(vma, &o->vma_list, vma_link)
                if (drm_mm_node_allocated(&vma->node))
                        return true;

        return false;
}

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
                                struct i915_address_space *vm)
{
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;

        WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

        BUG_ON(list_empty(&o->vma_list));

        list_for_each_entry(vma, &o->vma_list, vma_link)
                if (vma->vm == vm)
                        return vma->node.size;

        return 0;
}

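/*
 * shrinker->scan_objects: first drop purgeable objects, then unbind and
 * shrink others until the requested number of pages has been freed.
 */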
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_device *dev = dev_priv->dev;
        unsigned long freed;
        bool unlock;

        if (!i915_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;

        freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
        if (freed < sc->nr_to_scan)
                freed += __i915_gem_shrink(dev_priv,
                                           sc->nr_to_scan - freed,
                                           false);
        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return freed;
}

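/*
 * OOM notifier: make a last-ditch effort to return GEM pages to the system
 * before the OOM killer picks a victim, then report what could and could
 * not be freed.
 */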
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *dev_priv =
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        unsigned long timeout = msecs_to_jiffies(5000) + 1;
        unsigned long pinned, bound, unbound, freed;
        bool was_interruptible;
        bool unlock;

        while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
                schedule_timeout_killable(1);
                if (fatal_signal_pending(current))
                        return NOTIFY_DONE;
        }
        if (timeout == 0) {
                pr_err("Unable to purge GPU memory due to lock contention.\n");
                return NOTIFY_DONE;
        }

        was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;

        freed = i915_gem_shrink_all(dev_priv);

        dev_priv->mm.interruptible = was_interruptible;

        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
         * being pointed to by hardware.
         */
        unbound = bound = pinned = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
                if (!obj->base.filp) /* not backed by a freeable object */
                        continue;

                if (obj->pages_pin_count)
                        pinned += obj->base.size;
                else
                        unbound += obj->base.size;
        }
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (!obj->base.filp)
                        continue;

                if (obj->pages_pin_count)
                        pinned += obj->base.size;
                else
                        bound += obj->base.size;
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
                freed, pinned);
        if (unbound || bound)
                pr_err("%lu and %lu bytes still available in the "
                       "bound and unbound GPU page lists.\n",
                       bound, unbound);

        *(unsigned long *)ptr += freed;
        return NOTIFY_DONE;
}

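/*
 * Return the object's GGTT VMA, if one exists. This relies on the GGTT VMA,
 * when present, being kept at the head of the object's vma_list.
 */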
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;

        vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
        if (vma->vm != i915_obj_to_ggtt(obj))
                return NULL;

        return vma;
}