/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);

static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);

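/* An object's contents are coherent with the CPU cache either when the
 * platform shares its last-level cache with the GPU (HAS_LLC) or when the
 * object uses a snooped cache level; only uncached objects on non-LLC
 * platforms need explicit clflushes around CPU access.
 */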
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

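/* An object is inactive when it is still bound into some address space but
 * is no longer referenced by any command buffer pending on the GPU.
 */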
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!obj->base.filp)
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

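/* Pread for shmem-backed objects: walk the backing pages, trying the atomic
 * kmap fastpath first and, on a fault, dropping struct_mutex to prefault the
 * user buffer and retry with the sleeping slowpath.
 */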
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

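/* Timer callback used by __wait_seqno() as a safety net against missed ring
 * interrupts: it simply wakes the waiting task so the loop re-samples the
 * seqno.
 */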
static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	if (!obj->active)
		return 0;

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_i915_file_private *file_priv,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
1276 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +01001277 }
Jesse Barnes652c3932009-08-17 13:31:43 -07001278
Chris Wilson3236f572012-08-24 09:35:09 +01001279 /* Try to flush the object off the GPU without holding the lock.
1280 * We will repeat the flush holding the lock in the normal manner
1281 * to catch cases where we are gazumped.
1282 */
Chris Wilson6e4930f2014-02-07 18:37:06 -02001283 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1284 file->driver_priv,
1285 !write_domain);
Chris Wilson3236f572012-08-24 09:35:09 +01001286 if (ret)
1287 goto unref;
1288
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001289 if (read_domains & I915_GEM_DOMAIN_GTT) {
1290 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001291
1292 /* Silently promote "you're not bound, there was nothing to do"
1293 * to success, since the client was just asking us to
1294 * make sure everything was done.
1295 */
1296 if (ret == -EINVAL)
1297 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001298 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001299 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001300 }
1301
Chris Wilson3236f572012-08-24 09:35:09 +01001302unref:
Chris Wilson05394f32010-11-08 19:18:58 +00001303 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001304unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001305 mutex_unlock(&dev->struct_mutex);
1306 return ret;
1307}
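
/*
 * An illustrative userspace sketch (not part of the driver), assuming an open
 * DRM fd, a GEM handle, libdrm's drmIoctl() and the uapi struct
 * drm_i915_gem_set_domain. Before writing to an object through a CPU mmap,
 * userspace would typically move it into the CPU domain:
 *
 *	struct drm_i915_gem_set_domain arg = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
 */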
1308
1309/**
1310 * Called when user space has done writes to this buffer
1311 */
1312int
1313i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001314 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001315{
1316 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001317 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001318 int ret = 0;
1319
Chris Wilson76c1dec2010-09-25 11:22:51 +01001320 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001321 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001322 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001323
Chris Wilson05394f32010-11-08 19:18:58 +00001324 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001325 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001326 ret = -ENOENT;
1327 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001328 }
1329
Eric Anholt673a3942008-07-30 12:06:12 -07001330 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson2c225692013-08-09 12:26:45 +01001331 if (obj->pin_display)
1332 i915_gem_object_flush_cpu_write_domain(obj, true);
Eric Anholte47c68e2008-11-14 13:35:19 -08001333
Chris Wilson05394f32010-11-08 19:18:58 +00001334 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001335unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001336 mutex_unlock(&dev->struct_mutex);
1337 return ret;
1338}
1339
1340/**
1341 * Maps the contents of an object, returning the address it is mapped
1342 * into.
1343 *
1344 * While the mapping holds a reference on the contents of the object, it doesn't
1345 * imply a ref on the object itself.
1346 */
1347int
1348i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001349 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001350{
1351 struct drm_i915_gem_mmap *args = data;
1352 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001353 unsigned long addr;
1354
Chris Wilson05394f32010-11-08 19:18:58 +00001355 obj = drm_gem_object_lookup(dev, file, args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07001356 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001357 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001358
Daniel Vetter1286ff72012-05-10 15:25:09 +02001359 /* prime objects have no backing filp to GEM mmap
1360 * pages from.
1361 */
1362 if (!obj->filp) {
1363 drm_gem_object_unreference_unlocked(obj);
1364 return -EINVAL;
1365 }
1366
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001367 addr = vm_mmap(obj->filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001368 PROT_READ | PROT_WRITE, MAP_SHARED,
1369 args->offset);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001370 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001371 if (IS_ERR((void *)addr))
1372 return addr;
1373
1374 args->addr_ptr = (uint64_t) addr;
1375
1376 return 0;
1377}
1378
Jesse Barnesde151cf2008-11-12 10:03:55 -08001379/**
1380 * i915_gem_fault - fault a page into the GTT
1381 * @vma: VMA in question
1382 * @vmf: fault info
1383 *
1384 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1385 * from userspace. The fault handler takes care of binding the object to
1386 * the GTT (if needed), allocating and programming a fence register (again,
1387 * only if needed based on whether the old reg is still valid or the object
1388 * is tiled) and inserting a new PTE into the faulting process.
1389 *
1390 * Note that the faulting process may involve evicting existing objects
1391 * from the GTT and/or fence registers to make room. So performance may
1392 * suffer if the GTT working set is large or there are few fence registers
1393 * left.
1394 */
1395int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1396{
Chris Wilson05394f32010-11-08 19:18:58 +00001397 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1398 struct drm_device *dev = obj->base.dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03001399 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001400 pgoff_t page_offset;
1401 unsigned long pfn;
1402 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001403 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001404
Paulo Zanonif65c9162013-11-27 18:20:34 -02001405 intel_runtime_pm_get(dev_priv);
1406
Jesse Barnesde151cf2008-11-12 10:03:55 -08001407 /* We don't use vmf->pgoff since that has the fake offset */
1408 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1409 PAGE_SHIFT;
1410
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001411 ret = i915_mutex_lock_interruptible(dev);
1412 if (ret)
1413 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001414
Chris Wilsondb53a302011-02-03 11:57:46 +00001415 trace_i915_gem_object_fault(obj, page_offset, true, write);
1416
Chris Wilson6e4930f2014-02-07 18:37:06 -02001417 /* Try to flush the object off the GPU first without holding the lock.
1418 * Upon reacquiring the lock, we will perform our sanity checks and then
1419 * repeat the flush holding the lock in the normal manner to catch cases
1420 * where we are gazumped.
1421 */
1422 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1423 if (ret)
1424 goto unlock;
1425
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001426 /* Access to snoopable pages through the GTT is incoherent. */
1427 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1428 ret = -EINVAL;
1429 goto unlock;
1430 }
1431
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001432 /* Now bind it into the GTT if needed */
Daniel Vetter1ec9e262014-02-14 14:01:11 +01001433 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001434 if (ret)
1435 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001436
Chris Wilsonc9839302012-11-20 10:45:17 +00001437 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1438 if (ret)
1439 goto unpin;
1440
1441 ret = i915_gem_object_get_fence(obj);
1442 if (ret)
1443 goto unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001444
Chris Wilson6299f992010-11-24 12:23:44 +00001445 obj->fault_mappable = true;
1446
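	/* mappable_base plus the object's GGTT offset gives the aperture byte
	 * address of the object; shift it down to a page frame number and add
	 * the index of the faulting page.
	 */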
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001447 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1448 pfn >>= PAGE_SHIFT;
1449 pfn += page_offset;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001450
1451 /* Finally, remap it using the new GTT offset */
1452 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc9839302012-11-20 10:45:17 +00001453unpin:
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08001454 i915_gem_object_ggtt_unpin(obj);
Chris Wilsonc7150892009-09-23 00:43:56 +01001455unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001456 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001457out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001458 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001459 case -EIO:
Daniel Vettera9340cc2012-07-04 22:18:42 +02001460 /* If this -EIO is due to a gpu hang, give the reset code a
1461 * chance to clean up the mess. Otherwise return the proper
1462 * SIGBUS. */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001463 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
1464 ret = VM_FAULT_SIGBUS;
1465 break;
1466 }
Chris Wilson045e7692010-11-07 09:18:22 +00001467 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001468 /*
1469 * EAGAIN means the gpu is hung and we'll wait for the error
1470 * handler to reset everything when re-faulting in
1471 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001472 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001473 case 0:
1474 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001475 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001476 case -EBUSY:
1477 /*
1478 * EBUSY is ok: this just means that another thread
1479 * already did the job.
1480 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001481 ret = VM_FAULT_NOPAGE;
1482 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001483 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001484 ret = VM_FAULT_OOM;
1485 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001486 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00001487 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001488 ret = VM_FAULT_SIGBUS;
1489 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001490 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001491 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02001492 ret = VM_FAULT_SIGBUS;
1493 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001494 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02001495
1496 intel_runtime_pm_put(dev_priv);
1497 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001498}
1499
Paulo Zanoni48018a52013-12-13 15:22:31 -02001500void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1501{
1502 struct i915_vma *vma;
1503
1504 /*
1505 * Only the global gtt is relevant for gtt memory mappings, so restrict
1506 * list traversal to objects bound into the global address space. Note
1507 * that the active list should be empty, but better safe than sorry.
1508 */
1509 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1510 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1511 i915_gem_release_mmap(vma->obj);
1512 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1513 i915_gem_release_mmap(vma->obj);
1514}
1515
Jesse Barnesde151cf2008-11-12 10:03:55 -08001516/**
Chris Wilson901782b2009-07-10 08:18:50 +01001517 * i915_gem_release_mmap - remove physical page mappings
1518 * @obj: obj in question
1519 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001520 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001521 * relinquish ownership of the pages back to the system.
1522 *
1523 * It is vital that we remove the page mapping if we have mapped a tiled
1524 * object through the GTT and then lose the fence register due to
1525 * resource pressure. Similarly if the object has been moved out of the
1526 * aperture, then pages mapped into userspace must be revoked. Removing the
1527 * mapping will then trigger a page fault on the next user access, allowing
1528 * fixup by i915_gem_fault().
1529 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001530void
Chris Wilson05394f32010-11-08 19:18:58 +00001531i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001532{
Chris Wilson6299f992010-11-24 12:23:44 +00001533 if (!obj->fault_mappable)
1534 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001535
David Herrmann6796cb12014-01-03 14:24:19 +01001536 drm_vma_node_unmap(&obj->base.vma_node,
1537 obj->base.dev->anon_inode->i_mapping);
Chris Wilson6299f992010-11-24 12:23:44 +00001538 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001539}
1540
Imre Deak0fa87792013-01-07 21:47:35 +02001541uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001542i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001543{
Chris Wilsone28f8712011-07-18 13:11:49 -07001544 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001545
1546 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001547 tiling_mode == I915_TILING_NONE)
1548 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001549
1550 /* Previous chips need a power-of-two fence region when tiling */
1551 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001552 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001553 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001554 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001555
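	/* Round up to the smallest power of two that covers the object. */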
Chris Wilsone28f8712011-07-18 13:11:49 -07001556 while (gtt_size < size)
1557 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001558
Chris Wilsone28f8712011-07-18 13:11:49 -07001559 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001560}
1561
Jesse Barnesde151cf2008-11-12 10:03:55 -08001562/**
1563 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1564 * @obj: object to check
1565 *
1566 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001567 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001568 */
Imre Deakd8651102013-01-07 21:47:33 +02001569uint32_t
1570i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1571 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001572{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001573 /*
1574 * Minimum alignment is 4k (GTT page size), but might be greater
1575 * if a fence register is needed for the object.
1576 */
Imre Deakd8651102013-01-07 21:47:33 +02001577 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001578 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001579 return 4096;
1580
1581 /*
1582 * Previous chips need to be aligned to the size of the smallest
1583 * fence register that can contain the object.
1584 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001585 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001586}
1587
Chris Wilsond8cb5082012-08-11 15:41:03 +01001588static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1589{
1590 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1591 int ret;
1592
David Herrmann0de23972013-07-24 21:07:52 +02001593 if (drm_vma_node_has_offset(&obj->base.vma_node))
Chris Wilsond8cb5082012-08-11 15:41:03 +01001594 return 0;
1595
Daniel Vetterda494d72012-12-20 15:11:16 +01001596 dev_priv->mm.shrinker_no_lock_stealing = true;
1597
Chris Wilsond8cb5082012-08-11 15:41:03 +01001598 ret = drm_gem_create_mmap_offset(&obj->base);
1599 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001600 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001601
1602 /* Badly fragmented mmap space? The only way we can recover
1603 * space is by destroying unwanted objects. We can't randomly release
1604 * mmap_offsets as userspace expects them to be persistent for the
1605 * lifetime of the objects. The closest we can do is to release the
1606 * offsets on purgeable objects by truncating them and marking them purged,
1607 * which prevents userspace from ever using that object again.
1608 */
1609 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1610 ret = drm_gem_create_mmap_offset(&obj->base);
1611 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001612 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001613
1614 i915_gem_shrink_all(dev_priv);
Daniel Vetterda494d72012-12-20 15:11:16 +01001615 ret = drm_gem_create_mmap_offset(&obj->base);
1616out:
1617 dev_priv->mm.shrinker_no_lock_stealing = false;
1618
1619 return ret;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001620}
1621
1622static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1623{
Chris Wilsond8cb5082012-08-11 15:41:03 +01001624 drm_gem_free_mmap_offset(&obj->base);
1625}
1626
Jesse Barnesde151cf2008-11-12 10:03:55 -08001627int
Dave Airlieff72145b2011-02-07 12:16:14 +10001628i915_gem_mmap_gtt(struct drm_file *file,
1629 struct drm_device *dev,
1630 uint32_t handle,
1631 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001632{
Chris Wilsonda761a62010-10-27 17:37:08 +01001633 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001634 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001635 int ret;
1636
Chris Wilson76c1dec2010-09-25 11:22:51 +01001637 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001638 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001639 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001640
Dave Airlieff72145b2011-02-07 12:16:14 +10001641 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001642 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001643 ret = -ENOENT;
1644 goto unlock;
1645 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001646
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001647 if (obj->base.size > dev_priv->gtt.mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001648 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001649 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001650 }
1651
Chris Wilson05394f32010-11-08 19:18:58 +00001652 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00001653 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00001654 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001655 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001656 }
1657
Chris Wilsond8cb5082012-08-11 15:41:03 +01001658 ret = i915_gem_object_create_mmap_offset(obj);
1659 if (ret)
1660 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001661
David Herrmann0de23972013-07-24 21:07:52 +02001662 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001663
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001664out:
Chris Wilson05394f32010-11-08 19:18:58 +00001665 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001666unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001667 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001668 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001669}
1670
Dave Airlieff72145b2011-02-07 12:16:14 +10001671/**
1672 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1673 * @dev: DRM device
1674 * @data: GTT mapping ioctl data
1675 * @file: GEM object info
1676 *
1677 * Simply returns the fake offset to userspace so it can mmap it.
1678 * The mmap call will end up in drm_gem_mmap(), which will set things
1679 * up so we can get faults in the handler above.
1680 *
1681 * The fault handler will take care of binding the object into the GTT
1682 * (since it may have been evicted to make room for something), allocating
1683 * a fence register, and mapping the appropriate aperture address into
1684 * userspace.
1685 */
1686int
1687i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1688 struct drm_file *file)
1689{
1690 struct drm_i915_gem_mmap_gtt *args = data;
1691
Dave Airlieff72145b2011-02-07 12:16:14 +10001692 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1693}
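
/*
 * An illustrative userspace sketch (not part of the driver), assuming an open
 * DRM fd, a GEM handle, libdrm's drmIoctl() and the uapi struct
 * drm_i915_gem_mmap_gtt. The returned fake offset is fed straight back into
 * mmap() on the DRM fd:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
 *		   arg.offset);
 */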
1694
Daniel Vetter225067e2012-08-20 10:23:20 +02001695/* Immediately discard the backing storage */
1696static void
1697i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001698{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001699 struct inode *inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001700
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001701 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001702
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001703 if (obj->base.filp == NULL)
1704 return;
1705
Daniel Vetter225067e2012-08-20 10:23:20 +02001706 /* Our goal here is to return as much of the memory as
1707 * is possible back to the system as we are called from OOM.
1708 * To do this we must instruct the shmfs to drop all of its
1709 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01001710 */
Al Viro496ad9a2013-01-23 17:07:38 -05001711 inode = file_inode(obj->base.filp);
Daniel Vetter225067e2012-08-20 10:23:20 +02001712 shmem_truncate_range(inode, 0, (loff_t)-1);
Hugh Dickins5949eac2011-06-27 16:18:18 -07001713
Daniel Vetter225067e2012-08-20 10:23:20 +02001714 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001715}
Chris Wilsone5281cc2010-10-28 13:45:36 +01001716
Daniel Vetter225067e2012-08-20 10:23:20 +02001717static inline int
1718i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1719{
1720 return obj->madv == I915_MADV_DONTNEED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001721}
1722
Chris Wilson5cdf5882010-09-27 15:51:07 +01001723static void
Chris Wilson05394f32010-11-08 19:18:58 +00001724i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001725{
Imre Deak90797e62013-02-18 19:28:03 +02001726 struct sg_page_iter sg_iter;
1727 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02001728
Chris Wilson05394f32010-11-08 19:18:58 +00001729 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001730
Chris Wilson6c085a72012-08-20 11:40:46 +02001731 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1732 if (ret) {
1733 /* In the event of a disaster, abandon all caches and
1734 * hope for the best.
1735 */
1736 WARN_ON(ret != -EIO);
Chris Wilson2c225692013-08-09 12:26:45 +01001737 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02001738 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1739 }
1740
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001741 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001742 i915_gem_object_save_bit_17_swizzle(obj);
1743
Chris Wilson05394f32010-11-08 19:18:58 +00001744 if (obj->madv == I915_MADV_DONTNEED)
1745 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001746
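	/* Drop our reference on each backing page, marking dirty pages so
	 * shmem writes their contents back instead of discarding them.
	 */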
Imre Deak90797e62013-02-18 19:28:03 +02001747 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
Imre Deak2db76d72013-03-26 15:14:18 +02001748 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +01001749
Chris Wilson05394f32010-11-08 19:18:58 +00001750 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01001751 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001752
Chris Wilson05394f32010-11-08 19:18:58 +00001753 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01001754 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001755
Chris Wilson9da3da62012-06-01 15:20:22 +01001756 page_cache_release(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001757 }
Chris Wilson05394f32010-11-08 19:18:58 +00001758 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001759
Chris Wilson9da3da62012-06-01 15:20:22 +01001760 sg_free_table(obj->pages);
1761 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01001762}
1763
Chris Wilsondd624af2013-01-15 12:39:35 +00001764int
Chris Wilson37e680a2012-06-07 15:38:42 +01001765i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1766{
1767 const struct drm_i915_gem_object_ops *ops = obj->ops;
1768
Chris Wilson2f745ad2012-09-04 21:02:58 +01001769 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01001770 return 0;
1771
Chris Wilsona5570172012-09-04 21:02:54 +01001772 if (obj->pages_pin_count)
1773 return -EBUSY;
1774
Ben Widawsky98438772013-07-31 17:00:12 -07001775 BUG_ON(i915_gem_obj_bound_any(obj));
Ben Widawsky3e123022013-07-31 17:00:04 -07001776
Chris Wilsona2165e32012-12-03 11:49:00 +00001777 /* ->put_pages might need to allocate memory for the bit17 swizzle
1778 * array, hence protect them from being reaped by removing them from gtt
1779 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07001780 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00001781
Chris Wilson37e680a2012-06-07 15:38:42 +01001782 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001783 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02001784
Chris Wilson6c085a72012-08-20 11:40:46 +02001785 if (i915_gem_object_is_purgeable(obj))
1786 i915_gem_object_truncate(obj);
1787
1788 return 0;
1789}
1790
Chris Wilsond9973b42013-10-04 10:33:00 +01001791static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001792__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1793 bool purgeable_only)
Chris Wilson6c085a72012-08-20 11:40:46 +02001794{
Chris Wilsonc8725f32014-03-17 12:21:55 +00001795 struct list_head still_in_list;
1796 struct drm_i915_gem_object *obj;
Chris Wilsond9973b42013-10-04 10:33:00 +01001797 unsigned long count = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001798
Chris Wilson57094f82013-09-04 10:45:50 +01001799 /*
Chris Wilsonc8725f32014-03-17 12:21:55 +00001800 * As we may completely rewrite the (un)bound list whilst unbinding
Chris Wilson57094f82013-09-04 10:45:50 +01001801 * (due to retiring requests) we have to strictly process only
1802 * one element of the list at a time, and recheck the list
1803 * on every iteration.
Chris Wilsonc8725f32014-03-17 12:21:55 +00001804 *
1805 * In particular, we must hold a reference whilst removing the
1806 * object as we may end up waiting for and/or retiring the objects.
1807 * This might release the final reference (held by the active list)
1808 * and result in the object being freed from under us. This is
1809 * similar to the precautions the eviction code must take whilst
1810 * removing objects.
1811 *
1812 * Also note that although these lists do not hold a reference to
1813 * the object we can safely grab one here: The final object
1814 * unreferencing and the bound_list are both protected by the
1815 * dev->struct_mutex and so we won't ever be able to observe an
1816 * object on the bound_list with a reference count equal to 0.
Chris Wilson57094f82013-09-04 10:45:50 +01001817 */
Chris Wilsonc8725f32014-03-17 12:21:55 +00001818 INIT_LIST_HEAD(&still_in_list);
1819 while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
1820 obj = list_first_entry(&dev_priv->mm.unbound_list,
1821 typeof(*obj), global_list);
1822 list_move_tail(&obj->global_list, &still_in_list);
1823
1824 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1825 continue;
1826
1827 drm_gem_object_reference(&obj->base);
1828
1829 if (i915_gem_object_put_pages(obj) == 0)
1830 count += obj->base.size >> PAGE_SHIFT;
1831
1832 drm_gem_object_unreference(&obj->base);
1833 }
1834 list_splice(&still_in_list, &dev_priv->mm.unbound_list);
1835
1836 INIT_LIST_HEAD(&still_in_list);
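	/* Second pass: also unbind objects on the bound list so that their
	 * backing pages can be released.
	 */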
Chris Wilson57094f82013-09-04 10:45:50 +01001837 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001838 struct i915_vma *vma, *v;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001839
Chris Wilson57094f82013-09-04 10:45:50 +01001840 obj = list_first_entry(&dev_priv->mm.bound_list,
1841 typeof(*obj), global_list);
Chris Wilsonc8725f32014-03-17 12:21:55 +00001842 list_move_tail(&obj->global_list, &still_in_list);
Chris Wilson57094f82013-09-04 10:45:50 +01001843
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001844 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1845 continue;
1846
Chris Wilson57094f82013-09-04 10:45:50 +01001847 drm_gem_object_reference(&obj->base);
1848
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001849 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1850 if (i915_vma_unbind(vma))
1851 break;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001852
Chris Wilson57094f82013-09-04 10:45:50 +01001853 if (i915_gem_object_put_pages(obj) == 0)
Chris Wilson6c085a72012-08-20 11:40:46 +02001854 count += obj->base.size >> PAGE_SHIFT;
Chris Wilson57094f82013-09-04 10:45:50 +01001855
1856 drm_gem_object_unreference(&obj->base);
Chris Wilson6c085a72012-08-20 11:40:46 +02001857 }
Chris Wilsonc8725f32014-03-17 12:21:55 +00001858 list_splice(&still_in_list, &dev_priv->mm.bound_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02001859
1860 return count;
1861}
1862
Chris Wilsond9973b42013-10-04 10:33:00 +01001863static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001864i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1865{
1866 return __i915_gem_shrink(dev_priv, target, true);
1867}
1868
Chris Wilsond9973b42013-10-04 10:33:00 +01001869static unsigned long
Chris Wilson6c085a72012-08-20 11:40:46 +02001870i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1871{
Chris Wilson6c085a72012-08-20 11:40:46 +02001872 i915_gem_evict_everything(dev_priv->dev);
Chris Wilsonc8725f32014-03-17 12:21:55 +00001873 return __i915_gem_shrink(dev_priv, LONG_MAX, false);
Daniel Vetter225067e2012-08-20 10:23:20 +02001874}
1875
Chris Wilson37e680a2012-06-07 15:38:42 +01001876static int
Chris Wilson6c085a72012-08-20 11:40:46 +02001877i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001878{
Chris Wilson6c085a72012-08-20 11:40:46 +02001879 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001880 int page_count, i;
1881 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01001882 struct sg_table *st;
1883 struct scatterlist *sg;
Imre Deak90797e62013-02-18 19:28:03 +02001884 struct sg_page_iter sg_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07001885 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02001886 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson6c085a72012-08-20 11:40:46 +02001887 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07001888
Chris Wilson6c085a72012-08-20 11:40:46 +02001889 /* Assert that the object is not currently in any GPU domain. As it
1890 * wasn't in the GTT, there shouldn't be any way it could have been in
1891 * a GPU cache
1892 */
1893 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1894 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1895
Chris Wilson9da3da62012-06-01 15:20:22 +01001896 st = kmalloc(sizeof(*st), GFP_KERNEL);
1897 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07001898 return -ENOMEM;
1899
Chris Wilson9da3da62012-06-01 15:20:22 +01001900 page_count = obj->base.size / PAGE_SIZE;
1901 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01001902 kfree(st);
1903 return -ENOMEM;
1904 }
1905
1906 /* Get the list of pages out of our struct file. They'll be pinned
1907 * at this point until we release them.
1908 *
1909 * Fail silently without starting the shrinker
1910 */
Al Viro496ad9a2013-01-23 17:07:38 -05001911 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilson6c085a72012-08-20 11:40:46 +02001912 gfp = mapping_gfp_mask(mapping);
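	/* First try cheap allocations: no retries, no waiting and no I/O;
	 * the failure paths below progressively relax these flags.
	 */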
Linus Torvaldscaf49192012-12-10 10:51:16 -08001913 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001914 gfp &= ~(__GFP_IO | __GFP_WAIT);
Imre Deak90797e62013-02-18 19:28:03 +02001915 sg = st->sgl;
1916 st->nents = 0;
1917 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001918 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1919 if (IS_ERR(page)) {
1920 i915_gem_purge(dev_priv, page_count);
1921 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1922 }
1923 if (IS_ERR(page)) {
1924 /* We've tried hard to allocate the memory by reaping
1925 * our own buffer, now let the real VM do its job and
1926 * go down in flames if truly OOM.
1927 */
Linus Torvaldscaf49192012-12-10 10:51:16 -08001928 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
Chris Wilson6c085a72012-08-20 11:40:46 +02001929 gfp |= __GFP_IO | __GFP_WAIT;
1930
1931 i915_gem_shrink_all(dev_priv);
1932 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1933 if (IS_ERR(page))
1934 goto err_pages;
1935
Linus Torvaldscaf49192012-12-10 10:51:16 -08001936 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001937 gfp &= ~(__GFP_IO | __GFP_WAIT);
1938 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001939#ifdef CONFIG_SWIOTLB
1940 if (swiotlb_nr_tbl()) {
1941 st->nents++;
1942 sg_set_page(sg, page, PAGE_SIZE, 0);
1943 sg = sg_next(sg);
1944 continue;
1945 }
1946#endif
Imre Deak90797e62013-02-18 19:28:03 +02001947 if (!i || page_to_pfn(page) != last_pfn + 1) {
1948 if (i)
1949 sg = sg_next(sg);
1950 st->nents++;
1951 sg_set_page(sg, page, PAGE_SIZE, 0);
1952 } else {
1953 sg->length += PAGE_SIZE;
1954 }
1955 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03001956
1957 /* Check that the i965g/gm workaround works. */
1958 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07001959 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001960#ifdef CONFIG_SWIOTLB
1961 if (!swiotlb_nr_tbl())
1962#endif
1963 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01001964 obj->pages = st;
1965
Eric Anholt673a3942008-07-30 12:06:12 -07001966 if (i915_gem_object_needs_bit17_swizzle(obj))
1967 i915_gem_object_do_bit_17_swizzle(obj);
1968
1969 return 0;
1970
1971err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02001972 sg_mark_end(sg);
1973 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
Imre Deak2db76d72013-03-26 15:14:18 +02001974 page_cache_release(sg_page_iter_page(&sg_iter));
Chris Wilson9da3da62012-06-01 15:20:22 +01001975 sg_free_table(st);
1976 kfree(st);
Eric Anholt673a3942008-07-30 12:06:12 -07001977 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07001978}
1979
Chris Wilson37e680a2012-06-07 15:38:42 +01001980/* Ensure that the associated pages are gathered from the backing storage
1981 * and pinned into our object. i915_gem_object_get_pages() may be called
1982 * multiple times before they are released by a single call to
1983 * i915_gem_object_put_pages() - once the pages are no longer referenced
1984 * either as a result of memory pressure (reaping pages under the shrinker)
1985 * or as the object is itself released.
1986 */
1987int
1988i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1989{
1990 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1991 const struct drm_i915_gem_object_ops *ops = obj->ops;
1992 int ret;
1993
Chris Wilson2f745ad2012-09-04 21:02:58 +01001994 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01001995 return 0;
1996
Chris Wilson43e28f02013-01-08 10:53:09 +00001997 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00001998 DRM_DEBUG("Attempting to obtain a purgeable object\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00001999 return -EFAULT;
Chris Wilson43e28f02013-01-08 10:53:09 +00002000 }
2001
Chris Wilsona5570172012-09-04 21:02:54 +01002002 BUG_ON(obj->pages_pin_count);
2003
Chris Wilson37e680a2012-06-07 15:38:42 +01002004 ret = ops->get_pages(obj);
2005 if (ret)
2006 return ret;
2007
Ben Widawsky35c20a62013-05-31 11:28:48 -07002008 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilson37e680a2012-06-07 15:38:42 +01002009 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002010}
2011
Ben Widawskye2d05a82013-09-24 09:57:58 -07002012static void
Chris Wilson05394f32010-11-08 19:18:58 +00002013i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson9d7730912012-11-27 16:22:52 +00002014 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002015{
Chris Wilson05394f32010-11-08 19:18:58 +00002016 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01002017 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9d7730912012-11-27 16:22:52 +00002018 u32 seqno = intel_ring_get_seqno(ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01002019
Zou Nan hai852835f2010-05-21 09:08:56 +08002020 BUG_ON(ring == NULL);
Chris Wilson02978ff2013-07-09 09:22:39 +01002021 if (obj->ring != ring && obj->last_write_seqno) {
2022 /* Keep the seqno relative to the current ring */
2023 obj->last_write_seqno = seqno;
2024 }
Chris Wilson05394f32010-11-08 19:18:58 +00002025 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07002026
2027 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00002028 if (!obj->active) {
2029 drm_gem_object_reference(&obj->base);
2030 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07002031 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01002032
Chris Wilson05394f32010-11-08 19:18:58 +00002033 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002034
Chris Wilson0201f1e2012-07-20 12:41:01 +01002035 obj->last_read_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00002036
Chris Wilsoncaea7472010-11-12 13:53:37 +00002037 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00002038 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002039
Chris Wilson7dd49062012-03-21 10:48:18 +00002040 /* Bump MRU to take account of the delayed flush */
2041 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2042 struct drm_i915_fence_reg *reg;
2043
2044 reg = &dev_priv->fence_regs[obj->fence_reg];
2045 list_move_tail(&reg->lru_list,
2046 &dev_priv->mm.fence_list);
2047 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002048 }
2049}
2050
Ben Widawskye2d05a82013-09-24 09:57:58 -07002051void i915_vma_move_to_active(struct i915_vma *vma,
2052 struct intel_ring_buffer *ring)
2053{
2054 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2055 return i915_gem_object_move_to_active(vma->obj, ring);
2056}
2057
Chris Wilsoncaea7472010-11-12 13:53:37 +00002058static void
Chris Wilsoncaea7472010-11-12 13:53:37 +00002059i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2060{
Ben Widawskyca191b12013-07-31 17:00:14 -07002061 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002062 struct i915_address_space *vm;
2063 struct i915_vma *vma;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002064
Chris Wilson65ce3022012-07-20 12:41:02 +01002065 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002066 BUG_ON(!obj->active);
Chris Wilson65ce3022012-07-20 12:41:02 +01002067
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002068 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2069 vma = i915_gem_obj_to_vma(obj, vm);
2070 if (vma && !list_empty(&vma->mm_list))
2071 list_move_tail(&vma->mm_list, &vm->inactive_list);
2072 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002073
Chris Wilson65ce3022012-07-20 12:41:02 +01002074 list_del_init(&obj->ring_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002075 obj->ring = NULL;
2076
Chris Wilson65ce3022012-07-20 12:41:02 +01002077 obj->last_read_seqno = 0;
2078 obj->last_write_seqno = 0;
2079 obj->base.write_domain = 0;
2080
2081 obj->last_fenced_seqno = 0;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002082 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002083
2084 obj->active = 0;
2085 drm_gem_object_unreference(&obj->base);
2086
2087 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08002088}
Eric Anholt673a3942008-07-30 12:06:12 -07002089
Chris Wilsonc8725f32014-03-17 12:21:55 +00002090static void
2091i915_gem_object_retire(struct drm_i915_gem_object *obj)
2092{
2093 struct intel_ring_buffer *ring = obj->ring;
2094
2095 if (ring == NULL)
2096 return;
2097
2098 if (i915_seqno_passed(ring->get_seqno(ring, true),
2099 obj->last_read_seqno))
2100 i915_gem_object_move_to_inactive(obj);
2101}
2102
Chris Wilson9d7730912012-11-27 16:22:52 +00002103static int
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002104i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002105{
Chris Wilson9d7730912012-11-27 16:22:52 +00002106 struct drm_i915_private *dev_priv = dev->dev_private;
2107 struct intel_ring_buffer *ring;
2108 int ret, i, j;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002109
Chris Wilson107f27a52012-12-10 13:56:17 +02002110 /* Carefully retire all requests without writing to the rings */
Chris Wilson9d7730912012-11-27 16:22:52 +00002111 for_each_ring(ring, dev_priv, i) {
Chris Wilson107f27a52012-12-10 13:56:17 +02002112 ret = intel_ring_idle(ring);
2113 if (ret)
2114 return ret;
Chris Wilson9d7730912012-11-27 16:22:52 +00002115 }
Chris Wilson9d7730912012-11-27 16:22:52 +00002116 i915_gem_retire_requests(dev);
Chris Wilson107f27a52012-12-10 13:56:17 +02002117
2118 /* Finally reset hw state */
Chris Wilson9d7730912012-11-27 16:22:52 +00002119 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002120 intel_ring_init_seqno(ring, seqno);
Mika Kuoppala498d2ac2012-12-04 15:12:04 +02002121
Chris Wilson9d7730912012-11-27 16:22:52 +00002122 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2123 ring->sync_seqno[j] = 0;
2124 }
2125
2126 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002127}
2128
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002129int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2130{
2131 struct drm_i915_private *dev_priv = dev->dev_private;
2132 int ret;
2133
2134 if (seqno == 0)
2135 return -EINVAL;
2136
2137 /* The seqno in the HWS page needs to be set to less than what we
2138 * will inject into the ring
2139 */
2140 ret = i915_gem_init_seqno(dev, seqno - 1);
2141 if (ret)
2142 return ret;
2143
2144 /* Carefully set the last_seqno value so that wrap
2145 * detection still works
2146 */
2147 dev_priv->next_seqno = seqno;
2148 dev_priv->last_seqno = seqno - 1;
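	/* Seqno 0 is reserved as the non-seqno value, so if seqno - 1 wrapped
	 * to 0, step back once more.
	 */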
2149 if (dev_priv->last_seqno == 0)
2150 dev_priv->last_seqno--;
2151
2152 return 0;
2153}
2154
Chris Wilson9d7730912012-11-27 16:22:52 +00002155int
2156i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002157{
Chris Wilson9d7730912012-11-27 16:22:52 +00002158 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002159
Chris Wilson9d7730912012-11-27 16:22:52 +00002160 /* reserve 0 for non-seqno */
2161 if (dev_priv->next_seqno == 0) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002162 int ret = i915_gem_init_seqno(dev, 0);
Chris Wilson9d7730912012-11-27 16:22:52 +00002163 if (ret)
2164 return ret;
2165
2166 dev_priv->next_seqno = 1;
2167 }
2168
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02002169 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
Chris Wilson9d7730912012-11-27 16:22:52 +00002170 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002171}
2172
Mika Kuoppala0025c072013-06-12 12:35:30 +03002173int __i915_add_request(struct intel_ring_buffer *ring,
2174 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002175 struct drm_i915_gem_object *obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002176 u32 *out_seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002177{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002178 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Chris Wilsonacb868d2012-09-26 13:47:30 +01002179 struct drm_i915_gem_request *request;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002180 u32 request_ring_position, request_start;
Chris Wilson3cce4692010-10-27 16:11:02 +01002181 int ret;
2182
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002183 request_start = intel_ring_get_tail(ring);
Daniel Vettercc889e02012-06-13 20:45:19 +02002184 /*
2185 * Emit any outstanding flushes - execbuf can fail to emit the flush
2186 * after having emitted the batchbuffer command. Hence we need to fix
2187 * things up similar to emitting the lazy request. The difference here
2188 * is that the flush _must_ happen before the next request, no matter
2189 * what.
2190 */
Chris Wilsona7b97612012-07-20 12:41:08 +01002191 ret = intel_ring_flush_all_caches(ring);
2192 if (ret)
2193 return ret;
Daniel Vettercc889e02012-06-13 20:45:19 +02002194
Chris Wilson3c0e2342013-09-04 10:45:52 +01002195 request = ring->preallocated_lazy_request;
2196 if (WARN_ON(request == NULL))
Chris Wilsonacb868d2012-09-26 13:47:30 +01002197 return -ENOMEM;
Daniel Vettercc889e02012-06-13 20:45:19 +02002198
Chris Wilsona71d8d92012-02-15 11:25:36 +00002199 /* Record the position of the start of the request so that
2200 * should we detect the updated seqno part-way through the
2201 * GPU processing the request, we never over-estimate the
2202 * position of the head.
2203 */
2204 request_ring_position = intel_ring_get_tail(ring);
2205
Chris Wilson9d7730912012-11-27 16:22:52 +00002206 ret = ring->add_request(ring);
Chris Wilson3c0e2342013-09-04 10:45:52 +01002207 if (ret)
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002208 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002209
Chris Wilson9d7730912012-11-27 16:22:52 +00002210 request->seqno = intel_ring_get_seqno(ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08002211 request->ring = ring;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002212 request->head = request_start;
Chris Wilsona71d8d92012-02-15 11:25:36 +00002213 request->tail = request_ring_position;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002214
2215 /* Whilst this request exists, batch_obj will be on the
2216 * active_list, and so will hold the active reference. Only when this
2217 * request is retired will the batch_obj be moved onto the
2218 * inactive_list and lose its active reference. Hence we do not need
2219 * to explicitly hold another reference here.
2220 */
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002221 request->batch_obj = obj;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002222
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002223 /* Hold a reference to the current context so that we can inspect
2224 * it later in case a hangcheck error event fires.
2225 */
2226 request->ctx = ring->last_context;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002227 if (request->ctx)
2228 i915_gem_context_reference(request->ctx);
2229
Eric Anholt673a3942008-07-30 12:06:12 -07002230 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08002231 list_add_tail(&request->list, &ring->request_list);
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002232 request->file_priv = NULL;
Zou Nan hai852835f2010-05-21 09:08:56 +08002233
Chris Wilsondb53a302011-02-03 11:57:46 +00002234 if (file) {
2235 struct drm_i915_file_private *file_priv = file->driver_priv;
2236
Chris Wilson1c255952010-09-26 11:03:27 +01002237 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002238 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002239 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002240 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01002241 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00002242 }
Eric Anholt673a3942008-07-30 12:06:12 -07002243
Chris Wilson9d7730912012-11-27 16:22:52 +00002244 trace_i915_gem_request_add(ring, request->seqno);
Chris Wilson18235212013-09-04 10:45:51 +01002245 ring->outstanding_lazy_seqno = 0;
Chris Wilson3c0e2342013-09-04 10:45:52 +01002246 ring->preallocated_lazy_request = NULL;
Chris Wilsondb53a302011-02-03 11:57:46 +00002247
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002248 if (!dev_priv->ums.mm_suspended) {
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002249 i915_queue_hangcheck(ring->dev);
2250
Chris Wilsonf62a0072014-02-21 17:55:39 +00002251 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2252 queue_delayed_work(dev_priv->wq,
2253 &dev_priv->mm.retire_work,
2254 round_jiffies_up_relative(HZ));
2255 intel_mark_busy(dev_priv->dev);
Ben Gamarif65d9422009-09-14 17:48:44 -04002256 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002257
Chris Wilsonacb868d2012-09-26 13:47:30 +01002258 if (out_seqno)
Chris Wilson9d7730912012-11-27 16:22:52 +00002259 *out_seqno = request->seqno;
Chris Wilson3cce4692010-10-27 16:11:02 +01002260 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002261}
2262
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002263static inline void
2264i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07002265{
Chris Wilson1c255952010-09-26 11:03:27 +01002266 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002267
Chris Wilson1c255952010-09-26 11:03:27 +01002268 if (!file_priv)
2269 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002270
Chris Wilson1c255952010-09-26 11:03:27 +01002271 spin_lock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002272 list_del(&request->client_list);
2273 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01002274 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002275}
2276
Mika Kuoppala939fd762014-01-30 19:04:44 +02002277static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002278 const struct i915_hw_context *ctx)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002279{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002280 unsigned long elapsed;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002281
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002282 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2283
2284 if (ctx->hang_stats.banned)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002285 return true;
2286
2287 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
Ville Syrjäläccc7bed2014-02-21 16:26:47 +02002288 if (!i915_gem_context_is_default(ctx)) {
Mika Kuoppala3fac8972014-01-30 16:05:48 +02002289 DRM_DEBUG("context hanging too fast, banning!\n");
Ville Syrjäläccc7bed2014-02-21 16:26:47 +02002290 return true;
Mika Kuoppala88b4aa82014-03-28 18:18:18 +02002291 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2292 if (i915_stop_ring_allow_warn(dev_priv))
2293 DRM_ERROR("gpu hanging too fast, banning!\n");
Ville Syrjäläccc7bed2014-02-21 16:26:47 +02002294 return true;
Mika Kuoppala3fac8972014-01-30 16:05:48 +02002295 }
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002296 }
2297
2298 return false;
2299}
2300
Mika Kuoppala939fd762014-01-30 19:04:44 +02002301static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2302 struct i915_hw_context *ctx,
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002303 const bool guilty)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002304{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002305 struct i915_ctx_hang_stats *hs;
2306
2307 if (WARN_ON(!ctx))
2308 return;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002309
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002310 hs = &ctx->hang_stats;
2311
2312 if (guilty) {
Mika Kuoppala939fd762014-01-30 19:04:44 +02002313 hs->banned = i915_context_is_banned(dev_priv, ctx);
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002314 hs->batch_active++;
2315 hs->guilty_ts = get_seconds();
2316 } else {
2317 hs->batch_pending++;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002318 }
2319}
2320
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002321static void i915_gem_free_request(struct drm_i915_gem_request *request)
2322{
2323 list_del(&request->list);
2324 i915_gem_request_remove_from_client(request);
2325
2326 if (request->ctx)
2327 i915_gem_context_unreference(request->ctx);
2328
2329 kfree(request);
2330}
2331
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002332struct drm_i915_gem_request *
2333i915_gem_find_active_request(struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01002334{
Chris Wilson4db080f2013-12-04 11:37:09 +00002335 struct drm_i915_gem_request *request;
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002336 u32 completed_seqno;
2337
2338 completed_seqno = ring->get_seqno(ring, false);
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002339
Chris Wilson4db080f2013-12-04 11:37:09 +00002340 list_for_each_entry(request, &ring->request_list, list) {
2341 if (i915_seqno_passed(completed_seqno, request->seqno))
2342 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002343
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002344 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00002345 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002346
2347 return NULL;
2348}
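
/*
 * Worked example of the scan above (seqnos invented for illustration): if
 * the ring reports a completed seqno of 102 and the request list holds
 * seqnos 100, 101, 103 and 105 in submission order, 100 and 101 have
 * already passed and are skipped, so 103 is returned as the request that
 * was active (and possibly guilty) when the hang was detected.
 */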
2349
2350static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2351 struct intel_ring_buffer *ring)
2352{
2353 struct drm_i915_gem_request *request;
2354 bool ring_hung;
2355
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002356 request = i915_gem_find_active_request(ring);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002357
2358 if (request == NULL)
2359 return;
2360
2361 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2362
Mika Kuoppala939fd762014-01-30 19:04:44 +02002363 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002364
2365 list_for_each_entry_continue(request, &ring->request_list, list)
Mika Kuoppala939fd762014-01-30 19:04:44 +02002366 i915_set_reset_status(dev_priv, request->ctx, false);
Chris Wilson4db080f2013-12-04 11:37:09 +00002367}
2368
2369static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2370 struct intel_ring_buffer *ring)
2371{
Chris Wilsondfaae392010-09-22 10:31:52 +01002372 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002373 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07002374
Chris Wilson05394f32010-11-08 19:18:58 +00002375 obj = list_first_entry(&ring->active_list,
2376 struct drm_i915_gem_object,
2377 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002378
Chris Wilson05394f32010-11-08 19:18:58 +00002379 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002380 }
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002381
2382 /*
2383 * We must free the requests after all the corresponding objects have
2384	 * been moved off the active lists, which is the same order the normal
2385	 * retire_requests function uses. This is important if objects hold
2386 * implicit references on things like e.g. ppgtt address spaces through
2387 * the request.
2388 */
2389 while (!list_empty(&ring->request_list)) {
2390 struct drm_i915_gem_request *request;
2391
2392 request = list_first_entry(&ring->request_list,
2393 struct drm_i915_gem_request,
2394 list);
2395
2396 i915_gem_free_request(request);
2397 }
Chris Wilsone3efda42014-04-09 09:19:41 +01002398
2399	/* These may not have been flushed before the reset, do so now */
2400 kfree(ring->preallocated_lazy_request);
2401 ring->preallocated_lazy_request = NULL;
2402 ring->outstanding_lazy_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002403}
2404
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002405void i915_gem_restore_fences(struct drm_device *dev)
Chris Wilson312817a2010-11-22 11:50:11 +00002406{
2407 struct drm_i915_private *dev_priv = dev->dev_private;
2408 int i;
2409
Daniel Vetter4b9de732011-10-09 21:52:02 +02002410 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00002411 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00002412
Daniel Vetter94a335d2013-07-17 14:51:28 +02002413 /*
2414 * Commit delayed tiling changes if we have an object still
2415 * attached to the fence, otherwise just clear the fence.
2416 */
2417 if (reg->obj) {
2418 i915_gem_object_update_fence(reg->obj, reg,
2419 reg->obj->tiling_mode);
2420 } else {
2421 i915_gem_write_fence(dev, i, NULL);
2422 }
Chris Wilson312817a2010-11-22 11:50:11 +00002423 }
2424}
2425
Chris Wilson069efc12010-09-30 16:53:18 +01002426void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002427{
Chris Wilsondfaae392010-09-22 10:31:52 +01002428 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002429 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002430 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002431
Chris Wilson4db080f2013-12-04 11:37:09 +00002432 /*
2433 * Before we free the objects from the requests, we need to inspect
2434	 * them to find the guilty party. As the requests only borrow
2435 * their reference to the objects, the inspection must be done first.
2436 */
Chris Wilsonb4519512012-05-11 14:29:30 +01002437 for_each_ring(ring, dev_priv, i)
Chris Wilson4db080f2013-12-04 11:37:09 +00002438 i915_gem_reset_ring_status(dev_priv, ring);
2439
2440 for_each_ring(ring, dev_priv, i)
2441 i915_gem_reset_ring_cleanup(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01002442
Ben Widawskyacce9ff2013-12-06 14:11:03 -08002443 i915_gem_context_reset(dev);
2444
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002445 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002446}
2447
2448/**
2449 * This function clears the request list as sequence numbers are passed.
2450 */
Damien Lespiaucb216aa2014-03-03 17:42:36 +00002451static void
Chris Wilsondb53a302011-02-03 11:57:46 +00002452i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002453{
Eric Anholt673a3942008-07-30 12:06:12 -07002454 uint32_t seqno;
2455
Chris Wilsondb53a302011-02-03 11:57:46 +00002456 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01002457 return;
2458
Chris Wilsondb53a302011-02-03 11:57:46 +00002459 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002460
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01002461 seqno = ring->get_seqno(ring, true);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002462
Chris Wilsone9103032014-01-07 11:45:14 +00002463 /* Move any buffers on the active list that are no longer referenced
2464 * by the ringbuffer to the flushing/inactive lists as appropriate,
2465 * before we free the context associated with the requests.
2466 */
2467 while (!list_empty(&ring->active_list)) {
2468 struct drm_i915_gem_object *obj;
2469
2470 obj = list_first_entry(&ring->active_list,
2471 struct drm_i915_gem_object,
2472 ring_list);
2473
2474 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2475 break;
2476
2477 i915_gem_object_move_to_inactive(obj);
2478 }
2479
2480
Zou Nan hai852835f2010-05-21 09:08:56 +08002481 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002482 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07002483
Zou Nan hai852835f2010-05-21 09:08:56 +08002484 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07002485 struct drm_i915_gem_request,
2486 list);
Eric Anholt673a3942008-07-30 12:06:12 -07002487
Chris Wilsondfaae392010-09-22 10:31:52 +01002488 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07002489 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002490
Chris Wilsondb53a302011-02-03 11:57:46 +00002491 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002492 /* We know the GPU must have read the request to have
2493 * sent us the seqno + interrupt, so use the position
2494		 * of the tail of the request to update the last known position
2495 * of the GPU head.
2496 */
2497 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002498
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002499 i915_gem_free_request(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002500 }
2501
Chris Wilsondb53a302011-02-03 11:57:46 +00002502 if (unlikely(ring->trace_irq_seqno &&
2503 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002504 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00002505 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002506 }
Chris Wilson23bc5982010-09-29 16:10:57 +01002507
Chris Wilsondb53a302011-02-03 11:57:46 +00002508 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002509}
2510
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002511bool
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002512i915_gem_retire_requests(struct drm_device *dev)
2513{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002514 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002515 struct intel_ring_buffer *ring;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002516 bool idle = true;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002517 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002518
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002519 for_each_ring(ring, dev_priv, i) {
Chris Wilsonb4519512012-05-11 14:29:30 +01002520 i915_gem_retire_requests_ring(ring);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002521 idle &= list_empty(&ring->request_list);
2522 }
2523
2524 if (idle)
2525 mod_delayed_work(dev_priv->wq,
2526 &dev_priv->mm.idle_work,
2527 msecs_to_jiffies(100));
2528
2529 return idle;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002530}
2531
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002532static void
Eric Anholt673a3942008-07-30 12:06:12 -07002533i915_gem_retire_work_handler(struct work_struct *work)
2534{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002535 struct drm_i915_private *dev_priv =
2536 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2537 struct drm_device *dev = dev_priv->dev;
Chris Wilson0a587052011-01-09 21:05:44 +00002538 bool idle;
Eric Anholt673a3942008-07-30 12:06:12 -07002539
Chris Wilson891b48c2010-09-29 12:26:37 +01002540 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002541 idle = false;
2542 if (mutex_trylock(&dev->struct_mutex)) {
2543 idle = i915_gem_retire_requests(dev);
2544 mutex_unlock(&dev->struct_mutex);
2545 }
2546 if (!idle)
Chris Wilsonbcb45082012-10-05 17:02:57 +01002547 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2548 round_jiffies_up_relative(HZ));
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002549}
Chris Wilson891b48c2010-09-29 12:26:37 +01002550
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002551static void
2552i915_gem_idle_work_handler(struct work_struct *work)
2553{
2554 struct drm_i915_private *dev_priv =
2555 container_of(work, typeof(*dev_priv), mm.idle_work.work);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002556
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002557 intel_mark_idle(dev_priv->dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002558}
2559
Ben Widawsky5816d642012-04-11 11:18:19 -07002560/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002561 * Ensures that an object will eventually get non-busy by flushing any required
2562 * write domains, emitting any outstanding lazy request and retiring any
2563 * completed requests.
2564 */
2565static int
2566i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2567{
2568 int ret;
2569
2570 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002571 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002572 if (ret)
2573 return ret;
2574
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002575 i915_gem_retire_requests_ring(obj->ring);
2576 }
2577
2578 return 0;
2579}
2580
2581/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002582 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2583 * @DRM_IOCTL_ARGS: standard ioctl arguments
2584 *
2585 * Returns 0 if successful, else an error is returned with the remaining time in
2586 * the timeout parameter.
2587 * -ETIME: object is still busy after timeout
2588 * -ERESTARTSYS: signal interrupted the wait
2589 * -ENOENT: object doesn't exist
2590 * Also possible, but rare:
2591 * -EAGAIN: GPU wedged
2592 * -ENOMEM: out of memory
2593 * -ENODEV: Internal IRQ fail
2594 * -E?: The add request failed
2595 *
2596 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2597 * non-zero timeout parameter the wait ioctl will wait for the given number of
2598 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2599 * without holding struct_mutex the object may become re-busied before this
2600 * function completes. A similar but shorter race condition exists in the busy
2601 * ioctl.
2602 */
2603int
2604i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2605{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002606 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002607 struct drm_i915_gem_wait *args = data;
2608 struct drm_i915_gem_object *obj;
2609 struct intel_ring_buffer *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002610 struct timespec timeout_stack, *timeout = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01002611 unsigned reset_counter;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002612 u32 seqno = 0;
2613 int ret = 0;
2614
Ben Widawskyeac1f142012-06-05 15:24:24 -07002615 if (args->timeout_ns >= 0) {
2616 timeout_stack = ns_to_timespec(args->timeout_ns);
2617 timeout = &timeout_stack;
2618 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002619
2620 ret = i915_mutex_lock_interruptible(dev);
2621 if (ret)
2622 return ret;
2623
2624 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2625 if (&obj->base == NULL) {
2626 mutex_unlock(&dev->struct_mutex);
2627 return -ENOENT;
2628 }
2629
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002630 /* Need to make sure the object gets inactive eventually. */
2631 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002632 if (ret)
2633 goto out;
2634
2635 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002636 seqno = obj->last_read_seqno;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002637 ring = obj->ring;
2638 }
2639
2640 if (seqno == 0)
2641 goto out;
2642
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002643 /* Do this after OLR check to make sure we make forward progress polling
2644 * on this IOCTL with a 0 timeout (like busy ioctl)
2645 */
2646 if (!args->timeout_ns) {
2647 ret = -ETIME;
2648 goto out;
2649 }
2650
2651 drm_gem_object_unreference(&obj->base);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002652 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002653 mutex_unlock(&dev->struct_mutex);
2654
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002655 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03002656 if (timeout)
Ben Widawskyeac1f142012-06-05 15:24:24 -07002657 args->timeout_ns = timespec_to_ns(timeout);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002658 return ret;
2659
2660out:
2661 drm_gem_object_unreference(&obj->base);
2662 mutex_unlock(&dev->struct_mutex);
2663 return ret;
2664}
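
/*
 * Illustrative sketch (not compiled into the driver) of how userspace might
 * drive this ioctl through the libdrm drmIoctl() wrapper. The fd and handle
 * are assumed to come from the caller, includes of xf86drm.h, i915_drm.h,
 * errno.h and stdio.h are assumed, and error handling is reduced to the
 * cases documented above:
 *
 *	struct drm_i915_gem_wait wait;
 *
 *	memset(&wait, 0, sizeof(wait));
 *	wait.bo_handle = handle;
 *	wait.timeout_ns = 500 * 1000 * 1000;	(a 500 ms budget)
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		printf("idle, %lld ns of the budget left\n",
 *		       (long long)wait.timeout_ns);
 *	else if (errno == ETIME)
 *		printf("still busy after the timeout\n");
 *
 * Passing timeout_ns == 0 degenerates into a busy query, as noted above.
 */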
2665
2666/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002667 * i915_gem_object_sync - sync an object to a ring.
2668 *
2669 * @obj: object which may be in use on another ring.
2670 * @to: ring we wish to use the object on. May be NULL.
2671 *
2672 * This code is meant to abstract object synchronization with the GPU.
2673 * Calling with NULL implies synchronizing the object with the CPU
2674 * rather than a particular GPU ring.
2675 *
2676 * Returns 0 if successful, else propagates the error from the lower layer.
2677 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002678int
2679i915_gem_object_sync(struct drm_i915_gem_object *obj,
2680 struct intel_ring_buffer *to)
2681{
2682 struct intel_ring_buffer *from = obj->ring;
2683 u32 seqno;
2684 int ret, idx;
2685
2686 if (from == NULL || to == from)
2687 return 0;
2688
Ben Widawsky5816d642012-04-11 11:18:19 -07002689 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Chris Wilson0201f1e2012-07-20 12:41:01 +01002690 return i915_gem_object_wait_rendering(obj, false);
Ben Widawsky2911a352012-04-05 14:47:36 -07002691
2692 idx = intel_ring_sync_index(from, to);
2693
Chris Wilson0201f1e2012-07-20 12:41:01 +01002694 seqno = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002695 if (seqno <= from->sync_seqno[idx])
2696 return 0;
2697
Ben Widawskyb4aca012012-04-25 20:50:12 -07002698 ret = i915_gem_check_olr(obj->ring, seqno);
2699 if (ret)
2700 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002701
Chris Wilsonb52b89d2013-09-25 11:43:28 +01002702 trace_i915_gem_ring_sync_to(from, to, seqno);
Ben Widawsky1500f7e2012-04-11 11:18:21 -07002703 ret = to->sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002704 if (!ret)
Mika Kuoppala7b01e262012-11-28 17:18:45 +02002705 /* We use last_read_seqno because sync_to()
2706 * might have just caused seqno wrap under
2707 * the radar.
2708 */
2709 from->sync_seqno[idx] = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002710
Ben Widawskye3a5a222012-04-11 11:18:20 -07002711 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002712}
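
/*
 * Illustrative walk-through of the bookkeeping above (seqnos invented): if
 * the object was last read by the render ring at seqno 104 while the render
 * ring's sync_seqno[] slot for the blitter still records 100, a semaphore
 * wait on 104 is emitted on the blitter and 104 is written back into that
 * slot, so a later sync request against a seqno <= 104 returns early
 * without touching the rings.
 */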
2713
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002714static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2715{
2716 u32 old_write_domain, old_read_domains;
2717
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002718 /* Force a pagefault for domain tracking on next user access */
2719 i915_gem_release_mmap(obj);
2720
Keith Packardb97c3d92011-06-24 21:02:59 -07002721 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2722 return;
2723
Chris Wilson97c809fd2012-10-09 19:24:38 +01002724 /* Wait for any direct GTT access to complete */
2725 mb();
2726
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002727 old_read_domains = obj->base.read_domains;
2728 old_write_domain = obj->base.write_domain;
2729
2730 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2731 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2732
2733 trace_i915_gem_object_change_domain(obj,
2734 old_read_domains,
2735 old_write_domain);
2736}
2737
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002738int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002739{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002740 struct drm_i915_gem_object *obj = vma->obj;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002741 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson43e28f02013-01-08 10:53:09 +00002742 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002743
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002744 if (list_empty(&vma->vma_link))
Eric Anholt673a3942008-07-30 12:06:12 -07002745 return 0;
2746
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002747 if (!drm_mm_node_allocated(&vma->node)) {
2748 i915_gem_vma_destroy(vma);
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002749 return 0;
2750 }
Ben Widawsky433544b2013-08-13 18:09:06 -07002751
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002752 if (vma->pin_count)
Chris Wilson31d8d652012-05-24 19:11:20 +01002753 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002754
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002755 BUG_ON(obj->pages == NULL);
2756
Chris Wilsona8198ee2011-04-13 22:04:09 +01002757 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002758 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002759 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002760	/* Continue on if we fail due to EIO; the GPU is hung, so we
2761	 * should be safe, and we need to clean up or else we might
2762 * cause memory corruption through use-after-free.
2763 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002764
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002765 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002766
Daniel Vetter96b47b62009-12-15 17:50:00 +01002767 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002768 ret = i915_gem_object_put_fence(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002769 if (ret)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002770 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002771
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002772 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00002773
Ben Widawsky6f65e292013-12-06 14:10:56 -08002774 vma->unbind_vma(vma);
2775
Daniel Vetter74163902012-02-15 23:50:21 +01002776 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002777
Chris Wilson64bf9302014-02-25 14:23:28 +00002778 list_del_init(&vma->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002779 /* Avoid an unnecessary call to unbind on rebind. */
Ben Widawsky5cacaac2013-07-31 17:00:13 -07002780 if (i915_is_ggtt(vma->vm))
2781 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002782
Ben Widawsky2f633152013-07-17 12:19:03 -07002783 drm_mm_remove_node(&vma->node);
2784 i915_gem_vma_destroy(vma);
2785
2786 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002787 * no more VMAs exist. */
Ben Widawsky2f633152013-07-17 12:19:03 -07002788 if (list_empty(&obj->vma_list))
2789 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002790
Chris Wilson70903c32013-12-04 09:59:09 +00002791 /* And finally now the object is completely decoupled from this vma,
2792 * we can drop its hold on the backing storage and allow it to be
2793 * reaped by the shrinker.
2794 */
2795 i915_gem_object_unpin_pages(obj);
2796
Chris Wilson88241782011-01-07 17:09:48 +00002797 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002798}
2799
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002800int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002801{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002802 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002803 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002804 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002805
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002806 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002807 for_each_ring(ring, dev_priv, i) {
Chris Wilson691e6412014-04-09 09:07:36 +01002808 ret = i915_switch_context(ring, ring->default_context);
Ben Widawskyb6c74882012-08-14 14:35:14 -07002809 if (ret)
2810 return ret;
2811
Chris Wilson3e960502012-11-27 16:22:54 +00002812 ret = intel_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002813 if (ret)
2814 return ret;
2815 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002816
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002817 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002818}
2819
Chris Wilson9ce079e2012-04-17 15:31:30 +01002820static void i965_write_fence_reg(struct drm_device *dev, int reg,
2821 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002822{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002823 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak56c844e2013-01-07 21:47:34 +02002824 int fence_reg;
2825 int fence_pitch_shift;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002826
Imre Deak56c844e2013-01-07 21:47:34 +02002827 if (INTEL_INFO(dev)->gen >= 6) {
2828 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2829 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2830 } else {
2831 fence_reg = FENCE_REG_965_0;
2832 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2833 }
2834
Chris Wilsond18b9612013-07-10 13:36:23 +01002835 fence_reg += reg * 8;
2836
2837	/* To work around incoherency with non-atomic 64-bit register updates,
2838 * we split the 64-bit update into two 32-bit writes. In order
2839 * for a partial fence not to be evaluated between writes, we
2840	 * precede the update with a write to turn off the fence register,
2841 * and only enable the fence as the last step.
2842 *
2843 * For extra levels of paranoia, we make sure each step lands
2844 * before applying the next step.
2845 */
2846 I915_WRITE(fence_reg, 0);
2847 POSTING_READ(fence_reg);
2848
Chris Wilson9ce079e2012-04-17 15:31:30 +01002849 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002850 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilsond18b9612013-07-10 13:36:23 +01002851 uint64_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002852
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002853 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
Chris Wilson9ce079e2012-04-17 15:31:30 +01002854 0xfffff000) << 32;
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002855 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
Imre Deak56c844e2013-01-07 21:47:34 +02002856 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002857 if (obj->tiling_mode == I915_TILING_Y)
2858 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2859 val |= I965_FENCE_REG_VALID;
Daniel Vetterc6642782010-11-12 13:46:18 +00002860
Chris Wilsond18b9612013-07-10 13:36:23 +01002861 I915_WRITE(fence_reg + 4, val >> 32);
2862 POSTING_READ(fence_reg + 4);
2863
2864 I915_WRITE(fence_reg + 0, val);
2865 POSTING_READ(fence_reg);
2866 } else {
2867 I915_WRITE(fence_reg + 4, 0);
2868 POSTING_READ(fence_reg + 4);
2869 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002870}
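
/*
 * Worked example of the packing above (illustrative numbers only): an
 * X-tiled object at GGTT offset 0x00100000 with size 0x00040000 and a
 * 512-byte stride ends up encoded as
 *
 *	upper dword: (0x00100000 + 0x00040000 - 4096) & 0xfffff000 = 0x0013f000
 *	lower dword: 0x00100000 | ((512 / 128 - 1) << fence_pitch_shift)
 *	             | I965_FENCE_REG_VALID
 *
 * i.e. the fence covers the object's GGTT range at 4 KiB granularity with
 * the pitch expressed in 128-byte tile widths, and only takes effect once
 * the valid bit lands in the final write.
 */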
2871
Chris Wilson9ce079e2012-04-17 15:31:30 +01002872static void i915_write_fence_reg(struct drm_device *dev, int reg,
2873 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002874{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002875 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002876 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002877
Chris Wilson9ce079e2012-04-17 15:31:30 +01002878 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002879 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002880 int pitch_val;
2881 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002882
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002883 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002884 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002885 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2886 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2887 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002888
2889 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2890 tile_width = 128;
2891 else
2892 tile_width = 512;
2893
2894 /* Note: pitch better be a power of two tile widths */
2895 pitch_val = obj->stride / tile_width;
2896 pitch_val = ffs(pitch_val) - 1;
2897
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002898 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002899 if (obj->tiling_mode == I915_TILING_Y)
2900 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2901 val |= I915_FENCE_SIZE_BITS(size);
2902 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2903 val |= I830_FENCE_REG_VALID;
2904 } else
2905 val = 0;
2906
2907 if (reg < 8)
2908 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002909 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002910 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002911
Chris Wilson9ce079e2012-04-17 15:31:30 +01002912 I915_WRITE(reg, val);
2913 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002914}
2915
Chris Wilson9ce079e2012-04-17 15:31:30 +01002916static void i830_write_fence_reg(struct drm_device *dev, int reg,
2917 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002918{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002919 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002920 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002921
Chris Wilson9ce079e2012-04-17 15:31:30 +01002922 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002923 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002924 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002925
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002926 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002927 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002928 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2929 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2930 i915_gem_obj_ggtt_offset(obj), size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002931
Chris Wilson9ce079e2012-04-17 15:31:30 +01002932 pitch_val = obj->stride / 128;
2933 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002934
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002935 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002936 if (obj->tiling_mode == I915_TILING_Y)
2937 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2938 val |= I830_FENCE_SIZE_BITS(size);
2939 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2940 val |= I830_FENCE_REG_VALID;
2941 } else
2942 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002943
Chris Wilson9ce079e2012-04-17 15:31:30 +01002944 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2945 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2946}
2947
Chris Wilsond0a57782012-10-09 19:24:37 +01002948static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2949{
2950 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2951}
2952
Chris Wilson9ce079e2012-04-17 15:31:30 +01002953static void i915_gem_write_fence(struct drm_device *dev, int reg,
2954 struct drm_i915_gem_object *obj)
2955{
Chris Wilsond0a57782012-10-09 19:24:37 +01002956 struct drm_i915_private *dev_priv = dev->dev_private;
2957
2958 /* Ensure that all CPU reads are completed before installing a fence
2959 * and all writes before removing the fence.
2960 */
2961 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2962 mb();
2963
Daniel Vetter94a335d2013-07-17 14:51:28 +02002964 WARN(obj && (!obj->stride || !obj->tiling_mode),
2965 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2966 obj->stride, obj->tiling_mode);
2967
Chris Wilson9ce079e2012-04-17 15:31:30 +01002968 switch (INTEL_INFO(dev)->gen) {
Ben Widawsky5ab31332013-11-02 21:07:03 -07002969 case 8:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002970 case 7:
Imre Deak56c844e2013-01-07 21:47:34 +02002971 case 6:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002972 case 5:
2973 case 4: i965_write_fence_reg(dev, reg, obj); break;
2974 case 3: i915_write_fence_reg(dev, reg, obj); break;
2975 case 2: i830_write_fence_reg(dev, reg, obj); break;
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08002976 default: BUG();
Chris Wilson9ce079e2012-04-17 15:31:30 +01002977 }
Chris Wilsond0a57782012-10-09 19:24:37 +01002978
2979 /* And similarly be paranoid that no direct access to this region
2980 * is reordered to before the fence is installed.
2981 */
2982 if (i915_gem_object_needs_mb(obj))
2983 mb();
Jesse Barnesde151cf2008-11-12 10:03:55 -08002984}
2985
Chris Wilson61050802012-04-17 15:31:31 +01002986static inline int fence_number(struct drm_i915_private *dev_priv,
2987 struct drm_i915_fence_reg *fence)
2988{
2989 return fence - dev_priv->fence_regs;
2990}
2991
2992static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2993 struct drm_i915_fence_reg *fence,
2994 bool enable)
2995{
Chris Wilson2dc8aae2013-05-22 17:08:06 +01002996 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson46a0b632013-07-10 13:36:24 +01002997 int reg = fence_number(dev_priv, fence);
Chris Wilson61050802012-04-17 15:31:31 +01002998
Chris Wilson46a0b632013-07-10 13:36:24 +01002999 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
Chris Wilson61050802012-04-17 15:31:31 +01003000
3001 if (enable) {
Chris Wilson46a0b632013-07-10 13:36:24 +01003002 obj->fence_reg = reg;
Chris Wilson61050802012-04-17 15:31:31 +01003003 fence->obj = obj;
3004 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3005 } else {
3006 obj->fence_reg = I915_FENCE_REG_NONE;
3007 fence->obj = NULL;
3008 list_del_init(&fence->lru_list);
3009 }
Daniel Vetter94a335d2013-07-17 14:51:28 +02003010 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +01003011}
3012
Chris Wilsond9e86c02010-11-10 16:40:20 +00003013static int
Chris Wilsond0a57782012-10-09 19:24:37 +01003014i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003015{
Chris Wilson1c293ea2012-04-17 15:31:27 +01003016 if (obj->last_fenced_seqno) {
Chris Wilson86d5bc32012-07-20 12:41:04 +01003017 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01003018 if (ret)
3019 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003020
3021 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003022 }
3023
Chris Wilson86d5bc32012-07-20 12:41:04 +01003024 obj->fenced_gpu_access = false;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003025 return 0;
3026}
3027
3028int
3029i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3030{
Chris Wilson61050802012-04-17 15:31:31 +01003031 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003032 struct drm_i915_fence_reg *fence;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003033 int ret;
3034
Chris Wilsond0a57782012-10-09 19:24:37 +01003035 ret = i915_gem_object_wait_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003036 if (ret)
3037 return ret;
3038
Chris Wilson61050802012-04-17 15:31:31 +01003039 if (obj->fence_reg == I915_FENCE_REG_NONE)
3040 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01003041
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003042 fence = &dev_priv->fence_regs[obj->fence_reg];
3043
Chris Wilson61050802012-04-17 15:31:31 +01003044 i915_gem_object_fence_lost(obj);
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003045 i915_gem_object_update_fence(obj, fence, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003046
3047 return 0;
3048}
3049
3050static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01003051i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01003052{
Daniel Vetterae3db242010-02-19 11:51:58 +01003053 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01003054 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003055 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01003056
3057 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003058 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003059 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3060 reg = &dev_priv->fence_regs[i];
3061 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003062 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003063
Chris Wilson1690e1e2011-12-14 13:57:08 +01003064 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003065 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003066 }
3067
Chris Wilsond9e86c02010-11-10 16:40:20 +00003068 if (avail == NULL)
Chris Wilson5dce5b932014-01-20 10:17:36 +00003069 goto deadlock;
Daniel Vetterae3db242010-02-19 11:51:58 +01003070
3071 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003072 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01003073 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01003074 continue;
3075
Chris Wilson8fe301a2012-04-17 15:31:28 +01003076 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003077 }
3078
Chris Wilson5dce5b932014-01-20 10:17:36 +00003079deadlock:
3080 /* Wait for completion of pending flips which consume fences */
3081 if (intel_has_pending_fb_unpin(dev))
3082 return ERR_PTR(-EAGAIN);
3083
3084 return ERR_PTR(-EDEADLK);
Daniel Vetterae3db242010-02-19 11:51:58 +01003085}
3086
Jesse Barnesde151cf2008-11-12 10:03:55 -08003087/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003088 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08003089 * @obj: object to map through a fence reg
3090 *
3091 * When mapping objects through the GTT, userspace wants to be able to write
3092 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003093 * This function walks the fence regs looking for a free one for @obj,
3094 * stealing one if it can't find any.
3095 *
3096 * It then sets up the reg based on the object's properties: address, pitch
3097 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003098 *
3099 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003100 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003101int
Chris Wilson06d98132012-04-17 15:31:24 +01003102i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08003103{
Chris Wilson05394f32010-11-08 19:18:58 +00003104 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08003105 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01003106 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003107 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003108 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003109
Chris Wilson14415742012-04-17 15:31:33 +01003110 /* Have we updated the tiling parameters upon the object and so
3111 * will need to serialise the write to the associated fence register?
3112 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003113 if (obj->fence_dirty) {
Chris Wilsond0a57782012-10-09 19:24:37 +01003114 ret = i915_gem_object_wait_fence(obj);
Chris Wilson14415742012-04-17 15:31:33 +01003115 if (ret)
3116 return ret;
3117 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003118
Chris Wilsond9e86c02010-11-10 16:40:20 +00003119 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00003120 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3121 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003122 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01003123 list_move_tail(&reg->lru_list,
3124 &dev_priv->mm.fence_list);
3125 return 0;
3126 }
3127 } else if (enable) {
3128 reg = i915_find_fence_reg(dev);
Chris Wilson5dce5b932014-01-20 10:17:36 +00003129 if (IS_ERR(reg))
3130 return PTR_ERR(reg);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003131
Chris Wilson14415742012-04-17 15:31:33 +01003132 if (reg->obj) {
3133 struct drm_i915_gem_object *old = reg->obj;
3134
Chris Wilsond0a57782012-10-09 19:24:37 +01003135 ret = i915_gem_object_wait_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003136 if (ret)
3137 return ret;
3138
Chris Wilson14415742012-04-17 15:31:33 +01003139 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003140 }
Chris Wilson14415742012-04-17 15:31:33 +01003141 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07003142 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07003143
Chris Wilson14415742012-04-17 15:31:33 +01003144 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson14415742012-04-17 15:31:33 +01003145
Chris Wilson9ce079e2012-04-17 15:31:30 +01003146 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003147}
3148
Chris Wilson42d6ab42012-07-26 11:49:32 +01003149static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3150 struct drm_mm_node *gtt_space,
3151 unsigned long cache_level)
3152{
3153 struct drm_mm_node *other;
3154
3155 /* On non-LLC machines we have to be careful when putting differing
3156 * types of snoopable memory together to avoid the prefetcher
Damien Lespiau4239ca72012-12-03 16:26:16 +00003157 * crossing memory domains and dying.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003158 */
3159 if (HAS_LLC(dev))
3160 return true;
3161
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003162 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003163 return true;
3164
3165 if (list_empty(&gtt_space->node_list))
3166 return true;
3167
3168 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3169 if (other->allocated && !other->hole_follows && other->color != cache_level)
3170 return false;
3171
3172 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3173 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3174 return false;
3175
3176 return true;
3177}
3178
3179static void i915_gem_verify_gtt(struct drm_device *dev)
3180{
3181#if WATCH_GTT
3182 struct drm_i915_private *dev_priv = dev->dev_private;
3183 struct drm_i915_gem_object *obj;
3184 int err = 0;
3185
Ben Widawsky35c20a62013-05-31 11:28:48 -07003186 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
Chris Wilson42d6ab42012-07-26 11:49:32 +01003187 if (obj->gtt_space == NULL) {
3188 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3189 err++;
3190 continue;
3191 }
3192
3193 if (obj->cache_level != obj->gtt_space->color) {
3194 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003195 i915_gem_obj_ggtt_offset(obj),
3196 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003197 obj->cache_level,
3198 obj->gtt_space->color);
3199 err++;
3200 continue;
3201 }
3202
3203 if (!i915_gem_valid_gtt_space(dev,
3204 obj->gtt_space,
3205 obj->cache_level)) {
3206 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003207 i915_gem_obj_ggtt_offset(obj),
3208 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003209 obj->cache_level);
3210 err++;
3211 continue;
3212 }
3213 }
3214
3215 WARN_ON(err);
3216#endif
3217}
3218
Jesse Barnesde151cf2008-11-12 10:03:55 -08003219/**
Eric Anholt673a3942008-07-30 12:06:12 -07003220 * Finds free space in the GTT aperture and binds the object there.
3221 */
Daniel Vetter262de142014-02-14 14:01:20 +01003222static struct i915_vma *
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003223i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3224 struct i915_address_space *vm,
3225 unsigned alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003226 unsigned flags)
Eric Anholt673a3942008-07-30 12:06:12 -07003227{
Chris Wilson05394f32010-11-08 19:18:58 +00003228 struct drm_device *dev = obj->base.dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03003229 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter5e783302010-11-14 22:32:36 +01003230 u32 size, fence_size, fence_alignment, unfenced_alignment;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003231 size_t gtt_max =
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003232 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
Ben Widawsky2f633152013-07-17 12:19:03 -07003233 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003234 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003235
Chris Wilsone28f8712011-07-18 13:11:49 -07003236 fence_size = i915_gem_get_gtt_size(dev,
3237 obj->base.size,
3238 obj->tiling_mode);
3239 fence_alignment = i915_gem_get_gtt_alignment(dev,
3240 obj->base.size,
Imre Deakd8651102013-01-07 21:47:33 +02003241 obj->tiling_mode, true);
Chris Wilsone28f8712011-07-18 13:11:49 -07003242 unfenced_alignment =
Imre Deakd8651102013-01-07 21:47:33 +02003243 i915_gem_get_gtt_alignment(dev,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003244 obj->base.size,
3245 obj->tiling_mode, false);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003246
Eric Anholt673a3942008-07-30 12:06:12 -07003247 if (alignment == 0)
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003248 alignment = flags & PIN_MAPPABLE ? fence_alignment :
Daniel Vetter5e783302010-11-14 22:32:36 +01003249 unfenced_alignment;
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003250 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003251 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
Daniel Vetter262de142014-02-14 14:01:20 +01003252 return ERR_PTR(-EINVAL);
Eric Anholt673a3942008-07-30 12:06:12 -07003253 }
3254
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003255 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003256
Chris Wilson654fc602010-05-27 13:18:21 +01003257 /* If the object is bigger than the entire aperture, reject it early
3258 * before evicting everything in a vain attempt to find space.
3259 */
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003260 if (obj->base.size > gtt_max) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003261 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
Chris Wilsona36689c2013-05-21 16:58:49 +01003262 obj->base.size,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003263 flags & PIN_MAPPABLE ? "mappable" : "total",
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003264 gtt_max);
Daniel Vetter262de142014-02-14 14:01:20 +01003265 return ERR_PTR(-E2BIG);
Chris Wilson654fc602010-05-27 13:18:21 +01003266 }
3267
Chris Wilson37e680a2012-06-07 15:38:42 +01003268 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003269 if (ret)
Daniel Vetter262de142014-02-14 14:01:20 +01003270 return ERR_PTR(ret);
Chris Wilson6c085a72012-08-20 11:40:46 +02003271
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003272 i915_gem_object_pin_pages(obj);
3273
Ben Widawskyaccfef22013-08-14 11:38:35 +02003274 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
Daniel Vetter262de142014-02-14 14:01:20 +01003275 if (IS_ERR(vma))
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003276 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003277
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003278search_free:
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003279 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003280 size, alignment,
David Herrmann31e5d7c2013-07-27 13:36:27 +02003281 obj->cache_level, 0, gtt_max,
Lauri Kasanen62347f92014-04-02 20:03:57 +03003282 DRM_MM_SEARCH_DEFAULT,
3283 DRM_MM_CREATE_DEFAULT);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003284 if (ret) {
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07003285 ret = i915_gem_evict_something(dev, vm, size, alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003286 obj->cache_level, flags);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003287 if (ret == 0)
3288 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003289
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003290 goto err_free_vma;
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003291 }
Ben Widawsky2f633152013-07-17 12:19:03 -07003292 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003293 obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003294 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003295 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003296 }
3297
Daniel Vetter74163902012-02-15 23:50:21 +01003298 ret = i915_gem_gtt_prepare_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003299 if (ret)
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003300 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003301
Ben Widawsky35c20a62013-05-31 11:28:48 -07003302 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -07003303 list_add_tail(&vma->mm_list, &vm->inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003304
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003305 if (i915_is_ggtt(vm)) {
3306 bool mappable, fenceable;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003307
Daniel Vetter49987092013-08-14 10:21:23 +02003308 fenceable = (vma->node.size == fence_size &&
3309 (vma->node.start & (fence_alignment - 1)) == 0);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003310
Daniel Vetter49987092013-08-14 10:21:23 +02003311 mappable = (vma->node.start + obj->base.size <=
3312 dev_priv->gtt.mappable_end);
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003313
Ben Widawsky5cacaac2013-07-31 17:00:13 -07003314 obj->map_and_fenceable = mappable && fenceable;
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003315 }
Daniel Vetter75e9e912010-11-04 17:11:09 +01003316
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003317 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003318
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003319 trace_i915_vma_bind(vma, flags);
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003320 vma->bind_vma(vma, obj->cache_level,
3321 flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3322
Chris Wilson42d6ab42012-07-26 11:49:32 +01003323 i915_gem_verify_gtt(dev);
Daniel Vetter262de142014-02-14 14:01:20 +01003324 return vma;
Ben Widawsky2f633152013-07-17 12:19:03 -07003325
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003326err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003327 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003328err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003329 i915_gem_vma_destroy(vma);
Daniel Vetter262de142014-02-14 14:01:20 +01003330 vma = ERR_PTR(ret);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003331err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003332 i915_gem_object_unpin_pages(obj);
Daniel Vetter262de142014-02-14 14:01:20 +01003333 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003334}
3335
Chris Wilson000433b2013-08-08 14:41:09 +01003336bool
Chris Wilson2c225692013-08-09 12:26:45 +01003337i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3338 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003339{
Eric Anholt673a3942008-07-30 12:06:12 -07003340 /* If we don't have a page list set up, then we're not pinned
3341 * to GPU, and we can ignore the cache flush because it'll happen
3342 * again at bind time.
3343 */
Chris Wilson05394f32010-11-08 19:18:58 +00003344 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003345 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003346
Imre Deak769ce462013-02-13 21:56:05 +02003347 /*
3348 * Stolen memory is always coherent with the GPU as it is explicitly
3349 * marked as wc by the system, or the system is cache-coherent.
3350 */
3351 if (obj->stolen)
Chris Wilson000433b2013-08-08 14:41:09 +01003352 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003353
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003354 /* If the GPU is snooping the contents of the CPU cache,
3355 * we do not need to manually clear the CPU cache lines. However,
3356 * the caches are only snooped when the render cache is
3357 * flushed/invalidated. As we always have to emit invalidations
3358 * and flushes when moving into and out of the RENDER domain, correct
3359 * snooping behaviour occurs naturally as the result of our domain
3360 * tracking.
3361 */
Chris Wilson2c225692013-08-09 12:26:45 +01003362 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
Chris Wilson000433b2013-08-08 14:41:09 +01003363 return false;
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003364
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003365 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003366 drm_clflush_sg(obj->pages);
Chris Wilson000433b2013-08-08 14:41:09 +01003367
3368 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003369}
3370
3371/** Flushes the GTT write domain for the object if it's dirty. */
3372static void
Chris Wilson05394f32010-11-08 19:18:58 +00003373i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003374{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003375 uint32_t old_write_domain;
3376
Chris Wilson05394f32010-11-08 19:18:58 +00003377 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003378 return;
3379
Chris Wilson63256ec2011-01-04 18:42:07 +00003380 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003381 * to it immediately go to main memory as far as we know, so there's
3382 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003383 *
3384 * However, we do have to enforce the order so that all writes through
3385 * the GTT land before any writes to the device, such as updates to
3386 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08003387 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003388 wmb();
3389
Chris Wilson05394f32010-11-08 19:18:58 +00003390 old_write_domain = obj->base.write_domain;
3391 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003392
3393 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003394 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003395 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003396}
3397
3398/** Flushes the CPU write domain for the object if it's dirty. */
3399static void
Chris Wilson2c225692013-08-09 12:26:45 +01003400i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3401 bool force)
Eric Anholte47c68e2008-11-14 13:35:19 -08003402{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003403 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08003404
Chris Wilson05394f32010-11-08 19:18:58 +00003405 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003406 return;
3407
Chris Wilson000433b2013-08-08 14:41:09 +01003408 if (i915_gem_clflush_object(obj, force))
3409 i915_gem_chipset_flush(obj->base.dev);
3410
Chris Wilson05394f32010-11-08 19:18:58 +00003411 old_write_domain = obj->base.write_domain;
3412 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003413
3414 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003415 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003416 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003417}
3418
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003419/**
3420 * Moves a single object to the GTT read, and possibly write domain.
3421 *
3422 * This function returns when the move is complete, including waiting on
3423 * flushes to occur.
3424 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003425int
Chris Wilson20217462010-11-23 15:26:33 +00003426i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003427{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03003428 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003429 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003430 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003431
Eric Anholt02354392008-11-26 13:58:13 -08003432 /* Not valid to be called on unbound objects. */
Ben Widawsky98438772013-07-31 17:00:12 -07003433 if (!i915_gem_obj_bound_any(obj))
Eric Anholt02354392008-11-26 13:58:13 -08003434 return -EINVAL;
3435
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003436 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3437 return 0;
3438
Chris Wilson0201f1e2012-07-20 12:41:01 +01003439 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003440 if (ret)
3441 return ret;
3442
Chris Wilsonc8725f32014-03-17 12:21:55 +00003443 i915_gem_object_retire(obj);
Chris Wilson2c225692013-08-09 12:26:45 +01003444 i915_gem_object_flush_cpu_write_domain(obj, false);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003445
Chris Wilsond0a57782012-10-09 19:24:37 +01003446 /* Serialise direct access to this object with the barriers for
3447 * coherent writes from the GPU, by effectively invalidating the
3448 * GTT domain upon first access.
3449 */
3450 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3451 mb();
3452
Chris Wilson05394f32010-11-08 19:18:58 +00003453 old_write_domain = obj->base.write_domain;
3454 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003455
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003456 /* It should now be out of any other write domains, and we can update
3457 * the domain values for our changes.
3458 */
Chris Wilson05394f32010-11-08 19:18:58 +00003459 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3460 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003461 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003462 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3463 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3464 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003465 }
3466
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003467 trace_i915_gem_object_change_domain(obj,
3468 old_read_domains,
3469 old_write_domain);
3470
Chris Wilson8325a092012-04-24 15:52:35 +01003471 /* And bump the LRU for this access */
Ben Widawskyca191b12013-07-31 17:00:14 -07003472 if (i915_gem_object_is_inactive(obj)) {
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07003473 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Ben Widawskyca191b12013-07-31 17:00:14 -07003474 if (vma)
3475 list_move_tail(&vma->mm_list,
3476 &dev_priv->gtt.base.inactive_list);
3477
3478 }
Chris Wilson8325a092012-04-24 15:52:35 +01003479
Eric Anholte47c68e2008-11-14 13:35:19 -08003480 return 0;
3481}
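/*
 * Illustrative sketch of a typical caller (hypothetical snippet, hedged on
 * the surrounding helpers shown in this file): a CPU write through the
 * mappable aperture pins the object into the global GTT and then moves it
 * to the GTT domain, all while holding struct_mutex:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret) {
 *		i915_gem_object_ggtt_unpin(obj);
 *		return ret;
 *	}
 *
 *	... write through an io_mapping of the aperture ...
 *
 *	i915_gem_object_ggtt_unpin(obj);
 */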
3482
Chris Wilsone4ffd172011-04-04 09:44:39 +01003483int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3484 enum i915_cache_level cache_level)
3485{
Daniel Vetter7bddb012012-02-09 17:15:47 +01003486 struct drm_device *dev = obj->base.dev;
Chris Wilsondf6f7832014-03-21 07:40:56 +00003487 struct i915_vma *vma, *next;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003488 int ret;
3489
3490 if (obj->cache_level == cache_level)
3491 return 0;
3492
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003493 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003494 DRM_DEBUG("can not change the cache level of pinned objects\n");
3495 return -EBUSY;
3496 }
3497
Chris Wilsondf6f7832014-03-21 07:40:56 +00003498 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003499 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003500 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003501 if (ret)
3502 return ret;
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003503 }
Chris Wilson42d6ab42012-07-26 11:49:32 +01003504 }
3505
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003506 if (i915_gem_obj_bound_any(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003507 ret = i915_gem_object_finish_gpu(obj);
3508 if (ret)
3509 return ret;
3510
3511 i915_gem_object_finish_gtt(obj);
3512
3513 /* Before SandyBridge, you could not use tiling or fence
3514 * registers with snooped memory, so relinquish any fences
3515 * currently pointing to our region in the aperture.
3516 */
Chris Wilson42d6ab42012-07-26 11:49:32 +01003517 if (INTEL_INFO(dev)->gen < 6) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003518 ret = i915_gem_object_put_fence(obj);
3519 if (ret)
3520 return ret;
3521 }
3522
Ben Widawsky6f65e292013-12-06 14:10:56 -08003523 list_for_each_entry(vma, &obj->vma_list, vma_link)
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003524 if (drm_mm_node_allocated(&vma->node))
3525 vma->bind_vma(vma, cache_level,
3526 obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003527 }
3528
Chris Wilson2c225692013-08-09 12:26:45 +01003529 list_for_each_entry(vma, &obj->vma_list, vma_link)
3530 vma->node.color = cache_level;
3531 obj->cache_level = cache_level;
3532
3533 if (cpu_write_needs_clflush(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003534 u32 old_read_domains, old_write_domain;
3535
3536 /* If we're coming from LLC cached, then we haven't
3537 * actually been tracking whether the data is in the
3538 * CPU cache or not, since we only allow one bit set
3539 * in obj->write_domain and have been skipping the clflushes.
3540 * Just set it to the CPU cache for now.
3541 */
Chris Wilsonc8725f32014-03-17 12:21:55 +00003542 i915_gem_object_retire(obj);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003543 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003544
3545 old_read_domains = obj->base.read_domains;
3546 old_write_domain = obj->base.write_domain;
3547
3548 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3549 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3550
3551 trace_i915_gem_object_change_domain(obj,
3552 old_read_domains,
3553 old_write_domain);
3554 }
3555
Chris Wilson42d6ab42012-07-26 11:49:32 +01003556 i915_gem_verify_gtt(dev);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003557 return 0;
3558}
3559
Ben Widawsky199adf42012-09-21 17:01:20 -07003560int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3561 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003562{
Ben Widawsky199adf42012-09-21 17:01:20 -07003563 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003564 struct drm_i915_gem_object *obj;
3565 int ret;
3566
3567 ret = i915_mutex_lock_interruptible(dev);
3568 if (ret)
3569 return ret;
3570
3571 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3572 if (&obj->base == NULL) {
3573 ret = -ENOENT;
3574 goto unlock;
3575 }
3576
Chris Wilson651d7942013-08-08 14:41:10 +01003577 switch (obj->cache_level) {
3578 case I915_CACHE_LLC:
3579 case I915_CACHE_L3_LLC:
3580 args->caching = I915_CACHING_CACHED;
3581 break;
3582
Chris Wilson4257d3b2013-08-08 14:41:11 +01003583 case I915_CACHE_WT:
3584 args->caching = I915_CACHING_DISPLAY;
3585 break;
3586
Chris Wilson651d7942013-08-08 14:41:10 +01003587 default:
3588 args->caching = I915_CACHING_NONE;
3589 break;
3590 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003591
3592 drm_gem_object_unreference(&obj->base);
3593unlock:
3594 mutex_unlock(&dev->struct_mutex);
3595 return ret;
3596}
3597
Ben Widawsky199adf42012-09-21 17:01:20 -07003598int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3599 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003600{
Ben Widawsky199adf42012-09-21 17:01:20 -07003601 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003602 struct drm_i915_gem_object *obj;
3603 enum i915_cache_level level;
3604 int ret;
3605
Ben Widawsky199adf42012-09-21 17:01:20 -07003606 switch (args->caching) {
3607 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003608 level = I915_CACHE_NONE;
3609 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003610 case I915_CACHING_CACHED:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003611 level = I915_CACHE_LLC;
3612 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003613 case I915_CACHING_DISPLAY:
3614 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3615 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003616 default:
3617 return -EINVAL;
3618 }
3619
Ben Widawsky3bc29132012-09-26 16:15:20 -07003620 ret = i915_mutex_lock_interruptible(dev);
3621 if (ret)
3622 return ret;
3623
Chris Wilsone6994ae2012-07-10 10:27:08 +01003624 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3625 if (&obj->base == NULL) {
3626 ret = -ENOENT;
3627 goto unlock;
3628 }
3629
3630 ret = i915_gem_object_set_cache_level(obj, level);
3631
3632 drm_gem_object_unreference(&obj->base);
3633unlock:
3634 mutex_unlock(&dev->struct_mutex);
3635 return ret;
3636}
3637
Chris Wilsoncc98b412013-08-09 12:25:09 +01003638static bool is_pin_display(struct drm_i915_gem_object *obj)
3639{
3640 /* There are 3 sources that pin objects:
3641 * 1. The display engine (scanouts, sprites, cursors);
3642 * 2. Reservations for execbuffer;
3643 * 3. The user.
3644 *
3645 * We can ignore reservations as we hold the struct_mutex and
3646 * are only called outside of the reservation path. The user
3647 * can only increment pin_count once, and so if after
3648 * subtracting the potential reference by the user, any pin_count
3649 * remains, it must be due to another use by the display engine.
3650 */
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003651 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003652}
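/*
 * Worked example of the accounting above (illustrative numbers): with
 * vma->pin_count == 2 and obj->user_pin_count == 1, subtracting the single
 * potential user reference leaves one pin outstanding, so the object is
 * still treated as pinned for display. With pin_count == 1 and a user pin,
 * the remainder is zero and no display pin is assumed.
 */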
3653
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003654/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003655 * Prepare buffer for display plane (scanout, cursors, etc).
3656 * Can be called from an uninterruptible phase (modesetting) and allows
3657 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003658 */
3659int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003660i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3661 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00003662 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003663{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003664 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003665 int ret;
3666
Chris Wilson0be73282010-12-06 14:36:27 +00003667 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07003668 ret = i915_gem_object_sync(obj, pipelined);
3669 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003670 return ret;
3671 }
3672
Chris Wilsoncc98b412013-08-09 12:25:09 +01003673 /* Mark the pin_display early so that we account for the
3674 * display coherency whilst setting up the cache domains.
3675 */
3676 obj->pin_display = true;
3677
Eric Anholta7ef0642011-03-29 16:59:54 -07003678 /* The display engine is not coherent with the LLC cache on gen6. As
3679 * a result, we make sure that the pinning that is about to occur is
3680 * done with uncached PTEs. This is lowest common denominator for all
3681 * chipsets.
3682 *
3683 * However for gen6+, we could do better by using the GFDT bit instead
3684 * of uncaching, which would allow us to flush all the LLC-cached data
3685 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3686 */
Chris Wilson651d7942013-08-08 14:41:10 +01003687 ret = i915_gem_object_set_cache_level(obj,
3688 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Eric Anholta7ef0642011-03-29 16:59:54 -07003689 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003690 goto err_unpin_display;
Eric Anholta7ef0642011-03-29 16:59:54 -07003691
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003692 /* As the user may map the buffer once pinned in the display plane
3693 * (e.g. libkms for the bootup splash), we have to ensure that we
3694 * always use map_and_fenceable for all scanout buffers.
3695 */
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003696 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003697 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003698 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003699
Chris Wilson2c225692013-08-09 12:26:45 +01003700 i915_gem_object_flush_cpu_write_domain(obj, true);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003701
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003702 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003703 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003704
3705 /* It should now be out of any other write domains, and we can update
3706 * the domain values for our changes.
3707 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003708 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003709 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003710
3711 trace_i915_gem_object_change_domain(obj,
3712 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003713 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003714
3715 return 0;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003716
3717err_unpin_display:
3718 obj->pin_display = is_pin_display(obj);
3719 return ret;
3720}
3721
3722void
3723i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3724{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003725 i915_gem_object_ggtt_unpin(obj);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003726 obj->pin_display = is_pin_display(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003727}
3728
Chris Wilson85345512010-11-13 09:49:11 +00003729int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003730i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003731{
Chris Wilson88241782011-01-07 17:09:48 +00003732 int ret;
3733
Chris Wilsona8198ee2011-04-13 22:04:09 +01003734 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003735 return 0;
3736
Chris Wilson0201f1e2012-07-20 12:41:01 +01003737 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsonc501ae72011-12-14 13:57:23 +01003738 if (ret)
3739 return ret;
3740
Chris Wilsona8198ee2011-04-13 22:04:09 +01003741 /* Ensure that we invalidate the GPU's caches and TLBs. */
3742 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003743 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003744}
3745
Eric Anholte47c68e2008-11-14 13:35:19 -08003746/**
 3747 * Moves a single object to the CPU read, and possibly write, domain.
3748 *
3749 * This function returns when the move is complete, including waiting on
3750 * flushes to occur.
3751 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003752int
Chris Wilson919926a2010-11-12 13:42:53 +00003753i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003754{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003755 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003756 int ret;
3757
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003758 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3759 return 0;
3760
Chris Wilson0201f1e2012-07-20 12:41:01 +01003761 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003762 if (ret)
3763 return ret;
3764
Chris Wilsonc8725f32014-03-17 12:21:55 +00003765 i915_gem_object_retire(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003766 i915_gem_object_flush_gtt_write_domain(obj);
3767
Chris Wilson05394f32010-11-08 19:18:58 +00003768 old_write_domain = obj->base.write_domain;
3769 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003770
Eric Anholte47c68e2008-11-14 13:35:19 -08003771 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003772 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003773 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003774
Chris Wilson05394f32010-11-08 19:18:58 +00003775 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003776 }
3777
3778 /* It should now be out of any other write domains, and we can update
3779 * the domain values for our changes.
3780 */
Chris Wilson05394f32010-11-08 19:18:58 +00003781 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003782
3783 /* If we're writing through the CPU, then the GPU read domains will
3784 * need to be invalidated at next use.
3785 */
3786 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003787 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3788 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003789 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003790
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003791 trace_i915_gem_object_change_domain(obj,
3792 old_read_domains,
3793 old_write_domain);
3794
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003795 return 0;
3796}
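/*
 * Illustrative sketch (hypothetical caller): dma-buf style CPU access is
 * the archetypal user, moving the object into the CPU domain under
 * struct_mutex before its pages are dereferenced directly:
 *
 *	ret = i915_mutex_lock_interruptible(dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, write);
 *	mutex_unlock(&dev->struct_mutex);
 *	return ret;
 */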
3797
Eric Anholt673a3942008-07-30 12:06:12 -07003798/* Throttle our rendering by waiting until the ring has completed our requests
3799 * emitted over 20 msec ago.
3800 *
Eric Anholtb9624422009-06-03 07:27:35 +00003801 * Note that if we were to use the current jiffies each time around the loop,
3802 * we wouldn't escape the function with any frames outstanding if the time to
3803 * render a frame was over 20ms.
3804 *
Eric Anholt673a3942008-07-30 12:06:12 -07003805 * This should get us reasonable parallelism between CPU and GPU but also
3806 * relatively low latency when blocking on a particular request to finish.
3807 */
3808static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003809i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003810{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003811 struct drm_i915_private *dev_priv = dev->dev_private;
3812 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003813 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003814 struct drm_i915_gem_request *request;
3815 struct intel_ring_buffer *ring = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01003816 unsigned reset_counter;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003817 u32 seqno = 0;
3818 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003819
Daniel Vetter308887a2012-11-14 17:14:06 +01003820 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3821 if (ret)
3822 return ret;
3823
3824 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3825 if (ret)
3826 return ret;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003827
Chris Wilson1c255952010-09-26 11:03:27 +01003828 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003829 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003830 if (time_after_eq(request->emitted_jiffies, recent_enough))
3831 break;
3832
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003833 ring = request->ring;
3834 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003835 }
Daniel Vetterf69061b2012-12-06 09:01:42 +01003836 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson1c255952010-09-26 11:03:27 +01003837 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003838
3839 if (seqno == 0)
3840 return 0;
3841
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003842 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003843 if (ret == 0)
3844 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003845
Eric Anholt673a3942008-07-30 12:06:12 -07003846 return ret;
3847}
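/*
 * Worked example of the window above (illustrative timings): with
 * recent_enough computed once as jiffies - 20ms, a client that emitted
 * requests at t-30ms, t-25ms and t-5ms waits only for the t-25ms request
 * (the newest one older than 20ms); the t-5ms request is left outstanding,
 * which is what keeps the CPU queueing work ahead of the GPU.
 */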
3848
Eric Anholt673a3942008-07-30 12:06:12 -07003849int
Chris Wilson05394f32010-11-08 19:18:58 +00003850i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07003851 struct i915_address_space *vm,
Chris Wilson05394f32010-11-08 19:18:58 +00003852 uint32_t alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003853 unsigned flags)
Eric Anholt673a3942008-07-30 12:06:12 -07003854{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003855 struct i915_vma *vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003856 int ret;
3857
Daniel Vetterbf3d1492014-02-14 14:01:12 +01003858 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003859 return -EINVAL;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003860
3861 vma = i915_gem_obj_to_vma(obj, vm);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003862 if (vma) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003863 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3864 return -EBUSY;
3865
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003866 if ((alignment &&
3867 vma->node.start & (alignment - 1)) ||
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003868 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003869 WARN(vma->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003870 "bo is already pinned with incorrect alignment:"
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003871 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003872 " obj->map_and_fenceable=%d\n",
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003873 i915_gem_obj_offset(obj, vm), alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003874 flags & PIN_MAPPABLE,
Chris Wilson05394f32010-11-08 19:18:58 +00003875 obj->map_and_fenceable);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003876 ret = i915_vma_unbind(vma);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003877 if (ret)
3878 return ret;
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003879
3880 vma = NULL;
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003881 }
3882 }
3883
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003884 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
Daniel Vetter262de142014-02-14 14:01:20 +01003885 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
3886 if (IS_ERR(vma))
3887 return PTR_ERR(vma);
Chris Wilson22c344e2009-02-11 14:26:45 +00003888 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003889
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003890 if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
3891 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
Daniel Vetter74898d72012-02-15 23:50:22 +01003892
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003893 vma->pin_count++;
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003894 if (flags & PIN_MAPPABLE)
 3895		obj->pin_mappable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07003896
3897 return 0;
3898}
3899
3900void
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003901i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003902{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003903 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003904
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003905 BUG_ON(!vma);
3906 BUG_ON(vma->pin_count == 0);
3907 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3908
3909 if (--vma->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003910 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003911}
3912
3913int
3914i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003915 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003916{
3917 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003918 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003919 int ret;
3920
Daniel Vetter02f6bcc2013-12-18 16:30:22 +01003921 if (INTEL_INFO(dev)->gen >= 6)
3922 return -ENODEV;
3923
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003924 ret = i915_mutex_lock_interruptible(dev);
3925 if (ret)
3926 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003927
Chris Wilson05394f32010-11-08 19:18:58 +00003928 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003929 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003930 ret = -ENOENT;
3931 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003932 }
Eric Anholt673a3942008-07-30 12:06:12 -07003933
Chris Wilson05394f32010-11-08 19:18:58 +00003934 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003935 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00003936 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003937 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003938 }
3939
Chris Wilson05394f32010-11-08 19:18:58 +00003940 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003941 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
Jesse Barnes79e53942008-11-07 14:24:08 -08003942 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003943 ret = -EINVAL;
3944 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003945 }
3946
Daniel Vetteraa5f8022013-10-10 14:46:37 +02003947 if (obj->user_pin_count == ULONG_MAX) {
3948 ret = -EBUSY;
3949 goto out;
3950 }
3951
Chris Wilson93be8782013-01-02 10:31:22 +00003952 if (obj->user_pin_count == 0) {
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003953 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003954 if (ret)
3955 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003956 }
3957
Chris Wilson93be8782013-01-02 10:31:22 +00003958 obj->user_pin_count++;
3959 obj->pin_filp = file;
3960
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003961 args->offset = i915_gem_obj_ggtt_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003962out:
Chris Wilson05394f32010-11-08 19:18:58 +00003963 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003964unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003965 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003966 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003967}
3968
3969int
3970i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003971 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003972{
3973 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003974 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003975 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003976
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003977 ret = i915_mutex_lock_interruptible(dev);
3978 if (ret)
3979 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003980
Chris Wilson05394f32010-11-08 19:18:58 +00003981 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003982 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003983 ret = -ENOENT;
3984 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003985 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003986
Chris Wilson05394f32010-11-08 19:18:58 +00003987 if (obj->pin_filp != file) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003988 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
Jesse Barnes79e53942008-11-07 14:24:08 -08003989 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003990 ret = -EINVAL;
3991 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003992 }
Chris Wilson05394f32010-11-08 19:18:58 +00003993 obj->user_pin_count--;
3994 if (obj->user_pin_count == 0) {
3995 obj->pin_filp = NULL;
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003996 i915_gem_object_ggtt_unpin(obj);
Jesse Barnes79e53942008-11-07 14:24:08 -08003997 }
Eric Anholt673a3942008-07-30 12:06:12 -07003998
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003999out:
Chris Wilson05394f32010-11-08 19:18:58 +00004000 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004001unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004002 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004003 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004004}
4005
4006int
4007i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00004008 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004009{
4010 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004011 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01004012 int ret;
4013
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004014 ret = i915_mutex_lock_interruptible(dev);
4015 if (ret)
4016 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004017
Chris Wilson05394f32010-11-08 19:18:58 +00004018 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004019 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004020 ret = -ENOENT;
4021 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004022 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08004023
Chris Wilson0be555b2010-08-04 15:36:30 +01004024 /* Count all active objects as busy, even if they are currently not used
4025 * by the gpu. Users of this interface expect objects to eventually
4026 * become non-busy without any further actions, therefore emit any
4027 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08004028 */
Daniel Vetter30dfebf2012-06-01 15:21:23 +02004029 ret = i915_gem_object_flush_active(obj);
4030
Chris Wilson05394f32010-11-08 19:18:58 +00004031 args->busy = obj->active;
Chris Wilsone9808ed2012-07-04 12:25:08 +01004032 if (obj->ring) {
4033 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4034 args->busy |= intel_ring_flag(obj->ring) << 16;
4035 }
Eric Anholt673a3942008-07-30 12:06:12 -07004036
Chris Wilson05394f32010-11-08 19:18:58 +00004037 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004038unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004039 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004040 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004041}
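/*
 * Illustrative decode of the result (hypothetical userspace snippet): the
 * low half reports whether the object is still active, and the upper half
 * carries the ring flag of the last ring to use it:
 *
 *	int busy = args.busy & 0xffff;	// 0 or 1
 *	int ring_flag = args.busy >> 16;
 */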
4042
4043int
4044i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4045 struct drm_file *file_priv)
4046{
Akshay Joshi0206e352011-08-16 15:34:10 -04004047 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004048}
4049
Chris Wilson3ef94da2009-09-14 16:50:29 +01004050int
4051i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4052 struct drm_file *file_priv)
4053{
4054 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004055 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004056 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004057
4058 switch (args->madv) {
4059 case I915_MADV_DONTNEED:
4060 case I915_MADV_WILLNEED:
4061 break;
4062 default:
4063 return -EINVAL;
4064 }
4065
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004066 ret = i915_mutex_lock_interruptible(dev);
4067 if (ret)
4068 return ret;
4069
Chris Wilson05394f32010-11-08 19:18:58 +00004070 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004071 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004072 ret = -ENOENT;
4073 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004074 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01004075
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004076 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004077 ret = -EINVAL;
4078 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004079 }
4080
Chris Wilson05394f32010-11-08 19:18:58 +00004081 if (obj->madv != __I915_MADV_PURGED)
4082 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004083
Chris Wilson6c085a72012-08-20 11:40:46 +02004084 /* if the object is no longer attached, discard its backing storage */
4085 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01004086 i915_gem_object_truncate(obj);
4087
Chris Wilson05394f32010-11-08 19:18:58 +00004088 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004089
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004090out:
Chris Wilson05394f32010-11-08 19:18:58 +00004091 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004092unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004093 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004094 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004095}
4096
Chris Wilson37e680a2012-06-07 15:38:42 +01004097void i915_gem_object_init(struct drm_i915_gem_object *obj,
4098 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004099{
Ben Widawsky35c20a62013-05-31 11:28:48 -07004100 INIT_LIST_HEAD(&obj->global_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004101 INIT_LIST_HEAD(&obj->ring_list);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02004102 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004103 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004104
Chris Wilson37e680a2012-06-07 15:38:42 +01004105 obj->ops = ops;
4106
Chris Wilson0327d6b2012-08-11 15:41:06 +01004107 obj->fence_reg = I915_FENCE_REG_NONE;
4108 obj->madv = I915_MADV_WILLNEED;
4109 /* Avoid an unnecessary call to unbind on the first bind. */
4110 obj->map_and_fenceable = true;
4111
4112 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4113}
4114
Chris Wilson37e680a2012-06-07 15:38:42 +01004115static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4116 .get_pages = i915_gem_object_get_pages_gtt,
4117 .put_pages = i915_gem_object_put_pages_gtt,
4118};
4119
Chris Wilson05394f32010-11-08 19:18:58 +00004120struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4121 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004122{
Daniel Vetterc397b902010-04-09 19:05:07 +00004123 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004124 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004125 gfp_t mask;
Daniel Vetterc397b902010-04-09 19:05:07 +00004126
Chris Wilson42dcedd2012-11-15 11:32:30 +00004127 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004128 if (obj == NULL)
4129 return NULL;
4130
4131 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
Chris Wilson42dcedd2012-11-15 11:32:30 +00004132 i915_gem_object_free(obj);
Daniel Vetterc397b902010-04-09 19:05:07 +00004133 return NULL;
4134 }
4135
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004136 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4137 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4138 /* 965gm cannot relocate objects above 4GiB. */
4139 mask &= ~__GFP_HIGHMEM;
4140 mask |= __GFP_DMA32;
4141 }
4142
Al Viro496ad9a2013-01-23 17:07:38 -05004143 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004144 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004145
Chris Wilson37e680a2012-06-07 15:38:42 +01004146 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004147
Daniel Vetterc397b902010-04-09 19:05:07 +00004148 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4149 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4150
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004151 if (HAS_LLC(dev)) {
4152 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004153 * cache) for about a 10% performance improvement
4154 * compared to uncached. Graphics requests other than
4155 * display scanout are coherent with the CPU in
4156 * accessing this cache. This means in this mode we
4157 * don't need to clflush on the CPU side, and on the
4158 * GPU side we only need to flush internal caches to
4159 * get data visible to the CPU.
4160 *
4161 * However, we maintain the display planes as UC, and so
4162 * need to rebind when first used as such.
4163 */
4164 obj->cache_level = I915_CACHE_LLC;
4165 } else
4166 obj->cache_level = I915_CACHE_NONE;
4167
Daniel Vetterd861e332013-07-24 23:25:03 +02004168 trace_i915_gem_object_create(obj);
4169
Chris Wilson05394f32010-11-08 19:18:58 +00004170 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004171}
4172
Chris Wilson1488fc02012-04-24 15:47:31 +01004173void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004174{
Chris Wilson1488fc02012-04-24 15:47:31 +01004175 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004176 struct drm_device *dev = obj->base.dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004177 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004178 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004179
Paulo Zanonif65c9162013-11-27 18:20:34 -02004180 intel_runtime_pm_get(dev_priv);
4181
Chris Wilson26e12f892011-03-20 11:20:19 +00004182 trace_i915_gem_object_destroy(obj);
4183
Chris Wilson1488fc02012-04-24 15:47:31 +01004184 if (obj->phys_obj)
4185 i915_gem_detach_phys_object(dev, obj);
4186
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004187 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004188 int ret;
4189
4190 vma->pin_count = 0;
4191 ret = i915_vma_unbind(vma);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004192 if (WARN_ON(ret == -ERESTARTSYS)) {
4193 bool was_interruptible;
Chris Wilson1488fc02012-04-24 15:47:31 +01004194
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004195 was_interruptible = dev_priv->mm.interruptible;
4196 dev_priv->mm.interruptible = false;
Chris Wilson1488fc02012-04-24 15:47:31 +01004197
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004198 WARN_ON(i915_vma_unbind(vma));
Chris Wilson1488fc02012-04-24 15:47:31 +01004199
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004200 dev_priv->mm.interruptible = was_interruptible;
4201 }
Chris Wilson1488fc02012-04-24 15:47:31 +01004202 }
4203
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004204 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4205 * before progressing. */
4206 if (obj->stolen)
4207 i915_gem_object_unpin_pages(obj);
4208
Ben Widawsky401c29f2013-05-31 11:28:47 -07004209 if (WARN_ON(obj->pages_pin_count))
4210 obj->pages_pin_count = 0;
Chris Wilson37e680a2012-06-07 15:38:42 +01004211 i915_gem_object_put_pages(obj);
Chris Wilsond8cb5082012-08-11 15:41:03 +01004212 i915_gem_object_free_mmap_offset(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +00004213 i915_gem_object_release_stolen(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004214
Chris Wilson9da3da62012-06-01 15:20:22 +01004215 BUG_ON(obj->pages);
4216
Chris Wilson2f745ad2012-09-04 21:02:58 +01004217 if (obj->base.import_attach)
4218 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004219
Chris Wilson05394f32010-11-08 19:18:58 +00004220 drm_gem_object_release(&obj->base);
4221 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004222
Chris Wilson05394f32010-11-08 19:18:58 +00004223 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004224 i915_gem_object_free(obj);
Paulo Zanonif65c9162013-11-27 18:20:34 -02004225
4226 intel_runtime_pm_put(dev_priv);
Chris Wilsonbe726152010-07-23 23:18:50 +01004227}
4228
Daniel Vettere656a6c2013-08-14 14:14:04 +02004229struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
Ben Widawsky2f633152013-07-17 12:19:03 -07004230 struct i915_address_space *vm)
4231{
Daniel Vettere656a6c2013-08-14 14:14:04 +02004232 struct i915_vma *vma;
4233 list_for_each_entry(vma, &obj->vma_list, vma_link)
4234 if (vma->vm == vm)
4235 return vma;
4236
4237 return NULL;
4238}
4239
Ben Widawsky2f633152013-07-17 12:19:03 -07004240void i915_gem_vma_destroy(struct i915_vma *vma)
4241{
4242 WARN_ON(vma->node.allocated);
Chris Wilsonaaa05662013-08-20 12:56:40 +01004243
4244 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4245 if (!list_empty(&vma->exec_list))
4246 return;
4247
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004248 list_del(&vma->vma_link);
Daniel Vetterb93dab62013-08-26 11:23:47 +02004249
Ben Widawsky2f633152013-07-17 12:19:03 -07004250 kfree(vma);
4251}
4252
Chris Wilsone3efda42014-04-09 09:19:41 +01004253static void
4254i915_gem_stop_ringbuffers(struct drm_device *dev)
4255{
4256 struct drm_i915_private *dev_priv = dev->dev_private;
4257 struct intel_ring_buffer *ring;
4258 int i;
4259
4260 for_each_ring(ring, dev_priv, i)
4261 intel_stop_ring_buffer(ring);
4262}
4263
Jesse Barnes5669fca2009-02-17 15:13:31 -08004264int
Chris Wilson45c5f202013-10-16 11:50:01 +01004265i915_gem_suspend(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004266{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004267 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson45c5f202013-10-16 11:50:01 +01004268 int ret = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004269
Chris Wilson45c5f202013-10-16 11:50:01 +01004270 mutex_lock(&dev->struct_mutex);
Chris Wilsonf7403342013-09-13 23:57:04 +01004271 if (dev_priv->ums.mm_suspended)
Chris Wilson45c5f202013-10-16 11:50:01 +01004272 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07004273
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004274 ret = i915_gpu_idle(dev);
Chris Wilsonf7403342013-09-13 23:57:04 +01004275 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004276 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004277
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004278 i915_gem_retire_requests(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004279
Chris Wilson29105cc2010-01-07 10:39:13 +00004280 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01004281 if (!drm_core_check_feature(dev, DRIVER_MODESET))
Chris Wilson6c085a72012-08-20 11:40:46 +02004282 i915_gem_evict_everything(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004283
Chris Wilson29105cc2010-01-07 10:39:13 +00004284 i915_kernel_lost_context(dev);
Chris Wilsone3efda42014-04-09 09:19:41 +01004285 i915_gem_stop_ringbuffers(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004286
Chris Wilson45c5f202013-10-16 11:50:01 +01004287 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4288 * We need to replace this with a semaphore, or something.
4289 * And not confound ums.mm_suspended!
4290 */
4291 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4292 DRIVER_MODESET);
4293 mutex_unlock(&dev->struct_mutex);
4294
4295 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004296 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004297 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004298
Eric Anholt673a3942008-07-30 12:06:12 -07004299 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004300
4301err:
4302 mutex_unlock(&dev->struct_mutex);
4303 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004304}
4305
Ben Widawskyc3787e22013-09-17 21:12:44 -07004306int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
Ben Widawskyb9524a12012-05-25 16:56:24 -07004307{
Ben Widawskyc3787e22013-09-17 21:12:44 -07004308 struct drm_device *dev = ring->dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004309 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004310 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4311 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
Ben Widawskyc3787e22013-09-17 21:12:44 -07004312 int i, ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004313
Ben Widawsky040d2ba2013-09-19 11:01:40 -07004314 if (!HAS_L3_DPF(dev) || !remap_info)
Ben Widawskyc3787e22013-09-17 21:12:44 -07004315 return 0;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004316
Ben Widawskyc3787e22013-09-17 21:12:44 -07004317 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4318 if (ret)
4319 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004320
Ben Widawskyc3787e22013-09-17 21:12:44 -07004321 /*
4322 * Note: We do not worry about the concurrent register cacheline hang
4323 * here because no other code should access these registers other than
4324 * at initialization time.
4325 */
Ben Widawskyb9524a12012-05-25 16:56:24 -07004326 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
Ben Widawskyc3787e22013-09-17 21:12:44 -07004327 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4328 intel_ring_emit(ring, reg_base + i);
4329 intel_ring_emit(ring, remap_info[i/4]);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004330 }
4331
Ben Widawskyc3787e22013-09-17 21:12:44 -07004332 intel_ring_advance(ring);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004333
Ben Widawskyc3787e22013-09-17 21:12:44 -07004334 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004335}
4336
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004337void i915_gem_init_swizzling(struct drm_device *dev)
4338{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004339 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004340
Daniel Vetter11782b02012-01-31 16:47:55 +01004341 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004342 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4343 return;
4344
4345 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4346 DISP_TILE_SURFACE_SWIZZLING);
4347
Daniel Vetter11782b02012-01-31 16:47:55 +01004348 if (IS_GEN5(dev))
4349 return;
4350
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004351 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4352 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004353 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004354 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004355 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky31a53362013-11-02 21:07:04 -07004356 else if (IS_GEN8(dev))
4357 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004358 else
4359 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004360}
Daniel Vettere21af882012-02-09 20:53:27 +01004361
Chris Wilson67b1b572012-07-05 23:49:40 +01004362static bool
4363intel_enable_blt(struct drm_device *dev)
4364{
4365 if (!HAS_BLT(dev))
4366 return false;
4367
4368 /* The blitter was dysfunctional on early prototypes */
4369 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4370 DRM_INFO("BLT not supported on this pre-production hardware;"
4371 " graphics performance will be degraded.\n");
4372 return false;
4373 }
4374
4375 return true;
4376}
4377
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004378static int i915_gem_init_rings(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004379{
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004380 struct drm_i915_private *dev_priv = dev->dev_private;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004381 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004382
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004383 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004384 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00004385 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004386
4387 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004388 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004389 if (ret)
4390 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004391 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004392
Chris Wilson67b1b572012-07-05 23:49:40 +01004393 if (intel_enable_blt(dev)) {
Chris Wilson549f7362010-10-19 11:19:32 +01004394 ret = intel_init_blt_ring_buffer(dev);
4395 if (ret)
4396 goto cleanup_bsd_ring;
4397 }
4398
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004399 if (HAS_VEBOX(dev)) {
4400 ret = intel_init_vebox_ring_buffer(dev);
4401 if (ret)
4402 goto cleanup_blt_ring;
4403 }
4404
Zhao Yakui845f74a2014-04-17 10:37:37 +08004405 if (HAS_BSD2(dev)) {
4406 ret = intel_init_bsd2_ring_buffer(dev);
4407 if (ret)
4408 goto cleanup_vebox_ring;
4409 }
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004410
Mika Kuoppala99433932013-01-22 14:12:17 +02004411 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4412 if (ret)
Zhao Yakui845f74a2014-04-17 10:37:37 +08004413 goto cleanup_bsd2_ring;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004414
4415 return 0;
4416
Zhao Yakui845f74a2014-04-17 10:37:37 +08004417cleanup_bsd2_ring:
4418 intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004419cleanup_vebox_ring:
4420 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004421cleanup_blt_ring:
4422 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4423cleanup_bsd_ring:
4424 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4425cleanup_render_ring:
4426 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4427
4428 return ret;
4429}
4430
4431int
4432i915_gem_init_hw(struct drm_device *dev)
4433{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004434 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004435 int ret, i;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004436
4437 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4438 return -EIO;
4439
Ben Widawsky59124502013-07-04 11:02:05 -07004440 if (dev_priv->ellc_size)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004441 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004442
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004443 if (IS_HASWELL(dev))
4444 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4445 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004446
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004447 if (HAS_PCH_NOP(dev)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004448 if (IS_IVYBRIDGE(dev)) {
4449 u32 temp = I915_READ(GEN7_MSG_CTL);
4450 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4451 I915_WRITE(GEN7_MSG_CTL, temp);
4452 } else if (INTEL_INFO(dev)->gen >= 7) {
4453 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4454 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4455 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4456 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004457 }
4458
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004459 i915_gem_init_swizzling(dev);
4460
4461 ret = i915_gem_init_rings(dev);
4462 if (ret)
Mika Kuoppala99433932013-01-22 14:12:17 +02004463 return ret;
4464
Ben Widawskyc3787e22013-09-17 21:12:44 -07004465 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4466 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4467
Ben Widawsky254f9652012-06-04 14:42:42 -07004468 /*
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004469 * XXX: Contexts should only be initialized once. Doing a switch to the
4470 * default context switch however is something we'd like to do after
4471 * reset or thaw (the latter may not actually be necessary for HW, but
4472 * goes with our code better). Context switching requires rings (for
4473 * the do_switch), but before enabling PPGTT. So don't move this.
Ben Widawsky254f9652012-06-04 14:42:42 -07004474 */
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004475 ret = i915_gem_context_enable(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01004476 if (ret && ret != -EIO) {
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004477 DRM_ERROR("Context enable failed %d\n", ret);
Chris Wilson60990322014-04-09 09:19:42 +01004478 i915_gem_cleanup_ringbuffer(dev);
Ben Widawskyb7c36d22013-04-08 18:43:56 -07004479 }
Daniel Vettere21af882012-02-09 20:53:27 +01004480
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004481 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004482}
4483
Chris Wilson1070a422012-04-24 15:47:41 +01004484int i915_gem_init(struct drm_device *dev)
4485{
4486 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson1070a422012-04-24 15:47:41 +01004487 int ret;
4488
Chris Wilson1070a422012-04-24 15:47:41 +01004489 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004490
4491 if (IS_VALLEYVIEW(dev)) {
4492 /* VLVA0 (potential hack), BIOS isn't actually waking us */
Imre Deak981a5ae2014-04-14 20:24:22 +03004493 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4494 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4495 VLV_GTLC_ALLOWWAKEACK), 10))
Jesse Barnesd62b4892013-03-08 10:45:53 -08004496 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4497 }
4498
Ben Widawskyd7e50082012-12-18 10:31:25 -08004499 i915_gem_init_global_gtt(dev);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004500
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004501 ret = i915_gem_context_init(dev);
Mika Kuoppalae3848692014-01-31 17:14:02 +02004502 if (ret) {
4503 mutex_unlock(&dev->struct_mutex);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004504 return ret;
Mika Kuoppalae3848692014-01-31 17:14:02 +02004505 }
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004506
Chris Wilson1070a422012-04-24 15:47:41 +01004507 ret = i915_gem_init_hw(dev);
Chris Wilson60990322014-04-09 09:19:42 +01004508 if (ret == -EIO) {
4509 /* Allow ring initialisation to fail by marking the GPU as
4510 * wedged. But we only want to do this where the GPU is angry,
4511 * for all other failure, such as an allocation failure, bail.
4512 */
4513 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4514 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4515 ret = 0;
Chris Wilson1070a422012-04-24 15:47:41 +01004516 }
Chris Wilson60990322014-04-09 09:19:42 +01004517 mutex_unlock(&dev->struct_mutex);
Chris Wilson1070a422012-04-24 15:47:41 +01004518
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004519 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4520 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4521 dev_priv->dri1.allow_batchbuffer = 1;
Chris Wilson60990322014-04-09 09:19:42 +01004522 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01004523}
4524
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004525void
4526i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4527{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004528 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004529 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004530 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004531
Chris Wilsonb4519512012-05-11 14:29:30 +01004532 for_each_ring(ring, dev_priv, i)
4533 intel_cleanup_ring_buffer(ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004534}
4535
4536int
Eric Anholt673a3942008-07-30 12:06:12 -07004537i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4538 struct drm_file *file_priv)
4539{
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004540 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004541 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004542
Jesse Barnes79e53942008-11-07 14:24:08 -08004543 if (drm_core_check_feature(dev, DRIVER_MODESET))
4544 return 0;
4545
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004546 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004547 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004548 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004549 }
4550
Eric Anholt673a3942008-07-30 12:06:12 -07004551 mutex_lock(&dev->struct_mutex);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004552 dev_priv->ums.mm_suspended = 0;
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004553
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004554 ret = i915_gem_init_hw(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004555 if (ret != 0) {
4556 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004557 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004558 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004559
Ben Widawsky5cef07e2013-07-16 16:50:08 -07004560 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004561
Daniel Vetterbb0f1b52013-11-03 21:09:27 +01004562 ret = drm_irq_install(dev, dev->pdev->irq);
Chris Wilson5f353082010-06-07 14:03:03 +01004563 if (ret)
4564 goto cleanup_ringbuffer;
Daniel Vettere090c532013-11-03 20:27:05 +01004565 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004566
Eric Anholt673a3942008-07-30 12:06:12 -07004567 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01004568
4569cleanup_ringbuffer:
Chris Wilson5f353082010-06-07 14:03:03 +01004570 i915_gem_cleanup_ringbuffer(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004571 dev_priv->ums.mm_suspended = 1;
Chris Wilson5f353082010-06-07 14:03:03 +01004572 mutex_unlock(&dev->struct_mutex);
4573
4574 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004575}
4576
4577int
4578i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4579 struct drm_file *file_priv)
4580{
Jesse Barnes79e53942008-11-07 14:24:08 -08004581 if (drm_core_check_feature(dev, DRIVER_MODESET))
4582 return 0;
4583
Daniel Vettere090c532013-11-03 20:27:05 +01004584 mutex_lock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004585 drm_irq_uninstall(dev);
Daniel Vettere090c532013-11-03 20:27:05 +01004586 mutex_unlock(&dev->struct_mutex);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004587
Chris Wilson45c5f202013-10-16 11:50:01 +01004588 return i915_gem_suspend(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004589}
4590
4591void
4592i915_gem_lastclose(struct drm_device *dev)
4593{
4594 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004595
Eric Anholte806b492009-01-22 09:56:58 -08004596 if (drm_core_check_feature(dev, DRIVER_MODESET))
4597 return;
4598
Chris Wilson45c5f202013-10-16 11:50:01 +01004599 ret = i915_gem_suspend(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004600 if (ret)
4601 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004602}
4603
Chris Wilson64193402010-10-24 12:38:05 +01004604static void
4605init_ring_lists(struct intel_ring_buffer *ring)
4606{
4607 INIT_LIST_HEAD(&ring->active_list);
4608 INIT_LIST_HEAD(&ring->request_list);
Chris Wilson64193402010-10-24 12:38:05 +01004609}
4610
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08004611void i915_init_vm(struct drm_i915_private *dev_priv,
4612 struct i915_address_space *vm)
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004613{
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08004614 if (!i915_is_ggtt(vm))
4615 drm_mm_init(&vm->mm, vm->start, vm->total);
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004616 vm->dev = dev_priv->dev;
4617 INIT_LIST_HEAD(&vm->active_list);
4618 INIT_LIST_HEAD(&vm->inactive_list);
4619 INIT_LIST_HEAD(&vm->global_link);
Chris Wilsonf72d21e2014-01-09 22:57:22 +00004620 list_add_tail(&vm->global_link, &dev_priv->vm_list);
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004621}
4622
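/*
 * One-time GEM setup at driver load: create the object slab cache, set up
 * the GGTT address space and the global object, fence and context lists,
 * prepare the retire/idle workers, detect the number of fence registers and
 * the bit-6 swizzle mode, and register the memory shrinker.
 */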
Eric Anholt673a3942008-07-30 12:06:12 -07004623void
4624i915_gem_load(struct drm_device *dev)
4625{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004626 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson42dcedd2012-11-15 11:32:30 +00004627 int i;
4628
4629 dev_priv->slab =
4630 kmem_cache_create("i915_gem_object",
4631 sizeof(struct drm_i915_gem_object), 0,
4632 SLAB_HWCACHE_ALIGN,
4633 NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07004634
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004635 INIT_LIST_HEAD(&dev_priv->vm_list);
4636 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4637
Ben Widawskya33afea2013-09-17 21:12:45 -07004638 INIT_LIST_HEAD(&dev_priv->context_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02004639 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4640 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004641 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004642 for (i = 0; i < I915_NUM_RINGS; i++)
4643 init_ring_lists(&dev_priv->ring[i]);
Daniel Vetter4b9de732011-10-09 21:52:02 +02004644 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
Daniel Vetter007cc8a2010-04-28 11:02:31 +02004645 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004646 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4647 i915_gem_retire_work_handler);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004648 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4649 i915_gem_idle_work_handler);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004650 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01004651
Dave Airlie94400122010-07-20 13:15:31 +10004652 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4653 if (IS_GEN3(dev)) {
Daniel Vetter50743292012-04-26 22:02:54 +02004654 I915_WRITE(MI_ARB_STATE,
4655 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
Dave Airlie94400122010-07-20 13:15:31 +10004656 }
4657
Chris Wilson72bfa192010-12-19 11:42:05 +00004658 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4659
Jesse Barnesde151cf2008-11-12 10:03:55 -08004660 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08004661 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4662 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004663
Ville Syrjälä42b5aea2013-04-09 13:02:47 +03004664 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4665 dev_priv->num_fence_regs = 32;
4666 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004667 dev_priv->num_fence_regs = 16;
4668 else
4669 dev_priv->num_fence_regs = 8;
4670
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004671 /* Initialize fence registers to zero */
Chris Wilson19b2dbd2013-06-12 10:15:12 +01004672 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4673 i915_gem_restore_fences(dev);
Eric Anholt10ed13e2011-05-06 13:53:49 -07004674
Eric Anholt673a3942008-07-30 12:06:12 -07004675 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004676 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01004677
Chris Wilsonce453d82011-02-21 14:43:56 +00004678 dev_priv->mm.interruptible = true;
4679
Dave Chinner7dc19d52013-08-28 10:18:11 +10004680 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4681 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
Chris Wilson17250b72010-10-28 12:51:39 +01004682 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4683 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07004684}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004685
4686/*
	4687 * Create a physically contiguous memory object to back this GEM object,
	4688 * e.g. for cursor and overlay registers that require physical addresses
4689 */
Chris Wilson995b6762010-08-20 13:23:26 +01004690static int i915_gem_init_phys_object(struct drm_device *dev,
4691 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004692{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004693 struct drm_i915_private *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004694 struct drm_i915_gem_phys_object *phys_obj;
4695 int ret;
4696
4697 if (dev_priv->mm.phys_objs[id - 1] || !size)
4698 return 0;
4699
Daniel Vetterb14c5672013-09-19 12:18:32 +02004700 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004701 if (!phys_obj)
4702 return -ENOMEM;
4703
4704 phys_obj->id = id;
4705
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004706 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004707 if (!phys_obj->handle) {
4708 ret = -ENOMEM;
4709 goto kfree_obj;
4710 }
4711#ifdef CONFIG_X86
4712 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4713#endif
4714
4715 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4716
4717 return 0;
4718kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004719 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004720 return ret;
4721}
4722
Chris Wilson995b6762010-08-20 13:23:26 +01004723static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004724{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004725 struct drm_i915_private *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004726 struct drm_i915_gem_phys_object *phys_obj;
4727
4728 if (!dev_priv->mm.phys_objs[id - 1])
4729 return;
4730
4731 phys_obj = dev_priv->mm.phys_objs[id - 1];
4732 if (phys_obj->cur_obj) {
4733 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4734 }
4735
4736#ifdef CONFIG_X86
4737 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4738#endif
4739 drm_pci_free(dev, phys_obj->handle);
4740 kfree(phys_obj);
4741 dev_priv->mm.phys_objs[id - 1] = NULL;
4742}
4743
4744void i915_gem_free_all_phys_object(struct drm_device *dev)
4745{
4746 int i;
4747
Dave Airlie260883c2009-01-22 17:58:49 +10004748 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004749 i915_gem_free_phys_object(dev, i);
4750}
4751
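/*
 * Copy the contents of the physically contiguous buffer back into the
 * object's shmem pages, flush, and drop the object's link to the phys slot.
 */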
4752void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004753 struct drm_i915_gem_object *obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004754{
Al Viro496ad9a2013-01-23 17:07:38 -05004755 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsone5281cc2010-10-28 13:45:36 +01004756 char *vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004757 int i;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004758 int page_count;
4759
Chris Wilson05394f32010-11-08 19:18:58 +00004760 if (!obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004761 return;
Chris Wilson05394f32010-11-08 19:18:58 +00004762 vaddr = obj->phys_obj->handle->vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004763
Chris Wilson05394f32010-11-08 19:18:58 +00004764 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004765 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07004766 struct page *page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004767 if (!IS_ERR(page)) {
4768 char *dst = kmap_atomic(page);
4769 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4770 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004771
Chris Wilsone5281cc2010-10-28 13:45:36 +01004772 drm_clflush_pages(&page, 1);
4773
4774 set_page_dirty(page);
4775 mark_page_accessed(page);
4776 page_cache_release(page);
4777 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004778 }
Ben Widawskye76e9ae2012-11-04 09:21:27 -08004779 i915_gem_chipset_flush(dev);
Chris Wilsond78b47b2009-06-17 21:52:49 +01004780
Chris Wilson05394f32010-11-08 19:18:58 +00004781 obj->phys_obj->cur_obj = NULL;
4782 obj->phys_obj = NULL;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004783}
4784
4785int
4786i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004787 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004788 int id,
4789 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004790{
Al Viro496ad9a2013-01-23 17:07:38 -05004791 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004792 struct drm_i915_private *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004793 int ret = 0;
4794 int page_count;
4795 int i;
4796
4797 if (id > I915_MAX_PHYS_OBJECT)
4798 return -EINVAL;
4799
Chris Wilson05394f32010-11-08 19:18:58 +00004800 if (obj->phys_obj) {
4801 if (obj->phys_obj->id == id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004802 return 0;
4803 i915_gem_detach_phys_object(dev, obj);
4804 }
4805
Dave Airlie71acb5e2008-12-30 20:31:46 +10004806 /* create a new object */
4807 if (!dev_priv->mm.phys_objs[id - 1]) {
4808 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson05394f32010-11-08 19:18:58 +00004809 obj->base.size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004810 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00004811 DRM_ERROR("failed to init phys object %d size: %zu\n",
4812 id, obj->base.size);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004813 return ret;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004814 }
4815 }
4816
4817 /* bind to the object */
Chris Wilson05394f32010-11-08 19:18:58 +00004818 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4819 obj->phys_obj->cur_obj = obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004820
Chris Wilson05394f32010-11-08 19:18:58 +00004821 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004822
4823 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01004824 struct page *page;
4825 char *dst, *src;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004826
Hugh Dickins5949eac2011-06-27 16:18:18 -07004827 page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004828 if (IS_ERR(page))
4829 return PTR_ERR(page);
4830
Chris Wilsonff75b9b2010-10-30 22:52:31 +01004831 src = kmap_atomic(page);
Chris Wilson05394f32010-11-08 19:18:58 +00004832 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004833 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004834 kunmap_atomic(src);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004835
4836 mark_page_accessed(page);
4837 page_cache_release(page);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004838 }
4839
4840 return 0;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004841}
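/*
 * Illustrative only (not a call site in this file): a consumer such as the
 * cursor code might bind its buffer object to a fixed phys slot roughly as
 *
 *	ret = i915_gem_attach_phys_object(dev, obj,
 *					  I915_GEM_PHYS_CURSOR_0 + pipe,
 *					  align);
 *
 * after which the object's contents live in the contiguous buffer whose bus
 * address can be handed to the hardware.
 */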
4842
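/*
 * pwrite fast path for phys-backed objects: try a non-faulting copy straight
 * into the contiguous buffer; if that fails, drop struct_mutex and retry
 * with a normal copy_from_user, then flush the chipset caches.
 */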
4843static int
Chris Wilson05394f32010-11-08 19:18:58 +00004844i915_gem_phys_pwrite(struct drm_device *dev,
4845 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +10004846 struct drm_i915_gem_pwrite *args,
4847 struct drm_file *file_priv)
4848{
Chris Wilson05394f32010-11-08 19:18:58 +00004849 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
Ville Syrjälä2bb46292013-02-22 16:12:51 +02004850 char __user *user_data = to_user_ptr(args->data_ptr);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004851
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004852 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4853 unsigned long unwritten;
4854
4855 /* The physical object once assigned is fixed for the lifetime
4856 * of the obj, so we can safely drop the lock and continue
4857 * to access vaddr.
4858 */
4859 mutex_unlock(&dev->struct_mutex);
4860 unwritten = copy_from_user(vaddr, user_data, args->size);
4861 mutex_lock(&dev->struct_mutex);
4862 if (unwritten)
4863 return -EFAULT;
4864 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004865
Ben Widawskye76e9ae2012-11-04 09:21:27 -08004866 i915_gem_chipset_flush(dev);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004867 return 0;
4868}
Eric Anholtb9624422009-06-03 07:27:35 +00004869
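/*
 * Per-file teardown: stop the client's delayed idle work and detach any
 * requests still pointing at this file before it disappears.
 */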
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004870void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004871{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004872 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00004873
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004874 cancel_delayed_work_sync(&file_priv->mm.idle_work);
4875
Eric Anholtb9624422009-06-03 07:27:35 +00004876 /* Clean up our request list when the client is going away, so that
4877 * later retire_requests won't dereference our soon-to-be-gone
4878 * file_priv.
4879 */
Chris Wilson1c255952010-09-26 11:03:27 +01004880 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004881 while (!list_empty(&file_priv->mm.request_list)) {
4882 struct drm_i915_gem_request *request;
4883
4884 request = list_first_entry(&file_priv->mm.request_list,
4885 struct drm_i915_gem_request,
4886 client_list);
4887 list_del(&request->client_list);
4888 request->file_priv = NULL;
4889 }
Chris Wilson1c255952010-09-26 11:03:27 +01004890 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00004891}
Chris Wilson31169712009-09-14 16:50:28 +01004892
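/* Delayed per-file work: drop the RPS wait-boost once the client is idle. */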
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004893static void
4894i915_gem_file_idle_work_handler(struct work_struct *work)
4895{
4896 struct drm_i915_file_private *file_priv =
4897 container_of(work, typeof(*file_priv), mm.idle_work.work);
4898
4899 atomic_set(&file_priv->rps_wait_boost, false);
4900}
4901
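/*
 * Per-file setup: allocate the drm_i915_file_private, initialise its request
 * list and idle work, and open the file's default context. The allocation is
 * freed again if context setup fails.
 */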
4902int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4903{
4904 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08004905 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004906
4907 DRM_DEBUG_DRIVER("\n");
4908
4909 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4910 if (!file_priv)
4911 return -ENOMEM;
4912
4913 file->driver_priv = file_priv;
4914 file_priv->dev_priv = dev->dev_private;
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02004915 file_priv->file = file;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004916
4917 spin_lock_init(&file_priv->mm.lock);
4918 INIT_LIST_HEAD(&file_priv->mm.request_list);
4919 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4920 i915_gem_file_idle_work_handler);
4921
Ben Widawskye422b882013-12-06 14:10:58 -08004922 ret = i915_gem_context_open(dev, file);
4923 if (ret)
4924 kfree(file_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004925
Ben Widawskye422b882013-12-06 14:10:58 -08004926 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004927}
4928
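/*
 * Best-effort test of whether @task holds @mutex. Only reliable when owner
 * tracking is available (SMP or mutex debugging); otherwise report false.
 */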
Chris Wilson57745062012-11-21 13:04:04 +00004929static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4930{
4931 if (!mutex_is_locked(mutex))
4932 return false;
4933
4934#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4935 return mutex->owner == task;
4936#else
4937 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4938 return false;
4939#endif
4940}
4941
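/*
 * Shrinker "count" callback: estimate how many pages could be reclaimed,
 * i.e. unbound objects plus bound-but-inactive, unpinned objects, skipping
 * anything with pinned pages. Returns 0 if struct_mutex cannot be taken.
 */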
Dave Chinner7dc19d52013-08-28 10:18:11 +10004942static unsigned long
4943i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
Chris Wilson31169712009-09-14 16:50:28 +01004944{
Chris Wilson17250b72010-10-28 12:51:39 +01004945 struct drm_i915_private *dev_priv =
4946 container_of(shrinker,
4947 struct drm_i915_private,
4948 mm.inactive_shrinker);
4949 struct drm_device *dev = dev_priv->dev;
Chris Wilson6c085a72012-08-20 11:40:46 +02004950 struct drm_i915_gem_object *obj;
Chris Wilson57745062012-11-21 13:04:04 +00004951 bool unlock = true;
Dave Chinner7dc19d52013-08-28 10:18:11 +10004952 unsigned long count;
Chris Wilson17250b72010-10-28 12:51:39 +01004953
Chris Wilson57745062012-11-21 13:04:04 +00004954 if (!mutex_trylock(&dev->struct_mutex)) {
4955 if (!mutex_is_locked_by(&dev->struct_mutex, current))
Daniel Vetterd3227042013-09-25 14:00:02 +02004956 return 0;
Chris Wilson57745062012-11-21 13:04:04 +00004957
Daniel Vetter677feac2012-12-19 14:33:45 +01004958 if (dev_priv->mm.shrinker_no_lock_stealing)
Daniel Vetterd3227042013-09-25 14:00:02 +02004959 return 0;
Daniel Vetter677feac2012-12-19 14:33:45 +01004960
Chris Wilson57745062012-11-21 13:04:04 +00004961 unlock = false;
4962 }
Chris Wilson31169712009-09-14 16:50:28 +01004963
Dave Chinner7dc19d52013-08-28 10:18:11 +10004964 count = 0;
Ben Widawsky35c20a62013-05-31 11:28:48 -07004965 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
Chris Wilsona5570172012-09-04 21:02:54 +01004966 if (obj->pages_pin_count == 0)
Dave Chinner7dc19d52013-08-28 10:18:11 +10004967 count += obj->base.size >> PAGE_SHIFT;
Ben Widawskyfcb4a572013-07-31 16:59:57 -07004968
4969 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4970 if (obj->active)
4971 continue;
4972
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004973 if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
Dave Chinner7dc19d52013-08-28 10:18:11 +10004974 count += obj->base.size >> PAGE_SHIFT;
Ben Widawskyfcb4a572013-07-31 16:59:57 -07004975 }
Chris Wilson31169712009-09-14 16:50:28 +01004976
Chris Wilson57745062012-11-21 13:04:04 +00004977 if (unlock)
4978 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9973b42013-10-04 10:33:00 +01004979
Dave Chinner7dc19d52013-08-28 10:18:11 +10004980 return count;
Chris Wilson31169712009-09-14 16:50:28 +01004981}
Ben Widawskya70a3142013-07-31 16:59:56 -07004982
	4983/* Per-VM helpers: query an object's offset, size and bind state in a given address space */
4984unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4985 struct i915_address_space *vm)
4986{
4987 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4988 struct i915_vma *vma;
4989
Ben Widawsky6f425322013-12-06 14:10:48 -08004990 if (!dev_priv->mm.aliasing_ppgtt ||
4991 vm == &dev_priv->mm.aliasing_ppgtt->base)
Ben Widawskya70a3142013-07-31 16:59:56 -07004992 vm = &dev_priv->gtt.base;
4993
4994 BUG_ON(list_empty(&o->vma_list));
4995 list_for_each_entry(vma, &o->vma_list, vma_link) {
4996 if (vma->vm == vm)
4997 return vma->node.start;
4999 }
5000 return -1;
5001}
5002
5003bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5004 struct i915_address_space *vm)
5005{
5006 struct i915_vma *vma;
5007
5008 list_for_each_entry(vma, &o->vma_list, vma_link)
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07005009 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
Ben Widawskya70a3142013-07-31 16:59:56 -07005010 return true;
5011
5012 return false;
5013}
5014
5015bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5016{
Chris Wilson5a1d5eb2013-09-10 11:27:37 +01005017 struct i915_vma *vma;
Ben Widawskya70a3142013-07-31 16:59:56 -07005018
Chris Wilson5a1d5eb2013-09-10 11:27:37 +01005019 list_for_each_entry(vma, &o->vma_list, vma_link)
5020 if (drm_mm_node_allocated(&vma->node))
Ben Widawskya70a3142013-07-31 16:59:56 -07005021 return true;
5022
5023 return false;
5024}
5025
5026unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5027 struct i915_address_space *vm)
5028{
5029 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5030 struct i915_vma *vma;
5031
Ben Widawsky6f425322013-12-06 14:10:48 -08005032 if (!dev_priv->mm.aliasing_ppgtt ||
5033 vm == &dev_priv->mm.aliasing_ppgtt->base)
Ben Widawskya70a3142013-07-31 16:59:56 -07005034 vm = &dev_priv->gtt.base;
5035
5036 BUG_ON(list_empty(&o->vma_list));
5037
5038 list_for_each_entry(vma, &o->vma_list, vma_link)
5039 if (vma->vm == vm)
5040 return vma->node.size;
5041
5042 return 0;
5043}
5044
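/*
 * Shrinker "scan" callback: purge clean objects first, then unbind and
 * shrink further, and finally fall back to dropping everything. Returns the
 * number of pages freed, or SHRINK_STOP if the lock cannot be acquired.
 */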
Dave Chinner7dc19d52013-08-28 10:18:11 +10005045static unsigned long
5046i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
5047{
5048 struct drm_i915_private *dev_priv =
5049 container_of(shrinker,
5050 struct drm_i915_private,
5051 mm.inactive_shrinker);
5052 struct drm_device *dev = dev_priv->dev;
Dave Chinner7dc19d52013-08-28 10:18:11 +10005053 unsigned long freed;
5054 bool unlock = true;
5055
5056 if (!mutex_trylock(&dev->struct_mutex)) {
5057 if (!mutex_is_locked_by(&dev->struct_mutex, current))
Daniel Vetterd3227042013-09-25 14:00:02 +02005058 return SHRINK_STOP;
Dave Chinner7dc19d52013-08-28 10:18:11 +10005059
5060 if (dev_priv->mm.shrinker_no_lock_stealing)
Daniel Vetterd3227042013-09-25 14:00:02 +02005061 return SHRINK_STOP;
Dave Chinner7dc19d52013-08-28 10:18:11 +10005062
5063 unlock = false;
5064 }
5065
Chris Wilsond9973b42013-10-04 10:33:00 +01005066 freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
5067 if (freed < sc->nr_to_scan)
5068 freed += __i915_gem_shrink(dev_priv,
5069 sc->nr_to_scan - freed,
5070 false);
5071 if (freed < sc->nr_to_scan)
Dave Chinner7dc19d52013-08-28 10:18:11 +10005072 freed += i915_gem_shrink_all(dev_priv);
5073
5074 if (unlock)
5075 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9973b42013-10-04 10:33:00 +01005076
Dave Chinner7dc19d52013-08-28 10:18:11 +10005077 return freed;
5078}
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07005079
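/*
 * Return the object's global-GTT VMA. The GGTT VMA is kept as the first
 * entry on the object's vma_list; warn on an empty list and return NULL if
 * the first vma does not belong to the GGTT.
 */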
5080struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5081{
5082 struct i915_vma *vma;
5083
5084 if (WARN_ON(list_empty(&obj->vma_list)))
5085 return NULL;
5086
5087 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
Ben Widawsky6e164c32013-12-06 14:10:49 -08005088 if (vma->vm != obj_to_ggtt(obj))
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07005089 return NULL;
5090
5091 return vma;
5092}