/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						    bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
			   bool map_and_fenceable,
			   bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					  struct drm_i915_fence_reg *fence,
					  bool enable);

static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

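/* Coherency helpers: CPU reads/writes are coherent with the GPU whenever the
 * platform has an LLC or the object uses a cacheable level; only uncached
 * objects on non-LLC parts (and scanout buffers) need a manual clflush.
 */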
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

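/* An object is inactive when it is bound into some address space but is no
 * longer referenced by outstanding GPU commands.
 */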
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

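/* Copy helpers for objects using bit-17 swizzling: each 64-byte half of a
 * 128-byte chunk is swapped with its neighbour (gpu_offset ^ 64), so the copy
 * is split at cacheline boundaries and redirected to the swizzled offset.
 */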
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915_prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915_prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

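/* Helpers for __wait_seqno below: fake_irq is a timer callback that wakes the
 * waiting task when an interrupt is suspected missing, missed_irq tests the
 * per-ring missed-irq mask, and can_wait_boost grants an RPS boost only if
 * this file has not already claimed one.
 */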
static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now;
	DEFINE_WAIT(wait);
	long timeout_jiffies;
	int ret;

	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;

	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
	    WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;
		unsigned long expire;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout_jiffies <= 0) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = jiffies + (missed_irq(dev_priv, ring) ? 1 : timeout_jiffies);
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timeout)
			timeout_jiffies = expire - jiffies;

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_file *file,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

Eric Anholt673a3942008-07-30 12:06:12 -07001222/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001223 * Called when user space prepares to use an object with the CPU, either
1224 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -07001225 */
1226int
1227i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001228 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001229{
1230 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001231 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001232 uint32_t read_domains = args->read_domains;
1233 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001234 int ret;
1235
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001236 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +01001237 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001238 return -EINVAL;
1239
Chris Wilson21d509e2009-06-06 09:46:02 +01001240 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001241 return -EINVAL;
1242
1243 /* Having something in the write domain implies it's in the read
1244 * domain, and only that read domain. Enforce that in the request.
1245 */
1246 if (write_domain != 0 && read_domains != write_domain)
1247 return -EINVAL;
1248
Chris Wilson76c1dec2010-09-25 11:22:51 +01001249 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001250 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001251 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001252
Chris Wilson05394f32010-11-08 19:18:58 +00001253 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001254 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001255 ret = -ENOENT;
1256 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +01001257 }
Jesse Barnes652c3932009-08-17 13:31:43 -07001258
Chris Wilson3236f572012-08-24 09:35:09 +01001259 /* Try to flush the object off the GPU without holding the lock.
1260 * We will repeat the flush holding the lock in the normal manner
1261 * to catch cases where we are gazumped.
1262 */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001263 ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
Chris Wilson3236f572012-08-24 09:35:09 +01001264 if (ret)
1265 goto unref;
1266
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001267 if (read_domains & I915_GEM_DOMAIN_GTT) {
1268 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001269
1270 /* Silently promote "you're not bound, there was nothing to do"
1271 * to success, since the client was just asking us to
1272 * make sure everything was done.
1273 */
1274 if (ret == -EINVAL)
1275 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001276 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001277 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001278 }
1279
Chris Wilson3236f572012-08-24 09:35:09 +01001280unref:
Chris Wilson05394f32010-11-08 19:18:58 +00001281 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001282unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001283 mutex_unlock(&dev->struct_mutex);
1284 return ret;
1285}
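
/*
 * Illustrative sketch, not part of the driver: how a client would
 * typically call this ioctl before accessing the pages through a CPU
 * mmap.  The structure and domain flags are assumed to match the
 * i915_drm.h uapi header and libdrm's drmIoctl(); "fd" and "handle"
 * are placeholders and error handling is omitted.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle       = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */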
1286
1287/**
1288 * Called when user space has done writes to this buffer
1289 */
1290int
1291i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001292 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001293{
1294 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001295 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001296 int ret = 0;
1297
Chris Wilson76c1dec2010-09-25 11:22:51 +01001298 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001299 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001300 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001301
Chris Wilson05394f32010-11-08 19:18:58 +00001302 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001303 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001304 ret = -ENOENT;
1305 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001306 }
1307
Eric Anholt673a3942008-07-30 12:06:12 -07001308 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson2c225692013-08-09 12:26:45 +01001309 if (obj->pin_display)
1310 i915_gem_object_flush_cpu_write_domain(obj, true);
Eric Anholte47c68e2008-11-14 13:35:19 -08001311
Chris Wilson05394f32010-11-08 19:18:58 +00001312 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001313unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001314 mutex_unlock(&dev->struct_mutex);
1315 return ret;
1316}
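
/*
 * Illustrative sketch, not part of the driver: after writing through a
 * CPU mmap, a client signals completion with the sw_finish ioctl so a
 * pinned scanout buffer gets its CPU writes flushed.  Names are assumed
 * from the i915_drm.h uapi header; "fd" and "handle" are placeholders.
 *
 *	struct drm_i915_gem_sw_finish fin = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &fin);
 */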
1317
1318/**
1319 * Maps the contents of an object, returning the address it is mapped
1320 * into.
1321 *
1322 * While the mapping holds a reference on the contents of the object, it doesn't
1323 * imply a ref on the object itself.
1324 */
1325int
1326i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001327 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001328{
1329 struct drm_i915_gem_mmap *args = data;
1330 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001331 unsigned long addr;
1332
Chris Wilson05394f32010-11-08 19:18:58 +00001333 obj = drm_gem_object_lookup(dev, file, args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07001334 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001335 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001336
Daniel Vetter1286ff72012-05-10 15:25:09 +02001337 /* prime objects have no backing filp to GEM mmap
1338 * pages from.
1339 */
1340 if (!obj->filp) {
1341 drm_gem_object_unreference_unlocked(obj);
1342 return -EINVAL;
1343 }
1344
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001345 addr = vm_mmap(obj->filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001346 PROT_READ | PROT_WRITE, MAP_SHARED,
1347 args->offset);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001348 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001349 if (IS_ERR((void *)addr))
1350 return addr;
1351
1352 args->addr_ptr = (uint64_t) addr;
1353
1354 return 0;
1355}
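
/*
 * Illustrative sketch, not part of the driver: a CPU mapping of a GEM
 * object as a client would request it.  Field names are assumed from
 * the i915_drm.h uapi header; "fd", "handle" and "obj_size" are
 * placeholders.  The returned addr_ptr points into the object's shmem
 * backing store, not the GTT.
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size   = obj_size,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 */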
1356
Jesse Barnesde151cf2008-11-12 10:03:55 -08001357/**
1358 * i915_gem_fault - fault a page into the GTT
1359 * @vma: VMA in question
1360 * @vmf: fault info
1361 *
1362 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1363 * from userspace. The fault handler takes care of binding the object to
1364 * the GTT (if needed), allocating and programming a fence register (again,
1365 * only if needed based on whether the old reg is still valid or the object
1366 * is tiled) and inserting a new PTE into the faulting process.
1367 *
1368 * Note that the faulting process may involve evicting existing objects
1369 * from the GTT and/or fence registers to make room. So performance may
1370 * suffer if the GTT working set is large or there are few fence registers
1371 * left.
1372 */
1373int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1374{
Chris Wilson05394f32010-11-08 19:18:58 +00001375 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1376 struct drm_device *dev = obj->base.dev;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001377 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001378 pgoff_t page_offset;
1379 unsigned long pfn;
1380 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001381 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001382
1383 /* We don't use vmf->pgoff since that has the fake offset */
1384 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1385 PAGE_SHIFT;
1386
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001387 ret = i915_mutex_lock_interruptible(dev);
1388 if (ret)
1389 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001390
Chris Wilsondb53a302011-02-03 11:57:46 +00001391 trace_i915_gem_object_fault(obj, page_offset, true, write);
1392
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001393 /* Access to snoopable pages through the GTT is incoherent. */
1394 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1395 ret = -EINVAL;
1396 goto unlock;
1397 }
1398
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001399 /* Now bind it into the GTT if needed */
Ben Widawskyc37e2202013-07-31 16:59:58 -07001400 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001401 if (ret)
1402 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001403
Chris Wilsonc9839302012-11-20 10:45:17 +00001404 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1405 if (ret)
1406 goto unpin;
1407
1408 ret = i915_gem_object_get_fence(obj);
1409 if (ret)
1410 goto unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001411
Chris Wilson6299f992010-11-24 12:23:44 +00001412 obj->fault_mappable = true;
1413
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001414 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1415 pfn >>= PAGE_SHIFT;
1416 pfn += page_offset;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001417
1418 /* Finally, remap it using the new GTT offset */
1419 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc9839302012-11-20 10:45:17 +00001420unpin:
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08001421 i915_gem_object_ggtt_unpin(obj);
Chris Wilsonc7150892009-09-23 00:43:56 +01001422unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001423 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001424out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001425 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001426 case -EIO:
Daniel Vettera9340cc2012-07-04 22:18:42 +02001427 /* If this -EIO is due to a gpu hang, give the reset code a
1428 * chance to clean up the mess. Otherwise return the proper
1429 * SIGBUS. */
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001430 if (i915_terminally_wedged(&dev_priv->gpu_error))
Daniel Vettera9340cc2012-07-04 22:18:42 +02001431 return VM_FAULT_SIGBUS;
Chris Wilson045e7692010-11-07 09:18:22 +00001432 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001433 /*
1434 * EAGAIN means the gpu is hung and we'll wait for the error
1435 * handler to reset everything when re-faulting in
1436 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001437 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001438 case 0:
1439 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001440 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001441 case -EBUSY:
1442 /*
1443 * EBUSY is ok: this just means that another thread
1444 * already did the job.
1445 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001446 return VM_FAULT_NOPAGE;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001447 case -ENOMEM:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001448 return VM_FAULT_OOM;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001449 case -ENOSPC:
1450 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001451 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001452 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Chris Wilsonc7150892009-09-23 00:43:56 +01001453 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001454 }
1455}
1456
1457/**
Chris Wilson901782b2009-07-10 08:18:50 +01001458 * i915_gem_release_mmap - remove physical page mappings
1459 * @obj: obj in question
1460 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001461 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001462 * relinquish ownership of the pages back to the system.
1463 *
1464 * It is vital that we remove the page mapping if we have mapped a tiled
1465 * object through the GTT and then lose the fence register due to
1466 * resource pressure. Similarly if the object has been moved out of the
1467 * aperture, then pages mapped into userspace must be revoked. Removing the
1468 * mapping will then trigger a page fault on the next user access, allowing
1469 * fixup by i915_gem_fault().
1470 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001471void
Chris Wilson05394f32010-11-08 19:18:58 +00001472i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001473{
Chris Wilson6299f992010-11-24 12:23:44 +00001474 if (!obj->fault_mappable)
1475 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001476
David Herrmann51335df2013-07-24 21:10:03 +02001477 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
Chris Wilson6299f992010-11-24 12:23:44 +00001478 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001479}
1480
Imre Deak0fa87792013-01-07 21:47:35 +02001481uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001482i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001483{
Chris Wilsone28f8712011-07-18 13:11:49 -07001484 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001485
1486 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001487 tiling_mode == I915_TILING_NONE)
1488 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001489
1490 /* Previous chips need a power-of-two fence region when tiling */
1491 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001492 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001493 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001494 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001495
Chris Wilsone28f8712011-07-18 13:11:49 -07001496 while (gtt_size < size)
1497 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001498
Chris Wilsone28f8712011-07-18 13:11:49 -07001499 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001500}
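
/*
 * Worked example (illustrative only): a 300 KiB tiled object on gen3
 * starts from the 1 MiB minimum and stays there, while on gen2 the
 * 512 KiB minimum is doubled once for a 600 KiB object, giving a 1 MiB
 * fence region.  Untiled objects and gen4+ get the size back unchanged.
 */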
1501
Jesse Barnesde151cf2008-11-12 10:03:55 -08001502/**
1503 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1504 * @obj: object to check
1505 *
1506 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001507 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001508 */
Imre Deakd865110c2013-01-07 21:47:33 +02001509uint32_t
1510i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1511 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001512{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001513 /*
1514 * Minimum alignment is 4k (GTT page size), but might be greater
1515 * if a fence register is needed for the object.
1516 */
Imre Deakd865110c2013-01-07 21:47:33 +02001517 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001518 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001519 return 4096;
1520
1521 /*
1522 * Previous chips need to be aligned to the size of the smallest
1523 * fence register that can contain the object.
1524 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001525 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001526}
1527
Chris Wilsond8cb5082012-08-11 15:41:03 +01001528static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1529{
1530 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1531 int ret;
1532
David Herrmann0de23972013-07-24 21:07:52 +02001533 if (drm_vma_node_has_offset(&obj->base.vma_node))
Chris Wilsond8cb5082012-08-11 15:41:03 +01001534 return 0;
1535
Daniel Vetterda494d72012-12-20 15:11:16 +01001536 dev_priv->mm.shrinker_no_lock_stealing = true;
1537
Chris Wilsond8cb5082012-08-11 15:41:03 +01001538 ret = drm_gem_create_mmap_offset(&obj->base);
1539 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001540 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001541
1542 /* Badly fragmented mmap space? The only way we can recover
1543 * space is by destroying unwanted objects. We can't randomly release
1544 * mmap_offsets as userspace expects them to be persistent for the
1545 * lifetime of the objects. The closest we can do is to release the
1546 * offsets on purgeable objects by truncating them and marking them
1547 * purged, which prevents userspace from ever using those objects again.
1548 */
1549 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1550 ret = drm_gem_create_mmap_offset(&obj->base);
1551 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001552 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001553
1554 i915_gem_shrink_all(dev_priv);
Daniel Vetterda494d72012-12-20 15:11:16 +01001555 ret = drm_gem_create_mmap_offset(&obj->base);
1556out:
1557 dev_priv->mm.shrinker_no_lock_stealing = false;
1558
1559 return ret;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001560}
1561
1562static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1563{
Chris Wilsond8cb5082012-08-11 15:41:03 +01001564 drm_gem_free_mmap_offset(&obj->base);
1565}
1566
Jesse Barnesde151cf2008-11-12 10:03:55 -08001567int
Dave Airlieff72145b2011-02-07 12:16:14 +10001568i915_gem_mmap_gtt(struct drm_file *file,
1569 struct drm_device *dev,
1570 uint32_t handle,
1571 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001572{
Chris Wilsonda761a62010-10-27 17:37:08 +01001573 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001574 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001575 int ret;
1576
Chris Wilson76c1dec2010-09-25 11:22:51 +01001577 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001578 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001579 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001580
Dave Airlieff72145b2011-02-07 12:16:14 +10001581 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001582 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001583 ret = -ENOENT;
1584 goto unlock;
1585 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001586
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001587 if (obj->base.size > dev_priv->gtt.mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001588 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001589 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001590 }
1591
Chris Wilson05394f32010-11-08 19:18:58 +00001592 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001593 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001594 ret = -EINVAL;
1595 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001596 }
1597
Chris Wilsond8cb5082012-08-11 15:41:03 +01001598 ret = i915_gem_object_create_mmap_offset(obj);
1599 if (ret)
1600 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001601
David Herrmann0de23972013-07-24 21:07:52 +02001602 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001603
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001604out:
Chris Wilson05394f32010-11-08 19:18:58 +00001605 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001606unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001607 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001608 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001609}
1610
Dave Airlieff72145b2011-02-07 12:16:14 +10001611/**
1612 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1613 * @dev: DRM device
1614 * @data: GTT mapping ioctl data
1615 * @file: GEM object info
1616 *
1617 * Simply returns the fake offset to userspace so it can mmap it.
1618 * The mmap call will end up in drm_gem_mmap(), which will set things
1619 * up so we can get faults in the handler above.
1620 *
1621 * The fault handler will take care of binding the object into the GTT
1622 * (since it may have been evicted to make room for something), allocating
1623 * a fence register, and mapping the appropriate aperture address into
1624 * userspace.
1625 */
1626int
1627i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1628 struct drm_file *file)
1629{
1630 struct drm_i915_gem_mmap_gtt *args = data;
1631
Dave Airlieff72145b2011-02-07 12:16:14 +10001632 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1633}
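
/*
 * Illustrative sketch, not part of the driver: the two-step GTT mapping
 * as seen from userspace.  The ioctl only returns the fake offset; the
 * PTEs are installed lazily by i915_gem_fault() on first access.  Names
 * are assumed from the i915_drm.h uapi header; "fd", "handle" and
 * "obj_size" are placeholders.
 *
 *	struct drm_i915_gem_mmap_gtt map = { .handle = handle };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &map) == 0)
 *		ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, map.offset);
 */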
1634
Daniel Vetter225067e2012-08-20 10:23:20 +02001635/* Immediately discard the backing storage */
1636static void
1637i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001638{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001639 struct inode *inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001640
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001641 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001642
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001643 if (obj->base.filp == NULL)
1644 return;
1645
Daniel Vetter225067e2012-08-20 10:23:20 +02001646	/* Our goal here is to return as much of the memory as
1647	 * possible back to the system, as we are called from OOM.
1648 * To do this we must instruct the shmfs to drop all of its
1649 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01001650 */
Al Viro496ad9a2013-01-23 17:07:38 -05001651 inode = file_inode(obj->base.filp);
Daniel Vetter225067e2012-08-20 10:23:20 +02001652 shmem_truncate_range(inode, 0, (loff_t)-1);
Hugh Dickins5949eac2011-06-27 16:18:18 -07001653
Daniel Vetter225067e2012-08-20 10:23:20 +02001654 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001655}
Chris Wilsone5281cc2010-10-28 13:45:36 +01001656
Daniel Vetter225067e2012-08-20 10:23:20 +02001657static inline int
1658i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1659{
1660 return obj->madv == I915_MADV_DONTNEED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001661}
1662
Chris Wilson5cdf5882010-09-27 15:51:07 +01001663static void
Chris Wilson05394f32010-11-08 19:18:58 +00001664i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001665{
Imre Deak90797e62013-02-18 19:28:03 +02001666 struct sg_page_iter sg_iter;
1667 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02001668
Chris Wilson05394f32010-11-08 19:18:58 +00001669 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001670
Chris Wilson6c085a72012-08-20 11:40:46 +02001671 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1672 if (ret) {
1673 /* In the event of a disaster, abandon all caches and
1674 * hope for the best.
1675 */
1676 WARN_ON(ret != -EIO);
Chris Wilson2c225692013-08-09 12:26:45 +01001677 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02001678 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1679 }
1680
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001681 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001682 i915_gem_object_save_bit_17_swizzle(obj);
1683
Chris Wilson05394f32010-11-08 19:18:58 +00001684 if (obj->madv == I915_MADV_DONTNEED)
1685 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001686
Imre Deak90797e62013-02-18 19:28:03 +02001687 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
Imre Deak2db76d72013-03-26 15:14:18 +02001688 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +01001689
Chris Wilson05394f32010-11-08 19:18:58 +00001690 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01001691 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001692
Chris Wilson05394f32010-11-08 19:18:58 +00001693 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01001694 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001695
Chris Wilson9da3da62012-06-01 15:20:22 +01001696 page_cache_release(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001697 }
Chris Wilson05394f32010-11-08 19:18:58 +00001698 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001699
Chris Wilson9da3da62012-06-01 15:20:22 +01001700 sg_free_table(obj->pages);
1701 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01001702}
1703
Chris Wilsondd624af2013-01-15 12:39:35 +00001704int
Chris Wilson37e680a2012-06-07 15:38:42 +01001705i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1706{
1707 const struct drm_i915_gem_object_ops *ops = obj->ops;
1708
Chris Wilson2f745ad2012-09-04 21:02:58 +01001709 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01001710 return 0;
1711
Chris Wilsona5570172012-09-04 21:02:54 +01001712 if (obj->pages_pin_count)
1713 return -EBUSY;
1714
Ben Widawsky98438772013-07-31 17:00:12 -07001715 BUG_ON(i915_gem_obj_bound_any(obj));
Ben Widawsky3e123022013-07-31 17:00:04 -07001716
Chris Wilsona2165e32012-12-03 11:49:00 +00001717 /* ->put_pages might need to allocate memory for the bit17 swizzle
1718 * array, hence protect them from being reaped by removing them from gtt
1719 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07001720 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00001721
Chris Wilson37e680a2012-06-07 15:38:42 +01001722 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001723 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02001724
Chris Wilson6c085a72012-08-20 11:40:46 +02001725 if (i915_gem_object_is_purgeable(obj))
1726 i915_gem_object_truncate(obj);
1727
1728 return 0;
1729}
1730
Chris Wilsond9973b42013-10-04 10:33:00 +01001731static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001732__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1733 bool purgeable_only)
Chris Wilson6c085a72012-08-20 11:40:46 +02001734{
Chris Wilson57094f82013-09-04 10:45:50 +01001735 struct list_head still_bound_list;
Chris Wilson6c085a72012-08-20 11:40:46 +02001736 struct drm_i915_gem_object *obj, *next;
Chris Wilsond9973b42013-10-04 10:33:00 +01001737 unsigned long count = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001738
1739 list_for_each_entry_safe(obj, next,
1740 &dev_priv->mm.unbound_list,
Ben Widawsky35c20a62013-05-31 11:28:48 -07001741 global_list) {
Daniel Vetter93927ca2013-01-10 18:03:00 +01001742 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
Chris Wilson37e680a2012-06-07 15:38:42 +01001743 i915_gem_object_put_pages(obj) == 0) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001744 count += obj->base.size >> PAGE_SHIFT;
1745 if (count >= target)
1746 return count;
1747 }
1748 }
1749
Chris Wilson57094f82013-09-04 10:45:50 +01001750 /*
1751 * As we may completely rewrite the bound list whilst unbinding
1752 * (due to retiring requests) we have to strictly process only
1753	 * one element of the list at a time, and recheck the list
1754 * on every iteration.
1755 */
1756 INIT_LIST_HEAD(&still_bound_list);
1757 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001758 struct i915_vma *vma, *v;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001759
Chris Wilson57094f82013-09-04 10:45:50 +01001760 obj = list_first_entry(&dev_priv->mm.bound_list,
1761 typeof(*obj), global_list);
1762 list_move_tail(&obj->global_list, &still_bound_list);
1763
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001764 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1765 continue;
1766
Chris Wilson57094f82013-09-04 10:45:50 +01001767 /*
1768 * Hold a reference whilst we unbind this object, as we may
1769 * end up waiting for and retiring requests. This might
1770 * release the final reference (held by the active list)
1771	 * and result in the object being freed from under us.
1773 *
1774 * Note 1: Shrinking the bound list is special since only active
1775 * (and hence bound objects) can contain such limbo objects, so
1776 * we don't need special tricks for shrinking the unbound list.
1777 * The only other place where we have to be careful with active
1778 * objects suddenly disappearing due to retiring requests is the
1779 * eviction code.
1780 *
1781 * Note 2: Even though the bound list doesn't hold a reference
1782 * to the object we can safely grab one here: The final object
1783 * unreferencing and the bound_list are both protected by the
1784 * dev->struct_mutex and so we won't ever be able to observe an
1785 * object on the bound_list with a reference count equals 0.
1786 */
1787 drm_gem_object_reference(&obj->base);
1788
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001789 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1790 if (i915_vma_unbind(vma))
1791 break;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001792
Chris Wilson57094f82013-09-04 10:45:50 +01001793 if (i915_gem_object_put_pages(obj) == 0)
Chris Wilson6c085a72012-08-20 11:40:46 +02001794 count += obj->base.size >> PAGE_SHIFT;
Chris Wilson57094f82013-09-04 10:45:50 +01001795
1796 drm_gem_object_unreference(&obj->base);
Chris Wilson6c085a72012-08-20 11:40:46 +02001797 }
Chris Wilson57094f82013-09-04 10:45:50 +01001798 list_splice(&still_bound_list, &dev_priv->mm.bound_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02001799
1800 return count;
1801}
1802
Chris Wilsond9973b42013-10-04 10:33:00 +01001803static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001804i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1805{
1806 return __i915_gem_shrink(dev_priv, target, true);
1807}
1808
Chris Wilsond9973b42013-10-04 10:33:00 +01001809static unsigned long
Chris Wilson6c085a72012-08-20 11:40:46 +02001810i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1811{
1812 struct drm_i915_gem_object *obj, *next;
Dave Chinner7dc19d52013-08-28 10:18:11 +10001813 long freed = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001814
1815 i915_gem_evict_everything(dev_priv->dev);
1816
Ben Widawsky35c20a62013-05-31 11:28:48 -07001817 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
Dave Chinner7dc19d52013-08-28 10:18:11 +10001818 global_list) {
Chris Wilsond9973b42013-10-04 10:33:00 +01001819 if (i915_gem_object_put_pages(obj) == 0)
Dave Chinner7dc19d52013-08-28 10:18:11 +10001820 freed += obj->base.size >> PAGE_SHIFT;
Dave Chinner7dc19d52013-08-28 10:18:11 +10001821 }
1822 return freed;
Daniel Vetter225067e2012-08-20 10:23:20 +02001823}
1824
Chris Wilson37e680a2012-06-07 15:38:42 +01001825static int
Chris Wilson6c085a72012-08-20 11:40:46 +02001826i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001827{
Chris Wilson6c085a72012-08-20 11:40:46 +02001828 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001829 int page_count, i;
1830 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01001831 struct sg_table *st;
1832 struct scatterlist *sg;
Imre Deak90797e62013-02-18 19:28:03 +02001833 struct sg_page_iter sg_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07001834 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02001835 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson6c085a72012-08-20 11:40:46 +02001836 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07001837
Chris Wilson6c085a72012-08-20 11:40:46 +02001838 /* Assert that the object is not currently in any GPU domain. As it
1839 * wasn't in the GTT, there shouldn't be any way it could have been in
1840 * a GPU cache
1841 */
1842 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1843 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1844
Chris Wilson9da3da62012-06-01 15:20:22 +01001845 st = kmalloc(sizeof(*st), GFP_KERNEL);
1846 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07001847 return -ENOMEM;
1848
Chris Wilson9da3da62012-06-01 15:20:22 +01001849 page_count = obj->base.size / PAGE_SIZE;
1850 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01001851 kfree(st);
1852 return -ENOMEM;
1853 }
1854
1855 /* Get the list of pages out of our struct file. They'll be pinned
1856 * at this point until we release them.
1857 *
1858 * Fail silently without starting the shrinker
1859 */
Al Viro496ad9a2013-01-23 17:07:38 -05001860 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilson6c085a72012-08-20 11:40:46 +02001861 gfp = mapping_gfp_mask(mapping);
Linus Torvaldscaf49192012-12-10 10:51:16 -08001862 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001863 gfp &= ~(__GFP_IO | __GFP_WAIT);
Imre Deak90797e62013-02-18 19:28:03 +02001864 sg = st->sgl;
1865 st->nents = 0;
1866 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001867 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1868 if (IS_ERR(page)) {
1869 i915_gem_purge(dev_priv, page_count);
1870 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1871 }
1872 if (IS_ERR(page)) {
1873 /* We've tried hard to allocate the memory by reaping
1874 * our own buffer, now let the real VM do its job and
1875 * go down in flames if truly OOM.
1876 */
Linus Torvaldscaf49192012-12-10 10:51:16 -08001877 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
Chris Wilson6c085a72012-08-20 11:40:46 +02001878 gfp |= __GFP_IO | __GFP_WAIT;
1879
1880 i915_gem_shrink_all(dev_priv);
1881 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1882 if (IS_ERR(page))
1883 goto err_pages;
1884
Linus Torvaldscaf49192012-12-10 10:51:16 -08001885 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001886 gfp &= ~(__GFP_IO | __GFP_WAIT);
1887 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001888#ifdef CONFIG_SWIOTLB
1889 if (swiotlb_nr_tbl()) {
1890 st->nents++;
1891 sg_set_page(sg, page, PAGE_SIZE, 0);
1892 sg = sg_next(sg);
1893 continue;
1894 }
1895#endif
Imre Deak90797e62013-02-18 19:28:03 +02001896 if (!i || page_to_pfn(page) != last_pfn + 1) {
1897 if (i)
1898 sg = sg_next(sg);
1899 st->nents++;
1900 sg_set_page(sg, page, PAGE_SIZE, 0);
1901 } else {
1902 sg->length += PAGE_SIZE;
1903 }
1904 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03001905
1906 /* Check that the i965g/gm workaround works. */
1907 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07001908 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001909#ifdef CONFIG_SWIOTLB
1910 if (!swiotlb_nr_tbl())
1911#endif
1912 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01001913 obj->pages = st;
1914
Eric Anholt673a3942008-07-30 12:06:12 -07001915 if (i915_gem_object_needs_bit17_swizzle(obj))
1916 i915_gem_object_do_bit_17_swizzle(obj);
1917
1918 return 0;
1919
1920err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02001921 sg_mark_end(sg);
1922 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
Imre Deak2db76d72013-03-26 15:14:18 +02001923 page_cache_release(sg_page_iter_page(&sg_iter));
Chris Wilson9da3da62012-06-01 15:20:22 +01001924 sg_free_table(st);
1925 kfree(st);
Eric Anholt673a3942008-07-30 12:06:12 -07001926 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07001927}
1928
Chris Wilson37e680a2012-06-07 15:38:42 +01001929/* Ensure that the associated pages are gathered from the backing storage
1930 * and pinned into our object. i915_gem_object_get_pages() may be called
1931 * multiple times before they are released by a single call to
1932 * i915_gem_object_put_pages() - once the pages are no longer referenced
1933 * either as a result of memory pressure (reaping pages under the shrinker)
1934 * or as the object is itself released.
1935 */
1936int
1937i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1938{
1939 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1940 const struct drm_i915_gem_object_ops *ops = obj->ops;
1941 int ret;
1942
Chris Wilson2f745ad2012-09-04 21:02:58 +01001943 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01001944 return 0;
1945
Chris Wilson43e28f02013-01-08 10:53:09 +00001946 if (obj->madv != I915_MADV_WILLNEED) {
1947 DRM_ERROR("Attempting to obtain a purgeable object\n");
1948 return -EINVAL;
1949 }
1950
Chris Wilsona5570172012-09-04 21:02:54 +01001951 BUG_ON(obj->pages_pin_count);
1952
Chris Wilson37e680a2012-06-07 15:38:42 +01001953 ret = ops->get_pages(obj);
1954 if (ret)
1955 return ret;
1956
Ben Widawsky35c20a62013-05-31 11:28:48 -07001957 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilson37e680a2012-06-07 15:38:42 +01001958 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001959}
1960
Ben Widawskye2d05a82013-09-24 09:57:58 -07001961static void
Chris Wilson05394f32010-11-08 19:18:58 +00001962i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson9d7730912012-11-27 16:22:52 +00001963 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001964{
Chris Wilson05394f32010-11-08 19:18:58 +00001965 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001966 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9d7730912012-11-27 16:22:52 +00001967 u32 seqno = intel_ring_get_seqno(ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001968
Zou Nan hai852835f2010-05-21 09:08:56 +08001969 BUG_ON(ring == NULL);
Chris Wilson02978ff2013-07-09 09:22:39 +01001970 if (obj->ring != ring && obj->last_write_seqno) {
1971 /* Keep the seqno relative to the current ring */
1972 obj->last_write_seqno = seqno;
1973 }
Chris Wilson05394f32010-11-08 19:18:58 +00001974 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001975
1976 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001977 if (!obj->active) {
1978 drm_gem_object_reference(&obj->base);
1979 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001980 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001981
Chris Wilson05394f32010-11-08 19:18:58 +00001982 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001983
Chris Wilson0201f1e2012-07-20 12:41:01 +01001984 obj->last_read_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00001985
Chris Wilsoncaea7472010-11-12 13:53:37 +00001986 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00001987 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001988
Chris Wilson7dd49062012-03-21 10:48:18 +00001989 /* Bump MRU to take account of the delayed flush */
1990 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1991 struct drm_i915_fence_reg *reg;
1992
1993 reg = &dev_priv->fence_regs[obj->fence_reg];
1994 list_move_tail(&reg->lru_list,
1995 &dev_priv->mm.fence_list);
1996 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00001997 }
1998}
1999
Ben Widawskye2d05a82013-09-24 09:57:58 -07002000void i915_vma_move_to_active(struct i915_vma *vma,
2001 struct intel_ring_buffer *ring)
2002{
2003 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2004 return i915_gem_object_move_to_active(vma->obj, ring);
2005}
2006
Chris Wilsoncaea7472010-11-12 13:53:37 +00002007static void
Chris Wilsoncaea7472010-11-12 13:53:37 +00002008i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2009{
Ben Widawskyca191b12013-07-31 17:00:14 -07002010 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002011 struct i915_address_space *vm;
2012 struct i915_vma *vma;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002013
Chris Wilson65ce3022012-07-20 12:41:02 +01002014 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002015 BUG_ON(!obj->active);
Chris Wilson65ce3022012-07-20 12:41:02 +01002016
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002017 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2018 vma = i915_gem_obj_to_vma(obj, vm);
2019 if (vma && !list_empty(&vma->mm_list))
2020 list_move_tail(&vma->mm_list, &vm->inactive_list);
2021 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002022
Chris Wilson65ce3022012-07-20 12:41:02 +01002023 list_del_init(&obj->ring_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002024 obj->ring = NULL;
2025
Chris Wilson65ce3022012-07-20 12:41:02 +01002026 obj->last_read_seqno = 0;
2027 obj->last_write_seqno = 0;
2028 obj->base.write_domain = 0;
2029
2030 obj->last_fenced_seqno = 0;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002031 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002032
2033 obj->active = 0;
2034 drm_gem_object_unreference(&obj->base);
2035
2036 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08002037}
Eric Anholt673a3942008-07-30 12:06:12 -07002038
Chris Wilson9d7730912012-11-27 16:22:52 +00002039static int
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002040i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002041{
Chris Wilson9d7730912012-11-27 16:22:52 +00002042 struct drm_i915_private *dev_priv = dev->dev_private;
2043 struct intel_ring_buffer *ring;
2044 int ret, i, j;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002045
Chris Wilson107f27a52012-12-10 13:56:17 +02002046 /* Carefully retire all requests without writing to the rings */
Chris Wilson9d7730912012-11-27 16:22:52 +00002047 for_each_ring(ring, dev_priv, i) {
Chris Wilson107f27a52012-12-10 13:56:17 +02002048 ret = intel_ring_idle(ring);
2049 if (ret)
2050 return ret;
Chris Wilson9d7730912012-11-27 16:22:52 +00002051 }
Chris Wilson9d7730912012-11-27 16:22:52 +00002052 i915_gem_retire_requests(dev);
Chris Wilson107f27a52012-12-10 13:56:17 +02002053
2054 /* Finally reset hw state */
Chris Wilson9d7730912012-11-27 16:22:52 +00002055 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002056 intel_ring_init_seqno(ring, seqno);
Mika Kuoppala498d2ac2012-12-04 15:12:04 +02002057
Chris Wilson9d7730912012-11-27 16:22:52 +00002058 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2059 ring->sync_seqno[j] = 0;
2060 }
2061
2062 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002063}
2064
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002065int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2066{
2067 struct drm_i915_private *dev_priv = dev->dev_private;
2068 int ret;
2069
2070 if (seqno == 0)
2071 return -EINVAL;
2072
2073	/* The seqno stored in the HWS page needs to be set to a value less
2074	 * than the one we will inject into the ring
2075 */
2076 ret = i915_gem_init_seqno(dev, seqno - 1);
2077 if (ret)
2078 return ret;
2079
2080 /* Carefully set the last_seqno value so that wrap
2081 * detection still works
2082 */
2083 dev_priv->next_seqno = seqno;
2084 dev_priv->last_seqno = seqno - 1;
2085 if (dev_priv->last_seqno == 0)
2086 dev_priv->last_seqno--;
2087
2088 return 0;
2089}
2090
Chris Wilson9d7730912012-11-27 16:22:52 +00002091int
2092i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002093{
Chris Wilson9d7730912012-11-27 16:22:52 +00002094 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002095
Chris Wilson9d7730912012-11-27 16:22:52 +00002096 /* reserve 0 for non-seqno */
2097 if (dev_priv->next_seqno == 0) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002098 int ret = i915_gem_init_seqno(dev, 0);
Chris Wilson9d7730912012-11-27 16:22:52 +00002099 if (ret)
2100 return ret;
2101
2102 dev_priv->next_seqno = 1;
2103 }
2104
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02002105 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
Chris Wilson9d7730912012-11-27 16:22:52 +00002106 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002107}
2108
Mika Kuoppala0025c072013-06-12 12:35:30 +03002109int __i915_add_request(struct intel_ring_buffer *ring,
2110 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002111 struct drm_i915_gem_object *obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002112 u32 *out_seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002113{
Chris Wilsondb53a302011-02-03 11:57:46 +00002114 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Chris Wilsonacb868d2012-09-26 13:47:30 +01002115 struct drm_i915_gem_request *request;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002116 u32 request_ring_position, request_start;
Eric Anholt673a3942008-07-30 12:06:12 -07002117 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01002118 int ret;
2119
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002120 request_start = intel_ring_get_tail(ring);
Daniel Vettercc889e02012-06-13 20:45:19 +02002121 /*
2122 * Emit any outstanding flushes - execbuf can fail to emit the flush
2123 * after having emitted the batchbuffer command. Hence we need to fix
2124 * things up similar to emitting the lazy request. The difference here
2125 * is that the flush _must_ happen before the next request, no matter
2126 * what.
2127 */
Chris Wilsona7b97612012-07-20 12:41:08 +01002128 ret = intel_ring_flush_all_caches(ring);
2129 if (ret)
2130 return ret;
Daniel Vettercc889e02012-06-13 20:45:19 +02002131
Chris Wilson3c0e2342013-09-04 10:45:52 +01002132 request = ring->preallocated_lazy_request;
2133 if (WARN_ON(request == NULL))
Chris Wilsonacb868d2012-09-26 13:47:30 +01002134 return -ENOMEM;
Daniel Vettercc889e02012-06-13 20:45:19 +02002135
Chris Wilsona71d8d92012-02-15 11:25:36 +00002136 /* Record the position of the start of the request so that
2137 * should we detect the updated seqno part-way through the
2138 * GPU processing the request, we never over-estimate the
2139 * position of the head.
2140 */
2141 request_ring_position = intel_ring_get_tail(ring);
2142
Chris Wilson9d7730912012-11-27 16:22:52 +00002143 ret = ring->add_request(ring);
Chris Wilson3c0e2342013-09-04 10:45:52 +01002144 if (ret)
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002145 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002146
Chris Wilson9d7730912012-11-27 16:22:52 +00002147 request->seqno = intel_ring_get_seqno(ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08002148 request->ring = ring;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002149 request->head = request_start;
Chris Wilsona71d8d92012-02-15 11:25:36 +00002150 request->tail = request_ring_position;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002151
2152 /* Whilst this request exists, batch_obj will be on the
2153 * active_list, and so will hold the active reference. Only when this
2154	 * request is retired will the batch_obj be moved onto the
2155 * inactive_list and lose its active reference. Hence we do not need
2156 * to explicitly hold another reference here.
2157 */
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002158 request->batch_obj = obj;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002159
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002160 /* Hold a reference to the current context so that we can inspect
2161 * it later in case a hangcheck error event fires.
2162 */
2163 request->ctx = ring->last_context;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002164 if (request->ctx)
2165 i915_gem_context_reference(request->ctx);
2166
Eric Anholt673a3942008-07-30 12:06:12 -07002167 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08002168 was_empty = list_empty(&ring->request_list);
2169 list_add_tail(&request->list, &ring->request_list);
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002170 request->file_priv = NULL;
Zou Nan hai852835f2010-05-21 09:08:56 +08002171
Chris Wilsondb53a302011-02-03 11:57:46 +00002172 if (file) {
2173 struct drm_i915_file_private *file_priv = file->driver_priv;
2174
Chris Wilson1c255952010-09-26 11:03:27 +01002175 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002176 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002177 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002178 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01002179 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00002180 }
Eric Anholt673a3942008-07-30 12:06:12 -07002181
Chris Wilson9d7730912012-11-27 16:22:52 +00002182 trace_i915_gem_request_add(ring, request->seqno);
Chris Wilson18235212013-09-04 10:45:51 +01002183 ring->outstanding_lazy_seqno = 0;
Chris Wilson3c0e2342013-09-04 10:45:52 +01002184 ring->preallocated_lazy_request = NULL;
Chris Wilsondb53a302011-02-03 11:57:46 +00002185
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002186 if (!dev_priv->ums.mm_suspended) {
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002187 i915_queue_hangcheck(ring->dev);
2188
Chris Wilsonf047e392012-07-21 12:31:41 +01002189 if (was_empty) {
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002190 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
Chris Wilsonb3b079d2010-09-13 23:44:34 +01002191 queue_delayed_work(dev_priv->wq,
Chris Wilsonbcb45082012-10-05 17:02:57 +01002192 &dev_priv->mm.retire_work,
2193 round_jiffies_up_relative(HZ));
Chris Wilsonf047e392012-07-21 12:31:41 +01002194 intel_mark_busy(dev_priv->dev);
2195 }
Ben Gamarif65d9422009-09-14 17:48:44 -04002196 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002197
Chris Wilsonacb868d2012-09-26 13:47:30 +01002198 if (out_seqno)
Chris Wilson9d7730912012-11-27 16:22:52 +00002199 *out_seqno = request->seqno;
Chris Wilson3cce4692010-10-27 16:11:02 +01002200 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002201}
2202
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002203static inline void
2204i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07002205{
Chris Wilson1c255952010-09-26 11:03:27 +01002206 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002207
Chris Wilson1c255952010-09-26 11:03:27 +01002208 if (!file_priv)
2209 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002210
Chris Wilson1c255952010-09-26 11:03:27 +01002211 spin_lock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002212 list_del(&request->client_list);
2213 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01002214 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002215}
2216
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002217static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2218 struct i915_address_space *vm)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002219{
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002220 if (acthd >= i915_gem_obj_offset(obj, vm) &&
2221 acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002222 return true;
2223
2224 return false;
2225}
2226
2227static bool i915_head_inside_request(const u32 acthd_unmasked,
2228 const u32 request_start,
2229 const u32 request_end)
2230{
2231 const u32 acthd = acthd_unmasked & HEAD_ADDR;
2232
2233 if (request_start < request_end) {
2234 if (acthd >= request_start && acthd < request_end)
2235 return true;
2236 } else if (request_start > request_end) {
2237 if (acthd >= request_start || acthd < request_end)
2238 return true;
2239 }
2240
2241 return false;
2242}
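
/*
 * Worked example (illustrative only): for a request that wrapped and
 * occupies [0xf000, 0x0100) in the ring, an ACTHD of 0xf800 or 0x0080
 * counts as inside the request, whereas 0x8000 does not.  In the
 * non-wrapped case the check is a plain start <= acthd < end test.
 */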
2243
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002244static struct i915_address_space *
2245request_to_vm(struct drm_i915_gem_request *request)
2246{
2247 struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2248 struct i915_address_space *vm;
2249
2250 vm = &dev_priv->gtt.base;
2251
2252 return vm;
2253}
2254
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002255static bool i915_request_guilty(struct drm_i915_gem_request *request,
2256 const u32 acthd, bool *inside)
2257{
2258 /* There is a possibility that unmasked head address
2259 * pointing inside the ring, matches the batch_obj address range.
2260 * However this is extremely unlikely.
2261 */
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002262 if (request->batch_obj) {
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002263 if (i915_head_inside_object(acthd, request->batch_obj,
2264 request_to_vm(request))) {
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002265 *inside = true;
2266 return true;
2267 }
2268 }
2269
2270 if (i915_head_inside_request(acthd, request->head, request->tail)) {
2271 *inside = false;
2272 return true;
2273 }
2274
2275 return false;
2276}
2277
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002278static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
2279{
2280 const unsigned long elapsed = get_seconds() - hs->guilty_ts;
2281
2282 if (hs->banned)
2283 return true;
2284
2285 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2286 DRM_ERROR("context hanging too fast, declaring banned!\n");
2287 return true;
2288 }
2289
2290 return false;
2291}
2292
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002293static void i915_set_reset_status(struct intel_ring_buffer *ring,
2294 struct drm_i915_gem_request *request,
2295 u32 acthd)
2296{
2297 struct i915_ctx_hang_stats *hs = NULL;
2298 bool inside, guilty;
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002299 unsigned long offset = 0;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002300
2301 /* Innocent until proven guilty */
2302 guilty = false;
2303
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002304 if (request->batch_obj)
2305 offset = i915_gem_obj_offset(request->batch_obj,
2306 request_to_vm(request));
2307
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002308 if (ring->hangcheck.action != HANGCHECK_WAIT &&
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002309 i915_request_guilty(request, acthd, &inside)) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002310 DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002311 ring->name,
2312 inside ? "inside" : "flushing",
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002313 offset,
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002314 request->ctx ? request->ctx->id : 0,
2315 acthd);
2316
2317 guilty = true;
2318 }
2319
2320	/* If contexts are disabled or this is the default context, use
2321	 * the hang stats of the file's default context.
2322	 */
2323 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2324 hs = &request->ctx->hang_stats;
2325 else if (request->file_priv)
Ben Widawskyc4829722013-12-06 14:11:20 -08002326 hs = &request->file_priv->private_default_ctx->hang_stats;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002327
2328 if (hs) {
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002329 if (guilty) {
2330 hs->banned = i915_context_is_banned(hs);
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002331 hs->batch_active++;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002332 hs->guilty_ts = get_seconds();
2333 } else {
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002334 hs->batch_pending++;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002335 }
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002336 }
2337}
2338
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002339static void i915_gem_free_request(struct drm_i915_gem_request *request)
2340{
2341 list_del(&request->list);
2342 i915_gem_request_remove_from_client(request);
2343
2344 if (request->ctx)
2345 i915_gem_context_unreference(request->ctx);
2346
2347 kfree(request);
2348}
2349
Chris Wilsondfaae392010-09-22 10:31:52 +01002350static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2351 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01002352{
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002353 u32 completed_seqno;
2354 u32 acthd;
2355
2356 acthd = intel_ring_get_active_head(ring);
2357 completed_seqno = ring->get_seqno(ring, false);
2358
Chris Wilsondfaae392010-09-22 10:31:52 +01002359 while (!list_empty(&ring->request_list)) {
2360 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01002361
Chris Wilsondfaae392010-09-22 10:31:52 +01002362 request = list_first_entry(&ring->request_list,
2363 struct drm_i915_gem_request,
2364 list);
2365
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002366 if (request->seqno > completed_seqno)
2367 i915_set_reset_status(ring, request, acthd);
2368
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002369 i915_gem_free_request(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01002370 }
2371
2372 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002373 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07002374
Chris Wilson05394f32010-11-08 19:18:58 +00002375 obj = list_first_entry(&ring->active_list,
2376 struct drm_i915_gem_object,
2377 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002378
Chris Wilson05394f32010-11-08 19:18:58 +00002379 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002380 }
Eric Anholt673a3942008-07-30 12:06:12 -07002381}
2382
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002383void i915_gem_restore_fences(struct drm_device *dev)
Chris Wilson312817a2010-11-22 11:50:11 +00002384{
2385 struct drm_i915_private *dev_priv = dev->dev_private;
2386 int i;
2387
Daniel Vetter4b9de732011-10-09 21:52:02 +02002388 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00002389 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00002390
Daniel Vetter94a335d2013-07-17 14:51:28 +02002391 /*
2392 * Commit delayed tiling changes if we have an object still
2393 * attached to the fence, otherwise just clear the fence.
2394 */
2395 if (reg->obj) {
2396 i915_gem_object_update_fence(reg->obj, reg,
2397 reg->obj->tiling_mode);
2398 } else {
2399 i915_gem_write_fence(dev, i, NULL);
2400 }
Chris Wilson312817a2010-11-22 11:50:11 +00002401 }
2402}
2403
Chris Wilson069efc12010-09-30 16:53:18 +01002404void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002405{
Chris Wilsondfaae392010-09-22 10:31:52 +01002406 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002407 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002408 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002409
Chris Wilsonb4519512012-05-11 14:29:30 +01002410 for_each_ring(ring, dev_priv, i)
2411 i915_gem_reset_ring_lists(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01002412
Ben Widawsky3d57e5b2013-10-14 10:01:36 -07002413 i915_gem_cleanup_ringbuffer(dev);
2414
Ben Widawskyacce9ff2013-12-06 14:11:03 -08002415 i915_gem_context_reset(dev);
2416
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002417 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002418}
2419
2420/**
2421 * This function clears the request list as sequence numbers are passed.
2422 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00002423void
Chris Wilsondb53a302011-02-03 11:57:46 +00002424i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002425{
Ben Widawskye2078042013-12-06 14:11:22 -08002426 LIST_HEAD(deferred_request_free);
2427 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07002428 uint32_t seqno;
2429
Chris Wilsondb53a302011-02-03 11:57:46 +00002430 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01002431 return;
2432
Chris Wilsondb53a302011-02-03 11:57:46 +00002433 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002434
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01002435 seqno = ring->get_seqno(ring, true);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002436
Zou Nan hai852835f2010-05-21 09:08:56 +08002437 while (!list_empty(&ring->request_list)) {
Zou Nan hai852835f2010-05-21 09:08:56 +08002438 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07002439 struct drm_i915_gem_request,
2440 list);
Eric Anholt673a3942008-07-30 12:06:12 -07002441
Chris Wilsondfaae392010-09-22 10:31:52 +01002442 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07002443 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002444
Chris Wilsondb53a302011-02-03 11:57:46 +00002445 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002446 /* We know the GPU must have read the request to have
2447 * sent us the seqno + interrupt, so use the position
 2448	 * of the tail of the request to update the last known position
2449 * of the GPU head.
2450 */
2451 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002452
Ben Widawskye2078042013-12-06 14:11:22 -08002453 list_move_tail(&request->list, &deferred_request_free);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002454 }
2455
2456 /* Move any buffers on the active list that are no longer referenced
2457 * by the ringbuffer to the flushing/inactive lists as appropriate.
2458 */
2459 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002460 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002461
Akshay Joshi0206e352011-08-16 15:34:10 -04002462 obj = list_first_entry(&ring->active_list,
Chris Wilson05394f32010-11-08 19:18:58 +00002463 struct drm_i915_gem_object,
2464 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002465
Chris Wilson0201f1e2012-07-20 12:41:01 +01002466 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002467 break;
2468
Chris Wilson65ce3022012-07-20 12:41:02 +01002469 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002470 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002471
Chris Wilsondb53a302011-02-03 11:57:46 +00002472 if (unlikely(ring->trace_irq_seqno &&
2473 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002474 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00002475 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002476 }
Chris Wilson23bc5982010-09-29 16:10:57 +01002477
Ben Widawskye2078042013-12-06 14:11:22 -08002478	/* Finish processing the active list before freeing the deferred requests */
2479 while (!list_empty(&deferred_request_free)) {
2480 request = list_first_entry(&deferred_request_free,
2481 struct drm_i915_gem_request,
2482 list);
2483 i915_gem_free_request(request);
2484 }
Chris Wilsondb53a302011-02-03 11:57:46 +00002485 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002486}
2487
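/*
 * Illustrative sketch only (not part of the driver): the retirement loop
 * above relies on i915_seqno_passed(), which compares sequence numbers in
 * signed 32-bit arithmetic so the ordering survives wrap-around of the u32
 * seqno counter.  A minimal stand-alone equivalent:
 */
#if 0
static bool example_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* True when seq1 is at or after seq2, even across a u32 wrap. */
	return (int32_t)(seq1 - seq2) >= 0;
}
#endif
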
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002488bool
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002489i915_gem_retire_requests(struct drm_device *dev)
2490{
2491 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002492 struct intel_ring_buffer *ring;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002493 bool idle = true;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002494 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002495
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002496 for_each_ring(ring, dev_priv, i) {
Chris Wilsonb4519512012-05-11 14:29:30 +01002497 i915_gem_retire_requests_ring(ring);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002498 idle &= list_empty(&ring->request_list);
2499 }
2500
2501 if (idle)
2502 mod_delayed_work(dev_priv->wq,
2503 &dev_priv->mm.idle_work,
2504 msecs_to_jiffies(100));
2505
2506 return idle;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002507}
2508
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002509static void
Eric Anholt673a3942008-07-30 12:06:12 -07002510i915_gem_retire_work_handler(struct work_struct *work)
2511{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002512 struct drm_i915_private *dev_priv =
2513 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2514 struct drm_device *dev = dev_priv->dev;
Chris Wilson0a587052011-01-09 21:05:44 +00002515 bool idle;
Eric Anholt673a3942008-07-30 12:06:12 -07002516
Chris Wilson891b48c2010-09-29 12:26:37 +01002517 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002518 idle = false;
2519 if (mutex_trylock(&dev->struct_mutex)) {
2520 idle = i915_gem_retire_requests(dev);
2521 mutex_unlock(&dev->struct_mutex);
2522 }
2523 if (!idle)
Chris Wilsonbcb45082012-10-05 17:02:57 +01002524 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2525 round_jiffies_up_relative(HZ));
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002526}
Chris Wilson891b48c2010-09-29 12:26:37 +01002527
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002528static void
2529i915_gem_idle_work_handler(struct work_struct *work)
2530{
2531 struct drm_i915_private *dev_priv =
2532 container_of(work, typeof(*dev_priv), mm.idle_work.work);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002533
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002534 intel_mark_idle(dev_priv->dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002535}
2536
Ben Widawsky5816d642012-04-11 11:18:19 -07002537/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002538 * Ensures that an object will eventually get non-busy by flushing any required
 2539 * write domains, emitting any outstanding lazy request and retiring any
2540 * completed requests.
2541 */
2542static int
2543i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2544{
2545 int ret;
2546
2547 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002548 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002549 if (ret)
2550 return ret;
2551
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002552 i915_gem_retire_requests_ring(obj->ring);
2553 }
2554
2555 return 0;
2556}
2557
2558/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002559 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2560 * @DRM_IOCTL_ARGS: standard ioctl arguments
2561 *
2562 * Returns 0 if successful, else an error is returned with the remaining time in
2563 * the timeout parameter.
2564 * -ETIME: object is still busy after timeout
2565 * -ERESTARTSYS: signal interrupted the wait
 2566 * -ENOENT: object doesn't exist
2567 * Also possible, but rare:
2568 * -EAGAIN: GPU wedged
2569 * -ENOMEM: damn
2570 * -ENODEV: Internal IRQ fail
2571 * -E?: The add request failed
2572 *
2573 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2574 * non-zero timeout parameter the wait ioctl will wait for the given number of
2575 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 2576 * without holding struct_mutex, the object may become re-busied before this
 2577 * function completes. A similar but shorter race condition exists in the busy
 2578 * ioctl.
2579 */
2580int
2581i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2582{
Daniel Vetterf69061b2012-12-06 09:01:42 +01002583 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002584 struct drm_i915_gem_wait *args = data;
2585 struct drm_i915_gem_object *obj;
2586 struct intel_ring_buffer *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002587 struct timespec timeout_stack, *timeout = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01002588 unsigned reset_counter;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002589 u32 seqno = 0;
2590 int ret = 0;
2591
Ben Widawskyeac1f142012-06-05 15:24:24 -07002592 if (args->timeout_ns >= 0) {
2593 timeout_stack = ns_to_timespec(args->timeout_ns);
2594 timeout = &timeout_stack;
2595 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002596
2597 ret = i915_mutex_lock_interruptible(dev);
2598 if (ret)
2599 return ret;
2600
2601 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2602 if (&obj->base == NULL) {
2603 mutex_unlock(&dev->struct_mutex);
2604 return -ENOENT;
2605 }
2606
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002607 /* Need to make sure the object gets inactive eventually. */
2608 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002609 if (ret)
2610 goto out;
2611
2612 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002613 seqno = obj->last_read_seqno;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002614 ring = obj->ring;
2615 }
2616
2617 if (seqno == 0)
2618 goto out;
2619
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002620 /* Do this after OLR check to make sure we make forward progress polling
2621 * on this IOCTL with a 0 timeout (like busy ioctl)
2622 */
2623 if (!args->timeout_ns) {
2624 ret = -ETIME;
2625 goto out;
2626 }
2627
2628 drm_gem_object_unreference(&obj->base);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002629 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002630 mutex_unlock(&dev->struct_mutex);
2631
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002632 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03002633 if (timeout)
Ben Widawskyeac1f142012-06-05 15:24:24 -07002634 args->timeout_ns = timespec_to_ns(timeout);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002635 return ret;
2636
2637out:
2638 drm_gem_object_unreference(&obj->base);
2639 mutex_unlock(&dev->struct_mutex);
2640 return ret;
2641}
2642
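/*
 * Illustrative userspace sketch only (not part of the driver): how the wait
 * ioctl documented above is typically invoked through libdrm.  The helper
 * name and error handling are assumptions for the example.
 */
#if 0
#include <stdint.h>
#include <drm/i915_drm.h>
#include <xf86drm.h>

static int example_wait_bo(int fd, uint32_t handle, int64_t timeout_ns)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = timeout_ns,	/* 0 behaves like the busy ioctl */
	};

	/* 0 on success; -1 with errno == ETIME if the object is still busy. */
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}
#endif
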
2643/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002644 * i915_gem_object_sync - sync an object to a ring.
2645 *
2646 * @obj: object which may be in use on another ring.
2647 * @to: ring we wish to use the object on. May be NULL.
2648 *
2649 * This code is meant to abstract object synchronization with the GPU.
2650 * Calling with NULL implies synchronizing the object with the CPU
2651 * rather than a particular GPU ring.
2652 *
2653 * Returns 0 if successful, else propagates up the lower layer error.
2654 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002655int
2656i915_gem_object_sync(struct drm_i915_gem_object *obj,
2657 struct intel_ring_buffer *to)
2658{
2659 struct intel_ring_buffer *from = obj->ring;
2660 u32 seqno;
2661 int ret, idx;
2662
2663 if (from == NULL || to == from)
2664 return 0;
2665
Ben Widawsky5816d642012-04-11 11:18:19 -07002666 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Chris Wilson0201f1e2012-07-20 12:41:01 +01002667 return i915_gem_object_wait_rendering(obj, false);
Ben Widawsky2911a352012-04-05 14:47:36 -07002668
2669 idx = intel_ring_sync_index(from, to);
2670
Chris Wilson0201f1e2012-07-20 12:41:01 +01002671 seqno = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002672 if (seqno <= from->sync_seqno[idx])
2673 return 0;
2674
Ben Widawskyb4aca012012-04-25 20:50:12 -07002675 ret = i915_gem_check_olr(obj->ring, seqno);
2676 if (ret)
2677 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002678
Chris Wilsonb52b89d2013-09-25 11:43:28 +01002679 trace_i915_gem_ring_sync_to(from, to, seqno);
Ben Widawsky1500f7e2012-04-11 11:18:21 -07002680 ret = to->sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002681 if (!ret)
Mika Kuoppala7b01e262012-11-28 17:18:45 +02002682 /* We use last_read_seqno because sync_to()
2683 * might have just caused seqno wrap under
2684 * the radar.
2685 */
2686 from->sync_seqno[idx] = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002687
Ben Widawskye3a5a222012-04-11 11:18:20 -07002688 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002689}
2690
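/*
 * Illustrative sketch only (not part of the driver): the sync_seqno[] check
 * above caches the last seqno a ring pair has already waited for, so a new
 * semaphore wait is only emitted for strictly newer work.  In miniature:
 */
#if 0
static bool example_semaphore_wait_needed(u32 last_read_seqno,
					  u32 already_synced_seqno)
{
	/* Matches the "seqno <= from->sync_seqno[idx]" early return above. */
	return last_read_seqno > already_synced_seqno;
}
#endif
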
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002691static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2692{
2693 u32 old_write_domain, old_read_domains;
2694
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002695 /* Force a pagefault for domain tracking on next user access */
2696 i915_gem_release_mmap(obj);
2697
Keith Packardb97c3d92011-06-24 21:02:59 -07002698 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2699 return;
2700
Chris Wilson97c809fd2012-10-09 19:24:38 +01002701 /* Wait for any direct GTT access to complete */
2702 mb();
2703
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002704 old_read_domains = obj->base.read_domains;
2705 old_write_domain = obj->base.write_domain;
2706
2707 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2708 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2709
2710 trace_i915_gem_object_change_domain(obj,
2711 old_read_domains,
2712 old_write_domain);
2713}
2714
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002715int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002716{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002717 struct drm_i915_gem_object *obj = vma->obj;
Daniel Vetter7bddb012012-02-09 17:15:47 +01002718 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson43e28f02013-01-08 10:53:09 +00002719 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002720
Daniel Vetterb93dab62013-08-26 11:23:47 +02002721 /* For now we only ever use 1 vma per object */
2722 WARN_ON(!list_is_singular(&obj->vma_list));
2723
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002724 if (list_empty(&vma->vma_link))
Eric Anholt673a3942008-07-30 12:06:12 -07002725 return 0;
2726
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002727 if (!drm_mm_node_allocated(&vma->node)) {
2728 i915_gem_vma_destroy(vma);
2729
2730 return 0;
2731 }
Ben Widawsky433544b2013-08-13 18:09:06 -07002732
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002733 if (vma->pin_count)
Chris Wilson31d8d652012-05-24 19:11:20 +01002734 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002735
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002736 BUG_ON(obj->pages == NULL);
2737
Chris Wilsona8198ee2011-04-13 22:04:09 +01002738 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002739 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002740 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002741	/* Continue on if we fail due to EIO: the GPU is hung, so we
 2742	 * should be safe, and we need to clean up or else we might
 2743	 * cause memory corruption through use-after-free.
2744 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002745
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002746 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002747
Daniel Vetter96b47b62009-12-15 17:50:00 +01002748 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002749 ret = i915_gem_object_put_fence(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002750 if (ret)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002751 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002752
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002753 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00002754
Ben Widawsky6f65e292013-12-06 14:10:56 -08002755 vma->unbind_vma(vma);
2756
Daniel Vetter74163902012-02-15 23:50:21 +01002757 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002758
Ben Widawskyca191b12013-07-31 17:00:14 -07002759 list_del(&vma->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002760 /* Avoid an unnecessary call to unbind on rebind. */
Ben Widawsky5cacaac2013-07-31 17:00:13 -07002761 if (i915_is_ggtt(vma->vm))
2762 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002763
Ben Widawsky2f633152013-07-17 12:19:03 -07002764 drm_mm_remove_node(&vma->node);
2765 i915_gem_vma_destroy(vma);
2766
2767 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002768 * no more VMAs exist. */
Ben Widawsky2f633152013-07-17 12:19:03 -07002769 if (list_empty(&obj->vma_list))
2770 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002771
Chris Wilson70903c32013-12-04 09:59:09 +00002772	/* Finally, now that the object is completely decoupled from this vma,
2773 * we can drop its hold on the backing storage and allow it to be
2774 * reaped by the shrinker.
2775 */
2776 i915_gem_object_unpin_pages(obj);
2777
Chris Wilson88241782011-01-07 17:09:48 +00002778 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002779}
2780
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002781/**
2782 * Unbinds an object from the global GTT aperture.
2783 */
2784int
2785i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2786{
2787 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2788 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2789
Dan Carpenter58e73e12013-08-09 12:44:11 +03002790 if (!i915_gem_obj_ggtt_bound(obj))
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002791 return 0;
2792
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002793 if (i915_gem_obj_to_ggtt(obj)->pin_count)
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002794 return -EBUSY;
2795
2796 BUG_ON(obj->pages == NULL);
2797
2798 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2799}
2800
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002801int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002802{
2803 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002804 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002805 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002806
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002807 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002808 for_each_ring(ring, dev_priv, i) {
Ben Widawsky41bde552013-12-06 14:11:21 -08002809 ret = i915_switch_context(ring, NULL, ring->default_context);
Ben Widawskyb6c74882012-08-14 14:35:14 -07002810 if (ret)
2811 return ret;
2812
Chris Wilson3e960502012-11-27 16:22:54 +00002813 ret = intel_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002814 if (ret)
2815 return ret;
2816 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002817
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002818 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002819}
2820
Chris Wilson9ce079e2012-04-17 15:31:30 +01002821static void i965_write_fence_reg(struct drm_device *dev, int reg,
2822 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002823{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002824 drm_i915_private_t *dev_priv = dev->dev_private;
Imre Deak56c844e2013-01-07 21:47:34 +02002825 int fence_reg;
2826 int fence_pitch_shift;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002827
Imre Deak56c844e2013-01-07 21:47:34 +02002828 if (INTEL_INFO(dev)->gen >= 6) {
2829 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2830 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2831 } else {
2832 fence_reg = FENCE_REG_965_0;
2833 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2834 }
2835
Chris Wilsond18b9612013-07-10 13:36:23 +01002836 fence_reg += reg * 8;
2837
 2838	/* To work around incoherency with non-atomic 64-bit register updates,
2839 * we split the 64-bit update into two 32-bit writes. In order
2840 * for a partial fence not to be evaluated between writes, we
2841 * precede the update with write to turn off the fence register,
2842 * and only enable the fence as the last step.
2843 *
2844 * For extra levels of paranoia, we make sure each step lands
2845 * before applying the next step.
2846 */
2847 I915_WRITE(fence_reg, 0);
2848 POSTING_READ(fence_reg);
2849
Chris Wilson9ce079e2012-04-17 15:31:30 +01002850 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002851 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilsond18b9612013-07-10 13:36:23 +01002852 uint64_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002853
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002854 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
Chris Wilson9ce079e2012-04-17 15:31:30 +01002855 0xfffff000) << 32;
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002856 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
Imre Deak56c844e2013-01-07 21:47:34 +02002857 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002858 if (obj->tiling_mode == I915_TILING_Y)
2859 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2860 val |= I965_FENCE_REG_VALID;
Daniel Vetterc6642782010-11-12 13:46:18 +00002861
Chris Wilsond18b9612013-07-10 13:36:23 +01002862 I915_WRITE(fence_reg + 4, val >> 32);
2863 POSTING_READ(fence_reg + 4);
2864
2865 I915_WRITE(fence_reg + 0, val);
2866 POSTING_READ(fence_reg);
2867 } else {
2868 I915_WRITE(fence_reg + 4, 0);
2869 POSTING_READ(fence_reg + 4);
2870 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002871}
2872
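/*
 * Worked example (illustrative, values assumed): for an object at GGTT
 * offset 0x00100000 of size 0x00020000 with a 512-byte stride and Y tiling,
 * the 64-bit fence value assembled above is
 *
 *   end = (0x00100000 + 0x00020000 - 4096) & 0xfffff000 = 0x0011f000
 *   val = (u64)end << 32
 *       | (0x00100000 & 0xfffff000)               -- start address
 *       | ((512 / 128) - 1) << fence_pitch_shift  -- pitch field of 3
 *       | (1 << I965_FENCE_TILING_Y_SHIFT)
 *       | I965_FENCE_REG_VALID;
 *
 * which is then written high dword first, after the register has been
 * disabled, exactly as the two-step sequence above requires.
 */
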
Chris Wilson9ce079e2012-04-17 15:31:30 +01002873static void i915_write_fence_reg(struct drm_device *dev, int reg,
2874 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002875{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002876 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002877 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002878
Chris Wilson9ce079e2012-04-17 15:31:30 +01002879 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002880 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002881 int pitch_val;
2882 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002883
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002884 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002885 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002886 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2887 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2888 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002889
2890 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2891 tile_width = 128;
2892 else
2893 tile_width = 512;
2894
2895 /* Note: pitch better be a power of two tile widths */
2896 pitch_val = obj->stride / tile_width;
2897 pitch_val = ffs(pitch_val) - 1;
2898
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002899 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002900 if (obj->tiling_mode == I915_TILING_Y)
2901 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2902 val |= I915_FENCE_SIZE_BITS(size);
2903 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2904 val |= I830_FENCE_REG_VALID;
2905 } else
2906 val = 0;
2907
2908 if (reg < 8)
2909 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002910 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002911 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002912
Chris Wilson9ce079e2012-04-17 15:31:30 +01002913 I915_WRITE(reg, val);
2914 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002915}
2916
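/*
 * Worked example (illustrative): with 512-byte tile widths and a stride of
 * 2048 bytes, pitch_val = 2048 / 512 = 4 tiles, and ffs(4) - 1 = 2 is what
 * lands in the pitch field, i.e. log2 of the tile count -- hence the
 * requirement that the pitch be a power-of-two number of tile widths.
 */
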
Chris Wilson9ce079e2012-04-17 15:31:30 +01002917static void i830_write_fence_reg(struct drm_device *dev, int reg,
2918 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002919{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002920 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002921 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002922
Chris Wilson9ce079e2012-04-17 15:31:30 +01002923 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002924 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002925 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002926
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002927 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002928 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002929 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2930 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2931 i915_gem_obj_ggtt_offset(obj), size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002932
Chris Wilson9ce079e2012-04-17 15:31:30 +01002933 pitch_val = obj->stride / 128;
2934 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002935
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002936 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002937 if (obj->tiling_mode == I915_TILING_Y)
2938 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2939 val |= I830_FENCE_SIZE_BITS(size);
2940 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2941 val |= I830_FENCE_REG_VALID;
2942 } else
2943 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002944
Chris Wilson9ce079e2012-04-17 15:31:30 +01002945 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2946 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2947}
2948
Chris Wilsond0a57782012-10-09 19:24:37 +01002949static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2950{
2951 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2952}
2953
Chris Wilson9ce079e2012-04-17 15:31:30 +01002954static void i915_gem_write_fence(struct drm_device *dev, int reg,
2955 struct drm_i915_gem_object *obj)
2956{
Chris Wilsond0a57782012-10-09 19:24:37 +01002957 struct drm_i915_private *dev_priv = dev->dev_private;
2958
2959 /* Ensure that all CPU reads are completed before installing a fence
2960 * and all writes before removing the fence.
2961 */
2962 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2963 mb();
2964
Daniel Vetter94a335d2013-07-17 14:51:28 +02002965 WARN(obj && (!obj->stride || !obj->tiling_mode),
2966 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2967 obj->stride, obj->tiling_mode);
2968
Chris Wilson9ce079e2012-04-17 15:31:30 +01002969 switch (INTEL_INFO(dev)->gen) {
Ben Widawsky5ab31332013-11-02 21:07:03 -07002970 case 8:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002971 case 7:
Imre Deak56c844e2013-01-07 21:47:34 +02002972 case 6:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002973 case 5:
2974 case 4: i965_write_fence_reg(dev, reg, obj); break;
2975 case 3: i915_write_fence_reg(dev, reg, obj); break;
2976 case 2: i830_write_fence_reg(dev, reg, obj); break;
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08002977 default: BUG();
Chris Wilson9ce079e2012-04-17 15:31:30 +01002978 }
Chris Wilsond0a57782012-10-09 19:24:37 +01002979
2980 /* And similarly be paranoid that no direct access to this region
2981 * is reordered to before the fence is installed.
2982 */
2983 if (i915_gem_object_needs_mb(obj))
2984 mb();
Jesse Barnesde151cf2008-11-12 10:03:55 -08002985}
2986
Chris Wilson61050802012-04-17 15:31:31 +01002987static inline int fence_number(struct drm_i915_private *dev_priv,
2988 struct drm_i915_fence_reg *fence)
2989{
2990 return fence - dev_priv->fence_regs;
2991}
2992
2993static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2994 struct drm_i915_fence_reg *fence,
2995 bool enable)
2996{
Chris Wilson2dc8aae2013-05-22 17:08:06 +01002997 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson46a0b632013-07-10 13:36:24 +01002998 int reg = fence_number(dev_priv, fence);
Chris Wilson61050802012-04-17 15:31:31 +01002999
Chris Wilson46a0b632013-07-10 13:36:24 +01003000 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
Chris Wilson61050802012-04-17 15:31:31 +01003001
3002 if (enable) {
Chris Wilson46a0b632013-07-10 13:36:24 +01003003 obj->fence_reg = reg;
Chris Wilson61050802012-04-17 15:31:31 +01003004 fence->obj = obj;
3005 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3006 } else {
3007 obj->fence_reg = I915_FENCE_REG_NONE;
3008 fence->obj = NULL;
3009 list_del_init(&fence->lru_list);
3010 }
Daniel Vetter94a335d2013-07-17 14:51:28 +02003011 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +01003012}
3013
Chris Wilsond9e86c02010-11-10 16:40:20 +00003014static int
Chris Wilsond0a57782012-10-09 19:24:37 +01003015i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003016{
Chris Wilson1c293ea2012-04-17 15:31:27 +01003017 if (obj->last_fenced_seqno) {
Chris Wilson86d5bc32012-07-20 12:41:04 +01003018 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01003019 if (ret)
3020 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003021
3022 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003023 }
3024
Chris Wilson86d5bc32012-07-20 12:41:04 +01003025 obj->fenced_gpu_access = false;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003026 return 0;
3027}
3028
3029int
3030i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3031{
Chris Wilson61050802012-04-17 15:31:31 +01003032 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003033 struct drm_i915_fence_reg *fence;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003034 int ret;
3035
Chris Wilsond0a57782012-10-09 19:24:37 +01003036 ret = i915_gem_object_wait_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003037 if (ret)
3038 return ret;
3039
Chris Wilson61050802012-04-17 15:31:31 +01003040 if (obj->fence_reg == I915_FENCE_REG_NONE)
3041 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01003042
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003043 fence = &dev_priv->fence_regs[obj->fence_reg];
3044
Chris Wilson61050802012-04-17 15:31:31 +01003045 i915_gem_object_fence_lost(obj);
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003046 i915_gem_object_update_fence(obj, fence, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003047
3048 return 0;
3049}
3050
3051static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01003052i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01003053{
Daniel Vetterae3db242010-02-19 11:51:58 +01003054 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01003055 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003056 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01003057
3058 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003059 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003060 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3061 reg = &dev_priv->fence_regs[i];
3062 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003063 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003064
Chris Wilson1690e1e2011-12-14 13:57:08 +01003065 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003066 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003067 }
3068
Chris Wilsond9e86c02010-11-10 16:40:20 +00003069 if (avail == NULL)
3070 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003071
3072 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003073 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01003074 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01003075 continue;
3076
Chris Wilson8fe301a2012-04-17 15:31:28 +01003077 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003078 }
3079
Chris Wilson8fe301a2012-04-17 15:31:28 +01003080 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003081}
3082
Jesse Barnesde151cf2008-11-12 10:03:55 -08003083/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003084 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08003085 * @obj: object to map through a fence reg
3086 *
3087 * When mapping objects through the GTT, userspace wants to be able to write
3088 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003089 * This function walks the fence regs looking for a free one for @obj,
3090 * stealing one if it can't find any.
3091 *
3092 * It then sets up the reg based on the object's properties: address, pitch
3093 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003094 *
3095 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003096 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003097int
Chris Wilson06d98132012-04-17 15:31:24 +01003098i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08003099{
Chris Wilson05394f32010-11-08 19:18:58 +00003100 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08003101 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01003102 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003103 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003104 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003105
Chris Wilson14415742012-04-17 15:31:33 +01003106 /* Have we updated the tiling parameters upon the object and so
3107 * will need to serialise the write to the associated fence register?
3108 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003109 if (obj->fence_dirty) {
Chris Wilsond0a57782012-10-09 19:24:37 +01003110 ret = i915_gem_object_wait_fence(obj);
Chris Wilson14415742012-04-17 15:31:33 +01003111 if (ret)
3112 return ret;
3113 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003114
Chris Wilsond9e86c02010-11-10 16:40:20 +00003115 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00003116 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3117 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003118 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01003119 list_move_tail(&reg->lru_list,
3120 &dev_priv->mm.fence_list);
3121 return 0;
3122 }
3123 } else if (enable) {
3124 reg = i915_find_fence_reg(dev);
3125 if (reg == NULL)
3126 return -EDEADLK;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003127
Chris Wilson14415742012-04-17 15:31:33 +01003128 if (reg->obj) {
3129 struct drm_i915_gem_object *old = reg->obj;
3130
Chris Wilsond0a57782012-10-09 19:24:37 +01003131 ret = i915_gem_object_wait_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003132 if (ret)
3133 return ret;
3134
Chris Wilson14415742012-04-17 15:31:33 +01003135 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003136 }
Chris Wilson14415742012-04-17 15:31:33 +01003137 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07003138 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07003139
Chris Wilson14415742012-04-17 15:31:33 +01003140 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson14415742012-04-17 15:31:33 +01003141
Chris Wilson9ce079e2012-04-17 15:31:30 +01003142 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003143}
3144
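/*
 * Illustrative kernel-side sketch only (not part of the driver): a typical
 * caller that needs detiled GTT access takes a fence after pinning the
 * object into the mappable aperture.  The pin/unpin helper names here are
 * assumptions for the example.
 */
#if 0
static int example_use_fenced_mapping(struct drm_i915_gem_object *obj)
{
	int ret;

	/* Hypothetical pin into the mappable GGTT (helper name assumed). */
	ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
	if (ret)
		return ret;

	ret = i915_gem_object_get_fence(obj);
	if (ret == 0) {
		/* ... access the object through the fenced GTT mapping ... */
	}

	i915_gem_object_ggtt_unpin(obj);	/* hypothetical unpin (name assumed) */
	return ret;
}
#endif
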
Chris Wilson42d6ab42012-07-26 11:49:32 +01003145static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3146 struct drm_mm_node *gtt_space,
3147 unsigned long cache_level)
3148{
3149 struct drm_mm_node *other;
3150
3151 /* On non-LLC machines we have to be careful when putting differing
3152 * types of snoopable memory together to avoid the prefetcher
Damien Lespiau4239ca72012-12-03 16:26:16 +00003153 * crossing memory domains and dying.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003154 */
3155 if (HAS_LLC(dev))
3156 return true;
3157
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003158 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003159 return true;
3160
3161 if (list_empty(&gtt_space->node_list))
3162 return true;
3163
3164 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3165 if (other->allocated && !other->hole_follows && other->color != cache_level)
3166 return false;
3167
3168 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3169 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3170 return false;
3171
3172 return true;
3173}
3174
3175static void i915_gem_verify_gtt(struct drm_device *dev)
3176{
3177#if WATCH_GTT
3178 struct drm_i915_private *dev_priv = dev->dev_private;
3179 struct drm_i915_gem_object *obj;
3180 int err = 0;
3181
Ben Widawsky35c20a62013-05-31 11:28:48 -07003182 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
Chris Wilson42d6ab42012-07-26 11:49:32 +01003183 if (obj->gtt_space == NULL) {
3184 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3185 err++;
3186 continue;
3187 }
3188
3189 if (obj->cache_level != obj->gtt_space->color) {
3190 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003191 i915_gem_obj_ggtt_offset(obj),
3192 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003193 obj->cache_level,
3194 obj->gtt_space->color);
3195 err++;
3196 continue;
3197 }
3198
3199 if (!i915_gem_valid_gtt_space(dev,
3200 obj->gtt_space,
3201 obj->cache_level)) {
3202 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003203 i915_gem_obj_ggtt_offset(obj),
3204 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003205 obj->cache_level);
3206 err++;
3207 continue;
3208 }
3209 }
3210
3211 WARN_ON(err);
3212#endif
3213}
3214
Jesse Barnesde151cf2008-11-12 10:03:55 -08003215/**
Eric Anholt673a3942008-07-30 12:06:12 -07003216 * Finds free space in the GTT aperture and binds the object there.
3217 */
3218static int
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003219i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3220 struct i915_address_space *vm,
3221 unsigned alignment,
3222 bool map_and_fenceable,
3223 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003224{
Chris Wilson05394f32010-11-08 19:18:58 +00003225 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003226 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter5e783302010-11-14 22:32:36 +01003227 u32 size, fence_size, fence_alignment, unfenced_alignment;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003228 size_t gtt_max =
3229 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
Ben Widawsky2f633152013-07-17 12:19:03 -07003230 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003231 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003232
Chris Wilsone28f8712011-07-18 13:11:49 -07003233 fence_size = i915_gem_get_gtt_size(dev,
3234 obj->base.size,
3235 obj->tiling_mode);
3236 fence_alignment = i915_gem_get_gtt_alignment(dev,
3237 obj->base.size,
Imre Deakd865110c2013-01-07 21:47:33 +02003238 obj->tiling_mode, true);
Chris Wilsone28f8712011-07-18 13:11:49 -07003239 unfenced_alignment =
Imre Deakd865110c2013-01-07 21:47:33 +02003240 i915_gem_get_gtt_alignment(dev,
Chris Wilsone28f8712011-07-18 13:11:49 -07003241 obj->base.size,
Imre Deakd865110c2013-01-07 21:47:33 +02003242 obj->tiling_mode, false);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003243
Eric Anholt673a3942008-07-30 12:06:12 -07003244 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01003245 alignment = map_and_fenceable ? fence_alignment :
3246 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003247 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003248 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3249 return -EINVAL;
3250 }
3251
Chris Wilson05394f32010-11-08 19:18:58 +00003252 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003253
Chris Wilson654fc602010-05-27 13:18:21 +01003254 /* If the object is bigger than the entire aperture, reject it early
3255 * before evicting everything in a vain attempt to find space.
3256 */
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003257 if (obj->base.size > gtt_max) {
Jani Nikula3765f302013-06-07 16:03:50 +03003258 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
Chris Wilsona36689c2013-05-21 16:58:49 +01003259 obj->base.size,
3260 map_and_fenceable ? "mappable" : "total",
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003261 gtt_max);
Chris Wilson654fc602010-05-27 13:18:21 +01003262 return -E2BIG;
3263 }
3264
Chris Wilson37e680a2012-06-07 15:38:42 +01003265 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003266 if (ret)
3267 return ret;
3268
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003269 i915_gem_object_pin_pages(obj);
3270
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003271 BUG_ON(!i915_is_ggtt(vm));
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003272
Ben Widawskyaccfef22013-08-14 11:38:35 +02003273 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
Dan Carpenterdb473b32013-07-19 08:45:46 +03003274 if (IS_ERR(vma)) {
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003275 ret = PTR_ERR(vma);
3276 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003277 }
3278
Ben Widawskyaccfef22013-08-14 11:38:35 +02003279 /* For now we only ever use 1 vma per object */
3280 WARN_ON(!list_is_singular(&obj->vma_list));
3281
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003282search_free:
Ben Widawsky4fe9adb2013-12-06 14:11:24 -08003283 /* FIXME: Some tests are failing when they receive a reloc of 0. To
3284 * prevent this, we simply don't allow the 0th offset. */
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003285 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003286 size, alignment,
Ben Widawsky4fe9adb2013-12-06 14:11:24 -08003287 obj->cache_level, 1, gtt_max,
David Herrmann31e5d7c2013-07-27 13:36:27 +02003288 DRM_MM_SEARCH_DEFAULT);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003289 if (ret) {
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07003290 ret = i915_gem_evict_something(dev, vm, size, alignment,
Chris Wilson42d6ab42012-07-26 11:49:32 +01003291 obj->cache_level,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003292 map_and_fenceable,
3293 nonblocking);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003294 if (ret == 0)
3295 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003296
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003297 goto err_free_vma;
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003298 }
Ben Widawsky2f633152013-07-17 12:19:03 -07003299 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003300 obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003301 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003302 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003303 }
3304
Daniel Vetter74163902012-02-15 23:50:21 +01003305 ret = i915_gem_gtt_prepare_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003306 if (ret)
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003307 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003308
Ben Widawsky35c20a62013-05-31 11:28:48 -07003309 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -07003310 list_add_tail(&vma->mm_list, &vm->inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003311
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003312 if (i915_is_ggtt(vm)) {
3313 bool mappable, fenceable;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003314
Daniel Vetter49987092013-08-14 10:21:23 +02003315 fenceable = (vma->node.size == fence_size &&
3316 (vma->node.start & (fence_alignment - 1)) == 0);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003317
Daniel Vetter49987092013-08-14 10:21:23 +02003318 mappable = (vma->node.start + obj->base.size <=
3319 dev_priv->gtt.mappable_end);
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003320
Ben Widawsky5cacaac2013-07-31 17:00:13 -07003321 obj->map_and_fenceable = mappable && fenceable;
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003322 }
Daniel Vetter75e9e912010-11-04 17:11:09 +01003323
Ben Widawsky7ace7ef2013-08-09 22:12:12 -07003324 WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003325
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003326 trace_i915_vma_bind(vma, map_and_fenceable);
Chris Wilson42d6ab42012-07-26 11:49:32 +01003327 i915_gem_verify_gtt(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003328 return 0;
Ben Widawsky2f633152013-07-17 12:19:03 -07003329
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003330err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003331 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003332err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003333 i915_gem_vma_destroy(vma);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003334err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003335 i915_gem_object_unpin_pages(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003336 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003337}
3338
Chris Wilson000433b2013-08-08 14:41:09 +01003339bool
Chris Wilson2c225692013-08-09 12:26:45 +01003340i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3341 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003342{
Eric Anholt673a3942008-07-30 12:06:12 -07003343 /* If we don't have a page list set up, then we're not pinned
3344 * to GPU, and we can ignore the cache flush because it'll happen
3345 * again at bind time.
3346 */
Chris Wilson05394f32010-11-08 19:18:58 +00003347 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003348 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003349
Imre Deak769ce462013-02-13 21:56:05 +02003350 /*
3351 * Stolen memory is always coherent with the GPU as it is explicitly
3352 * marked as wc by the system, or the system is cache-coherent.
3353 */
3354 if (obj->stolen)
Chris Wilson000433b2013-08-08 14:41:09 +01003355 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003356
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003357 /* If the GPU is snooping the contents of the CPU cache,
3358 * we do not need to manually clear the CPU cache lines. However,
3359 * the caches are only snooped when the render cache is
3360 * flushed/invalidated. As we always have to emit invalidations
3361 * and flushes when moving into and out of the RENDER domain, correct
3362 * snooping behaviour occurs naturally as the result of our domain
3363 * tracking.
3364 */
Chris Wilson2c225692013-08-09 12:26:45 +01003365 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
Chris Wilson000433b2013-08-08 14:41:09 +01003366 return false;
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003367
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003368 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003369 drm_clflush_sg(obj->pages);
Chris Wilson000433b2013-08-08 14:41:09 +01003370
3371 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003372}
3373
3374/** Flushes the GTT write domain for the object if it's dirty. */
3375static void
Chris Wilson05394f32010-11-08 19:18:58 +00003376i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003377{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003378 uint32_t old_write_domain;
3379
Chris Wilson05394f32010-11-08 19:18:58 +00003380 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003381 return;
3382
Chris Wilson63256ec2011-01-04 18:42:07 +00003383 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003384 * to it immediately go to main memory as far as we know, so there's
3385 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003386 *
3387 * However, we do have to enforce the order so that all writes through
3388 * the GTT land before any writes to the device, such as updates to
3389 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08003390 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003391 wmb();
3392
Chris Wilson05394f32010-11-08 19:18:58 +00003393 old_write_domain = obj->base.write_domain;
3394 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003395
3396 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003397 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003398 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003399}
3400
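/*
 * Illustrative note only (not part of the driver): the wmb() above guards
 * sequences like the hypothetical one below, where a write through a GTT
 * mapping must land before a subsequent device write such as a GATT/PTE
 * update.  Register and pointer names here are placeholders.
 */
#if 0
	iowrite32(value, gtt_mapping + offset);	/* write through the GTT */
	wmb();					/* order it before what follows */
	I915_WRITE(SOME_REGISTER, update);	/* hypothetical device write */
#endif
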
3401/** Flushes the CPU write domain for the object if it's dirty. */
3402static void
Chris Wilson2c225692013-08-09 12:26:45 +01003403i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3404 bool force)
Eric Anholte47c68e2008-11-14 13:35:19 -08003405{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003406 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08003407
Chris Wilson05394f32010-11-08 19:18:58 +00003408 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003409 return;
3410
Chris Wilson000433b2013-08-08 14:41:09 +01003411 if (i915_gem_clflush_object(obj, force))
3412 i915_gem_chipset_flush(obj->base.dev);
3413
Chris Wilson05394f32010-11-08 19:18:58 +00003414 old_write_domain = obj->base.write_domain;
3415 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003416
3417 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003418 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003419 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003420}
3421
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003422/**
3423 * Moves a single object to the GTT read, and possibly write domain.
3424 *
3425 * This function returns when the move is complete, including waiting on
3426 * flushes to occur.
3427 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003428int
Chris Wilson20217462010-11-23 15:26:33 +00003429i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003430{
Chris Wilson8325a092012-04-24 15:52:35 +01003431 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003432 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003433 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003434
Eric Anholt02354392008-11-26 13:58:13 -08003435 /* Not valid to be called on unbound objects. */
Ben Widawsky98438772013-07-31 17:00:12 -07003436 if (!i915_gem_obj_bound_any(obj))
Eric Anholt02354392008-11-26 13:58:13 -08003437 return -EINVAL;
3438
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003439 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3440 return 0;
3441
Chris Wilson0201f1e2012-07-20 12:41:01 +01003442 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003443 if (ret)
3444 return ret;
3445
Chris Wilson2c225692013-08-09 12:26:45 +01003446 i915_gem_object_flush_cpu_write_domain(obj, false);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003447
Chris Wilsond0a57782012-10-09 19:24:37 +01003448 /* Serialise direct access to this object with the barriers for
3449 * coherent writes from the GPU, by effectively invalidating the
3450 * GTT domain upon first access.
3451 */
3452 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3453 mb();
3454
Chris Wilson05394f32010-11-08 19:18:58 +00003455 old_write_domain = obj->base.write_domain;
3456 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003457
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003458 /* It should now be out of any other write domains, and we can update
3459 * the domain values for our changes.
3460 */
Chris Wilson05394f32010-11-08 19:18:58 +00003461 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3462 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003463 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003464 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3465 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3466 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003467 }
3468
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003469 trace_i915_gem_object_change_domain(obj,
3470 old_read_domains,
3471 old_write_domain);
3472
Chris Wilson8325a092012-04-24 15:52:35 +01003473 /* And bump the LRU for this access */
Ben Widawskyca191b12013-07-31 17:00:14 -07003474 if (i915_gem_object_is_inactive(obj)) {
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07003475 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Ben Widawskyca191b12013-07-31 17:00:14 -07003476 if (vma)
3477 list_move_tail(&vma->mm_list,
3478 &dev_priv->gtt.base.inactive_list);
3479
3480 }
Chris Wilson8325a092012-04-24 15:52:35 +01003481
Eric Anholte47c68e2008-11-14 13:35:19 -08003482 return 0;
3483}
3484
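/*
 * Illustrative kernel-side sketch only (not part of the driver): callers
 * that intend to write through a GTT mapping move the object into the GTT
 * domain first so the domain tracking above emits the needed flushes and
 * barriers.  The surrounding code is an assumption for the example.
 */
#if 0
	ret = i915_gem_object_set_to_gtt_domain(obj, true);	/* writable */
	if (ret)
		return ret;

	/* ... CPU writes through an io-mapped GTT address are now coherent ... */
#endif
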
Chris Wilsone4ffd172011-04-04 09:44:39 +01003485int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3486 enum i915_cache_level cache_level)
3487{
Daniel Vetter7bddb012012-02-09 17:15:47 +01003488 struct drm_device *dev = obj->base.dev;
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003489 struct i915_vma *vma;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003490 int ret;
3491
3492 if (obj->cache_level == cache_level)
3493 return 0;
3494
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003495 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003496 DRM_DEBUG("can not change the cache level of pinned objects\n");
3497 return -EBUSY;
3498 }
3499
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003500 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3501 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003502 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003503 if (ret)
3504 return ret;
3505
3506 break;
3507 }
Chris Wilson42d6ab42012-07-26 11:49:32 +01003508 }
3509
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003510 if (i915_gem_obj_bound_any(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003511 ret = i915_gem_object_finish_gpu(obj);
3512 if (ret)
3513 return ret;
3514
3515 i915_gem_object_finish_gtt(obj);
3516
3517 /* Before SandyBridge, you could not use tiling or fence
3518 * registers with snooped memory, so relinquish any fences
3519 * currently pointing to our region in the aperture.
3520 */
Chris Wilson42d6ab42012-07-26 11:49:32 +01003521 if (INTEL_INFO(dev)->gen < 6) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003522 ret = i915_gem_object_put_fence(obj);
3523 if (ret)
3524 return ret;
3525 }
3526
Ben Widawsky6f65e292013-12-06 14:10:56 -08003527 list_for_each_entry(vma, &obj->vma_list, vma_link)
3528 vma->bind_vma(vma, cache_level, 0);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003529 }
3530
Chris Wilson2c225692013-08-09 12:26:45 +01003531 list_for_each_entry(vma, &obj->vma_list, vma_link)
3532 vma->node.color = cache_level;
3533 obj->cache_level = cache_level;
3534
3535 if (cpu_write_needs_clflush(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003536 u32 old_read_domains, old_write_domain;
3537
3538 /* If we're coming from LLC cached, then we haven't
3539 * actually been tracking whether the data is in the
3540 * CPU cache or not, since we only allow one bit set
3541 * in obj->write_domain and have been skipping the clflushes.
3542 * Just set it to the CPU cache for now.
3543 */
3544 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003545
3546 old_read_domains = obj->base.read_domains;
3547 old_write_domain = obj->base.write_domain;
3548
3549 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3550 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3551
3552 trace_i915_gem_object_change_domain(obj,
3553 old_read_domains,
3554 old_write_domain);
3555 }
3556
Chris Wilson42d6ab42012-07-26 11:49:32 +01003557 i915_gem_verify_gtt(dev);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003558 return 0;
3559}
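
/*
 * Rough usage sketch for the helper above (illustrative; the caller is
 * assumed to hold struct_mutex and to do its own error handling):
 *
 *	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
 *	if (ret)
 *		DRM_DEBUG("failed to make object uncached: %d\n", ret);
 *
 * Any VMA whose range cannot accept the new level is unbound first, and the
 * PTEs of the remaining bindings are rewritten with the new cacheability.
 */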
3560
Ben Widawsky199adf42012-09-21 17:01:20 -07003561int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3562 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003563{
Ben Widawsky199adf42012-09-21 17:01:20 -07003564 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003565 struct drm_i915_gem_object *obj;
3566 int ret;
3567
3568 ret = i915_mutex_lock_interruptible(dev);
3569 if (ret)
3570 return ret;
3571
3572 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3573 if (&obj->base == NULL) {
3574 ret = -ENOENT;
3575 goto unlock;
3576 }
3577
Chris Wilson651d7942013-08-08 14:41:10 +01003578 switch (obj->cache_level) {
3579 case I915_CACHE_LLC:
3580 case I915_CACHE_L3_LLC:
3581 args->caching = I915_CACHING_CACHED;
3582 break;
3583
Chris Wilson4257d3b2013-08-08 14:41:11 +01003584 case I915_CACHE_WT:
3585 args->caching = I915_CACHING_DISPLAY;
3586 break;
3587
Chris Wilson651d7942013-08-08 14:41:10 +01003588 default:
3589 args->caching = I915_CACHING_NONE;
3590 break;
3591 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003592
3593 drm_gem_object_unreference(&obj->base);
3594unlock:
3595 mutex_unlock(&dev->struct_mutex);
3596 return ret;
3597}
3598
Ben Widawsky199adf42012-09-21 17:01:20 -07003599int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3600 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003601{
Ben Widawsky199adf42012-09-21 17:01:20 -07003602 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003603 struct drm_i915_gem_object *obj;
3604 enum i915_cache_level level;
3605 int ret;
3606
Ben Widawsky199adf42012-09-21 17:01:20 -07003607 switch (args->caching) {
3608 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003609 level = I915_CACHE_NONE;
3610 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003611 case I915_CACHING_CACHED:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003612 level = I915_CACHE_LLC;
3613 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003614 case I915_CACHING_DISPLAY:
3615 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3616 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003617 default:
3618 return -EINVAL;
3619 }
3620
Ben Widawsky3bc29132012-09-26 16:15:20 -07003621 ret = i915_mutex_lock_interruptible(dev);
3622 if (ret)
3623 return ret;
3624
Chris Wilsone6994ae2012-07-10 10:27:08 +01003625 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3626 if (&obj->base == NULL) {
3627 ret = -ENOENT;
3628 goto unlock;
3629 }
3630
3631 ret = i915_gem_object_set_cache_level(obj, level);
3632
3633 drm_gem_object_unreference(&obj->base);
3634unlock:
3635 mutex_unlock(&dev->struct_mutex);
3636 return ret;
3637}
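
/*
 * Userspace-side sketch of the caching ioctls above (illustrative; the
 * drmIoctl() wrapper is assumed to come from libdrm, and the field names
 * follow the args decoded in this file):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle  = handle,
 *		.caching = I915_CACHING_NONE,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 * I915_CACHING_CACHED maps to an LLC cache level, I915_CACHING_DISPLAY to
 * write-through where the hardware supports it, and I915_CACHING_NONE to
 * uncached.
 */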
3638
Chris Wilsoncc98b412013-08-09 12:25:09 +01003639static bool is_pin_display(struct drm_i915_gem_object *obj)
3640{
3641 /* There are 3 sources that pin objects:
3642 * 1. The display engine (scanouts, sprites, cursors);
3643 * 2. Reservations for execbuffer;
3644 * 3. The user.
3645 *
3646 * We can ignore reservations as we hold the struct_mutex and
3647 * are only called outside of the reservation path. The user
3648 * can only increment pin_count once, and so if after
3649 * subtracting the potential reference by the user, any pin_count
3650 * remains, it must be due to another use by the display engine.
3651 */
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003652 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003653}
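
/*
 * Worked example of the accounting above (illustrative): with pin_count == 2
 * and user_pin_count == 1, subtracting the single possible user reference
 * leaves 1, so at least one pin must belong to the display engine and the
 * helper evaluates true; with pin_count == 1 and user_pin_count == 1 the
 * result is 0 and the object is not considered a display pin.
 */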
3654
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003655/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003656 * Prepare buffer for display plane (scanout, cursors, etc).
3657 * Can be called from an uninterruptible phase (modesetting) and allows
3658 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003659 */
3660int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003661i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3662 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00003663 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003664{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003665 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003666 int ret;
3667
Chris Wilson0be73282010-12-06 14:36:27 +00003668 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07003669 ret = i915_gem_object_sync(obj, pipelined);
3670 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003671 return ret;
3672 }
3673
Chris Wilsoncc98b412013-08-09 12:25:09 +01003674 /* Mark the pin_display early so that we account for the
3675 * display coherency whilst setting up the cache domains.
3676 */
3677 obj->pin_display = true;
3678
Eric Anholta7ef0642011-03-29 16:59:54 -07003679 /* The display engine is not coherent with the LLC cache on gen6. As
3680 * a result, we make sure that the pinning that is about to occur is
3681	 * done with uncached PTEs. This is the lowest common denominator for all
3682 * chipsets.
3683 *
3684 * However for gen6+, we could do better by using the GFDT bit instead
3685 * of uncaching, which would allow us to flush all the LLC-cached data
3686 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3687 */
Chris Wilson651d7942013-08-08 14:41:10 +01003688 ret = i915_gem_object_set_cache_level(obj,
3689 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Eric Anholta7ef0642011-03-29 16:59:54 -07003690 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003691 goto err_unpin_display;
Eric Anholta7ef0642011-03-29 16:59:54 -07003692
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003693 /* As the user may map the buffer once pinned in the display plane
3694 * (e.g. libkms for the bootup splash), we have to ensure that we
3695 * always use map_and_fenceable for all scanout buffers.
3696 */
Ben Widawskyc37e2202013-07-31 16:59:58 -07003697 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003698 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003699 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003700
Chris Wilson2c225692013-08-09 12:26:45 +01003701 i915_gem_object_flush_cpu_write_domain(obj, true);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003702
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003703 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003704 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003705
3706 /* It should now be out of any other write domains, and we can update
3707 * the domain values for our changes.
3708 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003709 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003710 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003711
3712 trace_i915_gem_object_change_domain(obj,
3713 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003714 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003715
3716 return 0;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003717
3718err_unpin_display:
3719 obj->pin_display = is_pin_display(obj);
3720 return ret;
3721}
3722
3723void
3724i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3725{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003726 i915_gem_object_ggtt_unpin(obj);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003727 obj->pin_display = is_pin_display(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003728}
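
/*
 * Usage sketch for the display pin/unpin pair above (illustrative; the
 * alignment is an assumption and passing a NULL ring simply means no
 * pipelining, i.e. rendering is waited upon):
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, 4096, NULL);
 *	if (ret)
 *		return ret;
 *	... program the scanout base with i915_gem_obj_ggtt_offset(obj) ...
 *	i915_gem_object_unpin_from_display_plane(obj);
 */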
3729
Chris Wilson85345512010-11-13 09:49:11 +00003730int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003731i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003732{
Chris Wilson88241782011-01-07 17:09:48 +00003733 int ret;
3734
Chris Wilsona8198ee2011-04-13 22:04:09 +01003735 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003736 return 0;
3737
Chris Wilson0201f1e2012-07-20 12:41:01 +01003738 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsonc501ae72011-12-14 13:57:23 +01003739 if (ret)
3740 return ret;
3741
Chris Wilsona8198ee2011-04-13 22:04:09 +01003742 /* Ensure that we invalidate the GPU's caches and TLBs. */
3743 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003744 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003745}
3746
Eric Anholte47c68e2008-11-14 13:35:19 -08003747/**
3748 * Moves a single object to the CPU read, and possibly write domain.
3749 *
3750 * This function returns when the move is complete, including waiting on
3751 * flushes to occur.
3752 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003753int
Chris Wilson919926a2010-11-12 13:42:53 +00003754i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003755{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003756 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003757 int ret;
3758
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003759 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3760 return 0;
3761
Chris Wilson0201f1e2012-07-20 12:41:01 +01003762 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003763 if (ret)
3764 return ret;
3765
Eric Anholte47c68e2008-11-14 13:35:19 -08003766 i915_gem_object_flush_gtt_write_domain(obj);
3767
Chris Wilson05394f32010-11-08 19:18:58 +00003768 old_write_domain = obj->base.write_domain;
3769 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003770
Eric Anholte47c68e2008-11-14 13:35:19 -08003771 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003772 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003773 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003774
Chris Wilson05394f32010-11-08 19:18:58 +00003775 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003776 }
3777
3778 /* It should now be out of any other write domains, and we can update
3779 * the domain values for our changes.
3780 */
Chris Wilson05394f32010-11-08 19:18:58 +00003781 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003782
3783 /* If we're writing through the CPU, then the GPU read domains will
3784 * need to be invalidated at next use.
3785 */
3786 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003787 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3788 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003789 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003790
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003791 trace_i915_gem_object_change_domain(obj,
3792 old_read_domains,
3793 old_write_domain);
3794
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003795 return 0;
3796}
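
/*
 * Rough sketch of a caller of the CPU-domain move above (illustrative;
 * locking and the actual data access are assumed):
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 *	if (ret)
 *		return ret;
 *	... read the object's pages through the CPU, no further clflush needed ...
 */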
3797
Eric Anholt673a3942008-07-30 12:06:12 -07003798/* Throttle our rendering by waiting until the ring has completed our requests
3799 * emitted over 20 msec ago.
3800 *
Eric Anholtb9624422009-06-03 07:27:35 +00003801 * Note that if we were to use the current jiffies each time around the loop,
3802 * we wouldn't escape the function with any frames outstanding if the time to
3803 * render a frame was over 20ms.
3804 *
Eric Anholt673a3942008-07-30 12:06:12 -07003805 * This should get us reasonable parallelism between CPU and GPU but also
3806 * relatively low latency when blocking on a particular request to finish.
3807 */
3808static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003809i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003810{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003811 struct drm_i915_private *dev_priv = dev->dev_private;
3812 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003813 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003814 struct drm_i915_gem_request *request;
3815 struct intel_ring_buffer *ring = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01003816 unsigned reset_counter;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003817 u32 seqno = 0;
3818 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003819
Daniel Vetter308887a2012-11-14 17:14:06 +01003820 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3821 if (ret)
3822 return ret;
3823
3824 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3825 if (ret)
3826 return ret;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003827
Chris Wilson1c255952010-09-26 11:03:27 +01003828 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003829 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003830 if (time_after_eq(request->emitted_jiffies, recent_enough))
3831 break;
3832
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003833 ring = request->ring;
3834 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003835 }
Daniel Vetterf69061b2012-12-06 09:01:42 +01003836 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson1c255952010-09-26 11:03:27 +01003837 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003838
3839 if (seqno == 0)
3840 return 0;
3841
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003842 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003843 if (ret == 0)
3844 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003845
Eric Anholt673a3942008-07-30 12:06:12 -07003846 return ret;
3847}
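
/*
 * Illustrative numbers for the 20 msec window above: at 60 frames per second
 * a frame is emitted roughly every 16 msec, so a client is only ever made to
 * wait for work it submitted a little over one frame ago.  Userspace reaches
 * this path through the throttle ioctl (wrapper assumed from libdrm):
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 */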
3848
Eric Anholt673a3942008-07-30 12:06:12 -07003849int
Chris Wilson05394f32010-11-08 19:18:58 +00003850i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07003851 struct i915_address_space *vm,
Chris Wilson05394f32010-11-08 19:18:58 +00003852 uint32_t alignment,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003853 bool map_and_fenceable,
3854 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003855{
Ben Widawsky6f65e292013-12-06 14:10:56 -08003856 const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003857 struct i915_vma *vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003858 int ret;
3859
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003860 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3861
3862 vma = i915_gem_obj_to_vma(obj, vm);
3863
3864 if (vma) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003865 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3866 return -EBUSY;
3867
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003868 if ((alignment &&
3869 vma->node.start & (alignment - 1)) ||
Chris Wilson05394f32010-11-08 19:18:58 +00003870 (map_and_fenceable && !obj->map_and_fenceable)) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003871 WARN(vma->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003872 "bo is already pinned with incorrect alignment:"
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003873 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003874 " obj->map_and_fenceable=%d\n",
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003875 i915_gem_obj_offset(obj, vm), alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003876 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003877 obj->map_and_fenceable);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003878 ret = i915_vma_unbind(vma);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003879 if (ret)
3880 return ret;
3881 }
3882 }
3883
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003884 if (!i915_gem_obj_bound(obj, vm)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003885 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3886 map_and_fenceable,
3887 nonblocking);
Chris Wilson97311292009-09-21 00:22:34 +01003888 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003889 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00003891 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003892
Ben Widawsky6f65e292013-12-06 14:10:56 -08003893 vma = i915_gem_obj_to_vma(obj, vm);
3894
3895 vma->bind_vma(vma, obj->cache_level, flags);
Daniel Vetter74898d72012-02-15 23:50:22 +01003896
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003897 i915_gem_obj_to_vma(obj, vm)->pin_count++;
Chris Wilson6299f992010-11-24 12:23:44 +00003898 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003899
3900 return 0;
3901}
3902
3903void
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003904i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003905{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003906 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003907
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003908 BUG_ON(!vma);
3909 BUG_ON(vma->pin_count == 0);
3910 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3911
3912 if (--vma->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003913 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003914}
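
/*
 * Pin/unpin usage sketch (illustrative; the alignment and flags shown are
 * assumptions): a temporary global-GTT pin pairs the helpers above like so:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 *	if (ret)
 *		return ret;
 *	... use i915_gem_obj_ggtt_offset(obj) while the binding is pinned ...
 *	i915_gem_object_ggtt_unpin(obj);
 */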
3915
3916int
3917i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003918 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003919{
3920 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003921 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003922 int ret;
3923
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003924 ret = i915_mutex_lock_interruptible(dev);
3925 if (ret)
3926 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003927
Chris Wilson05394f32010-11-08 19:18:58 +00003928 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003929 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003930 ret = -ENOENT;
3931 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003932 }
Eric Anholt673a3942008-07-30 12:06:12 -07003933
Chris Wilson05394f32010-11-08 19:18:58 +00003934 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003935 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003936 ret = -EINVAL;
3937 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003938 }
3939
Chris Wilson05394f32010-11-08 19:18:58 +00003940 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003941 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3942 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003943 ret = -EINVAL;
3944 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003945 }
3946
Daniel Vetteraa5f8022013-10-10 14:46:37 +02003947 if (obj->user_pin_count == ULONG_MAX) {
3948 ret = -EBUSY;
3949 goto out;
3950 }
3951
Chris Wilson93be8782013-01-02 10:31:22 +00003952 if (obj->user_pin_count == 0) {
Ben Widawskyc37e2202013-07-31 16:59:58 -07003953 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003954 if (ret)
3955 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003956 }
3957
Chris Wilson93be8782013-01-02 10:31:22 +00003958 obj->user_pin_count++;
3959 obj->pin_filp = file;
3960
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003961 args->offset = i915_gem_obj_ggtt_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003962out:
Chris Wilson05394f32010-11-08 19:18:58 +00003963 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003964unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003965 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003966 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003967}
3968
3969int
3970i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003971 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003972{
3973 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003974 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003975 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003976
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003977 ret = i915_mutex_lock_interruptible(dev);
3978 if (ret)
3979 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003980
Chris Wilson05394f32010-11-08 19:18:58 +00003981 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003982 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003983 ret = -ENOENT;
3984 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003985 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003986
Chris Wilson05394f32010-11-08 19:18:58 +00003987 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003988 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3989 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003990 ret = -EINVAL;
3991 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003992 }
Chris Wilson05394f32010-11-08 19:18:58 +00003993 obj->user_pin_count--;
3994 if (obj->user_pin_count == 0) {
3995 obj->pin_filp = NULL;
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003996 i915_gem_object_ggtt_unpin(obj);
Jesse Barnes79e53942008-11-07 14:24:08 -08003997 }
Eric Anholt673a3942008-07-30 12:06:12 -07003998
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003999out:
Chris Wilson05394f32010-11-08 19:18:58 +00004000 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004001unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004002 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004003 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004004}
4005
4006int
4007i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00004008 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004009{
4010 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004011 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01004012 int ret;
4013
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004014 ret = i915_mutex_lock_interruptible(dev);
4015 if (ret)
4016 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004017
Chris Wilson05394f32010-11-08 19:18:58 +00004018 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004019 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004020 ret = -ENOENT;
4021 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004022 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08004023
Chris Wilson0be555b2010-08-04 15:36:30 +01004024 /* Count all active objects as busy, even if they are currently not used
4025 * by the gpu. Users of this interface expect objects to eventually
4026 * become non-busy without any further actions, therefore emit any
4027 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08004028 */
Daniel Vetter30dfebf2012-06-01 15:21:23 +02004029 ret = i915_gem_object_flush_active(obj);
4030
Chris Wilson05394f32010-11-08 19:18:58 +00004031 args->busy = obj->active;
Chris Wilsone9808ed2012-07-04 12:25:08 +01004032 if (obj->ring) {
4033 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4034 args->busy |= intel_ring_flag(obj->ring) << 16;
4035 }
Eric Anholt673a3942008-07-30 12:06:12 -07004036
Chris Wilson05394f32010-11-08 19:18:58 +00004037 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004038unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004039 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004040 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004041}
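
/*
 * Decoding sketch for the busy result filled in above (illustrative): bit 0
 * reports whether the object is still active, and the flag of the last ring
 * to touch it (1 << ring id) is packed into the upper half-word:
 *
 *	bool busy = args.busy & 1;
 *	unsigned ring_mask = args.busy >> 16;
 */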
4042
4043int
4044i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4045 struct drm_file *file_priv)
4046{
Akshay Joshi0206e352011-08-16 15:34:10 -04004047 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004048}
4049
Chris Wilson3ef94da2009-09-14 16:50:29 +01004050int
4051i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4052 struct drm_file *file_priv)
4053{
4054 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004055 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004056 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004057
4058 switch (args->madv) {
4059 case I915_MADV_DONTNEED:
4060 case I915_MADV_WILLNEED:
4061 break;
4062 default:
4063 return -EINVAL;
4064 }
4065
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004066 ret = i915_mutex_lock_interruptible(dev);
4067 if (ret)
4068 return ret;
4069
Chris Wilson05394f32010-11-08 19:18:58 +00004070 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004071 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004072 ret = -ENOENT;
4073 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004074 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01004075
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004076 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004077 ret = -EINVAL;
4078 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004079 }
4080
Chris Wilson05394f32010-11-08 19:18:58 +00004081 if (obj->madv != __I915_MADV_PURGED)
4082 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004083
Chris Wilson6c085a72012-08-20 11:40:46 +02004084 /* if the object is no longer attached, discard its backing storage */
4085 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01004086 i915_gem_object_truncate(obj);
4087
Chris Wilson05394f32010-11-08 19:18:58 +00004088 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004089
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004090out:
Chris Wilson05394f32010-11-08 19:18:58 +00004091 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004092unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004093 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004094 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004095}
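
/*
 * Userspace-side sketch of the madvise interface above (illustrative; field
 * names follow the args decoded in this function):
 *
 *	struct drm_i915_gem_madvise arg = {
 *		.handle = handle,
 *		.madv   = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *
 * arg.retained == 0 afterwards means the backing storage had already been
 * discarded.
 */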
4096
Chris Wilson37e680a2012-06-07 15:38:42 +01004097void i915_gem_object_init(struct drm_i915_gem_object *obj,
4098 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004099{
Ben Widawsky35c20a62013-05-31 11:28:48 -07004100 INIT_LIST_HEAD(&obj->global_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004101 INIT_LIST_HEAD(&obj->ring_list);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02004102 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004103 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004104
Chris Wilson37e680a2012-06-07 15:38:42 +01004105 obj->ops = ops;
4106
Chris Wilson0327d6b2012-08-11 15:41:06 +01004107 obj->fence_reg = I915_FENCE_REG_NONE;
4108 obj->madv = I915_MADV_WILLNEED;
4109 /* Avoid an unnecessary call to unbind on the first bind. */
4110 obj->map_and_fenceable = true;
4111
4112 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4113}
4114
Chris Wilson37e680a2012-06-07 15:38:42 +01004115static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4116 .get_pages = i915_gem_object_get_pages_gtt,
4117 .put_pages = i915_gem_object_put_pages_gtt,
4118};
4119
Chris Wilson05394f32010-11-08 19:18:58 +00004120struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4121 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004122{
Daniel Vetterc397b902010-04-09 19:05:07 +00004123 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004124 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004125 gfp_t mask;
Daniel Vetterc397b902010-04-09 19:05:07 +00004126
Chris Wilson42dcedd2012-11-15 11:32:30 +00004127 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004128 if (obj == NULL)
4129 return NULL;
4130
4131 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
Chris Wilson42dcedd2012-11-15 11:32:30 +00004132 i915_gem_object_free(obj);
Daniel Vetterc397b902010-04-09 19:05:07 +00004133 return NULL;
4134 }
4135
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004136 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4137 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4138 /* 965gm cannot relocate objects above 4GiB. */
4139 mask &= ~__GFP_HIGHMEM;
4140 mask |= __GFP_DMA32;
4141 }
4142
Al Viro496ad9a2013-01-23 17:07:38 -05004143 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004144 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004145
Chris Wilson37e680a2012-06-07 15:38:42 +01004146 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004147
Daniel Vetterc397b902010-04-09 19:05:07 +00004148 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4149 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4150
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004151 if (HAS_LLC(dev)) {
4152 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004153 * cache) for about a 10% performance improvement
4154 * compared to uncached. Graphics requests other than
4155 * display scanout are coherent with the CPU in
4156 * accessing this cache. This means in this mode we
4157 * don't need to clflush on the CPU side, and on the
4158 * GPU side we only need to flush internal caches to
4159 * get data visible to the CPU.
4160 *
4161 * However, we maintain the display planes as UC, and so
4162 * need to rebind when first used as such.
4163 */
4164 obj->cache_level = I915_CACHE_LLC;
4165 } else
4166 obj->cache_level = I915_CACHE_NONE;
4167
Daniel Vetterd861e332013-07-24 23:25:03 +02004168 trace_i915_gem_object_create(obj);
4169
Chris Wilson05394f32010-11-08 19:18:58 +00004170 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004171}
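
/*
 * Allocation sketch (illustrative; the page-aligned size is an assumption):
 * a freshly created shmem-backed object starts in the CPU domain with the
 * cache level chosen above and is bound lazily on first pin:
 *
 *	obj = i915_gem_alloc_object(dev, 4096);
 *	if (obj == NULL)
 *		return -ENOMEM;
 */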
4172
Chris Wilson1488fc02012-04-24 15:47:31 +01004173void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004174{
Chris Wilson1488fc02012-04-24 15:47:31 +01004175 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004176 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01004177 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004178 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004179
Chris Wilson26e12f82011-03-20 11:20:19 +00004180 trace_i915_gem_object_destroy(obj);
4181
Chris Wilson1488fc02012-04-24 15:47:31 +01004182 if (obj->phys_obj)
4183 i915_gem_detach_phys_object(dev, obj);
4184
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004185 /* NB: 0 or 1 elements */
4186 WARN_ON(!list_empty(&obj->vma_list) &&
4187 !list_is_singular(&obj->vma_list));
4188 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004189 int ret;
4190
4191 vma->pin_count = 0;
4192 ret = i915_vma_unbind(vma);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004193 if (WARN_ON(ret == -ERESTARTSYS)) {
4194 bool was_interruptible;
Chris Wilson1488fc02012-04-24 15:47:31 +01004195
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004196 was_interruptible = dev_priv->mm.interruptible;
4197 dev_priv->mm.interruptible = false;
Chris Wilson1488fc02012-04-24 15:47:31 +01004198
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004199 WARN_ON(i915_vma_unbind(vma));
Chris Wilson1488fc02012-04-24 15:47:31 +01004200
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004201 dev_priv->mm.interruptible = was_interruptible;
4202 }
Chris Wilson1488fc02012-04-24 15:47:31 +01004203 }
4204
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004205 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4206 * before progressing. */
4207 if (obj->stolen)
4208 i915_gem_object_unpin_pages(obj);
4209
Ben Widawsky401c29f2013-05-31 11:28:47 -07004210 if (WARN_ON(obj->pages_pin_count))
4211 obj->pages_pin_count = 0;
Chris Wilson37e680a2012-06-07 15:38:42 +01004212 i915_gem_object_put_pages(obj);
Chris Wilsond8cb5082012-08-11 15:41:03 +01004213 i915_gem_object_free_mmap_offset(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +00004214 i915_gem_object_release_stolen(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004215
Chris Wilson9da3da62012-06-01 15:20:22 +01004216 BUG_ON(obj->pages);
4217
Chris Wilson2f745ad2012-09-04 21:02:58 +01004218 if (obj->base.import_attach)
4219 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004220
Chris Wilson05394f32010-11-08 19:18:58 +00004221 drm_gem_object_release(&obj->base);
4222 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004223
Chris Wilson05394f32010-11-08 19:18:58 +00004224 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004225 i915_gem_object_free(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004226}
4227
Daniel Vettere656a6c2013-08-14 14:14:04 +02004228struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
Ben Widawsky2f633152013-07-17 12:19:03 -07004229 struct i915_address_space *vm)
4230{
Daniel Vettere656a6c2013-08-14 14:14:04 +02004231 struct i915_vma *vma;
4232 list_for_each_entry(vma, &obj->vma_list, vma_link)
4233 if (vma->vm == vm)
4234 return vma;
4235
4236 return NULL;
4237}
4238
Ben Widawsky2f633152013-07-17 12:19:03 -07004239void i915_gem_vma_destroy(struct i915_vma *vma)
4240{
4241 WARN_ON(vma->node.allocated);
Chris Wilsonaaa056672013-08-20 12:56:40 +01004242
4243 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4244 if (!list_empty(&vma->exec_list))
4245 return;
4246
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004247 list_del(&vma->vma_link);
Daniel Vetterb93dab62013-08-26 11:23:47 +02004248
Ben Widawsky2f633152013-07-17 12:19:03 -07004249 kfree(vma);
4250}
4251
Jesse Barnes5669fca2009-02-17 15:13:31 -08004252int
Chris Wilson45c5f202013-10-16 11:50:01 +01004253i915_gem_suspend(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004254{
4255 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson45c5f202013-10-16 11:50:01 +01004256 int ret = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004257
Chris Wilson45c5f202013-10-16 11:50:01 +01004258 mutex_lock(&dev->struct_mutex);
Chris Wilsonf7403342013-09-13 23:57:04 +01004259 if (dev_priv->ums.mm_suspended)
Chris Wilson45c5f202013-10-16 11:50:01 +01004260 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07004261
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004262 ret = i915_gpu_idle(dev);
Chris Wilsonf7403342013-09-13 23:57:04 +01004263 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004264 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004265
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004266 i915_gem_retire_requests(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004267
Chris Wilson29105cc2010-01-07 10:39:13 +00004268 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01004269 if (!drm_core_check_feature(dev, DRIVER_MODESET))
Chris Wilson6c085a72012-08-20 11:40:46 +02004270 i915_gem_evict_everything(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004271
Chris Wilson29105cc2010-01-07 10:39:13 +00004272 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004273 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004274
Chris Wilson45c5f202013-10-16 11:50:01 +01004275 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4276 * We need to replace this with a semaphore, or something.
4277 * And not confound ums.mm_suspended!
4278 */
4279 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4280 DRIVER_MODESET);
4281 mutex_unlock(&dev->struct_mutex);
4282
4283 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004284 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004285 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004286
Eric Anholt673a3942008-07-30 12:06:12 -07004287 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004288
4289err:
4290 mutex_unlock(&dev->struct_mutex);
4291 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004292}
4293
Ben Widawskyc3787e22013-09-17 21:12:44 -07004294int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
Ben Widawskyb9524a12012-05-25 16:56:24 -07004295{
Ben Widawskyc3787e22013-09-17 21:12:44 -07004296 struct drm_device *dev = ring->dev;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004297 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004298 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4299 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
Ben Widawskyc3787e22013-09-17 21:12:44 -07004300 int i, ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004301
Ben Widawsky040d2ba2013-09-19 11:01:40 -07004302 if (!HAS_L3_DPF(dev) || !remap_info)
Ben Widawskyc3787e22013-09-17 21:12:44 -07004303 return 0;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004304
Ben Widawskyc3787e22013-09-17 21:12:44 -07004305 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4306 if (ret)
4307 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004308
Ben Widawskyc3787e22013-09-17 21:12:44 -07004309 /*
4310 * Note: We do not worry about the concurrent register cacheline hang
4311 * here because no other code should access these registers other than
4312 * at initialization time.
4313 */
Ben Widawskyb9524a12012-05-25 16:56:24 -07004314 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
Ben Widawskyc3787e22013-09-17 21:12:44 -07004315 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4316 intel_ring_emit(ring, reg_base + i);
4317 intel_ring_emit(ring, remap_info[i/4]);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004318 }
4319
Ben Widawskyc3787e22013-09-17 21:12:44 -07004320 intel_ring_advance(ring);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004321
Ben Widawskyc3787e22013-09-17 21:12:44 -07004322 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004323}
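
/*
 * Ring-space arithmetic for the remap loop above (illustrative restatement):
 * GEN7_L3LOG_SIZE bytes of remap data are written four bytes per register,
 * and each register costs three dwords on the ring (MI_LOAD_REGISTER_IMM(1),
 * the register offset, the value), which matches the
 * GEN7_L3LOG_SIZE / 4 * 3 dwords reserved by intel_ring_begin() above.
 */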
4324
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004325void i915_gem_init_swizzling(struct drm_device *dev)
4326{
4327 drm_i915_private_t *dev_priv = dev->dev_private;
4328
Daniel Vetter11782b02012-01-31 16:47:55 +01004329 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004330 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4331 return;
4332
4333 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4334 DISP_TILE_SURFACE_SWIZZLING);
4335
Daniel Vetter11782b02012-01-31 16:47:55 +01004336 if (IS_GEN5(dev))
4337 return;
4338
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004339 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4340 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004341 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004342 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004343 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky31a53362013-11-02 21:07:04 -07004344 else if (IS_GEN8(dev))
4345 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004346 else
4347 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004348}
Daniel Vettere21af882012-02-09 20:53:27 +01004349
Chris Wilson67b1b572012-07-05 23:49:40 +01004350static bool
4351intel_enable_blt(struct drm_device *dev)
4352{
4353 if (!HAS_BLT(dev))
4354 return false;
4355
4356 /* The blitter was dysfunctional on early prototypes */
4357 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4358 DRM_INFO("BLT not supported on this pre-production hardware;"
4359 " graphics performance will be degraded.\n");
4360 return false;
4361 }
4362
4363 return true;
4364}
4365
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004366static int i915_gem_init_rings(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004367{
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004368 struct drm_i915_private *dev_priv = dev->dev_private;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004369 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004370
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004371 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004372 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00004373 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004374
4375 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004376 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004377 if (ret)
4378 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004379 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004380
Chris Wilson67b1b572012-07-05 23:49:40 +01004381 if (intel_enable_blt(dev)) {
Chris Wilson549f7362010-10-19 11:19:32 +01004382 ret = intel_init_blt_ring_buffer(dev);
4383 if (ret)
4384 goto cleanup_bsd_ring;
4385 }
4386
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004387 if (HAS_VEBOX(dev)) {
4388 ret = intel_init_vebox_ring_buffer(dev);
4389 if (ret)
4390 goto cleanup_blt_ring;
4391 }
4392
Mika Kuoppala99433932013-01-22 14:12:17 +02004394 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4395 if (ret)
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004396 goto cleanup_vebox_ring;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004397
4398 return 0;
4399
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004400cleanup_vebox_ring:
4401 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004402cleanup_blt_ring:
4403 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4404cleanup_bsd_ring:
4405 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4406cleanup_render_ring:
4407 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4408
4409 return ret;
4410}
4411
4412int
4413i915_gem_init_hw(struct drm_device *dev)
4414{
4415 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004416 int ret, i;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004417
4418 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4419 return -EIO;
4420
Ben Widawsky59124502013-07-04 11:02:05 -07004421 if (dev_priv->ellc_size)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004422 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004423
Rodrigo Vivi94353732013-08-28 16:45:46 -03004424 if (IS_HSW_GT3(dev))
4425 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
4426 else
4427 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
4428
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004429 if (HAS_PCH_NOP(dev)) {
4430 u32 temp = I915_READ(GEN7_MSG_CTL);
4431 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4432 I915_WRITE(GEN7_MSG_CTL, temp);
4433 }
4434
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004435 i915_gem_init_swizzling(dev);
4436
4437 ret = i915_gem_init_rings(dev);
4438 if (ret)
Mika Kuoppala99433932013-01-22 14:12:17 +02004439 return ret;
4440
Ben Widawskyc3787e22013-09-17 21:12:44 -07004441 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4442 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4443
Ben Widawsky254f9652012-06-04 14:42:42 -07004444 /*
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004445 * XXX: Contexts should only be initialized once. Doing a switch to the
4446 * default context switch however is something we'd like to do after
4447 * reset or thaw (the latter may not actually be necessary for HW, but
4448 * goes with our code better). Context switching requires rings (for
4449 * the do_switch), but before enabling PPGTT. So don't move this.
Ben Widawsky254f9652012-06-04 14:42:42 -07004450 */
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004451 ret = i915_gem_context_enable(dev_priv);
Ben Widawsky8245be32013-11-06 13:56:29 -02004452 if (ret) {
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004453 DRM_ERROR("Context enable failed %d\n", ret);
4454 goto err_out;
Ben Widawsky8245be32013-11-06 13:56:29 -02004455 }
4456
Chris Wilson68f95ba2010-05-27 13:18:22 +01004457 return 0;
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004458
4459err_out:
4460 i915_gem_cleanup_ringbuffer(dev);
4461 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004462}
4463
Chris Wilson1070a422012-04-24 15:47:41 +01004464int i915_gem_init(struct drm_device *dev)
4465{
4466 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson1070a422012-04-24 15:47:41 +01004467 int ret;
4468
Chris Wilson1070a422012-04-24 15:47:41 +01004469 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004470
4471 if (IS_VALLEYVIEW(dev)) {
4472 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4473 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4474 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4475 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4476 }
4477
Ben Widawskyd7e50082012-12-18 10:31:25 -08004478 i915_gem_init_global_gtt(dev);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004479
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004480 ret = i915_gem_context_init(dev);
4481 if (ret)
4482 return ret;
4483
Chris Wilson1070a422012-04-24 15:47:41 +01004484 ret = i915_gem_init_hw(dev);
4485 mutex_unlock(&dev->struct_mutex);
4486 if (ret) {
Ben Widawskybdf4fd72013-12-06 14:11:18 -08004487 WARN_ON(dev_priv->mm.aliasing_ppgtt);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004488 i915_gem_context_fini(dev);
Ben Widawskyc39538a2013-12-06 14:10:50 -08004489 drm_mm_takedown(&dev_priv->gtt.base.mm);
Chris Wilson1070a422012-04-24 15:47:41 +01004490 return ret;
4491 }
4492
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004493 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4494 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4495 dev_priv->dri1.allow_batchbuffer = 1;
Chris Wilson1070a422012-04-24 15:47:41 +01004496 return 0;
4497}
4498
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004499void
4500i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4501{
4502 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004503 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004504 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004505
Chris Wilsonb4519512012-05-11 14:29:30 +01004506 for_each_ring(ring, dev_priv, i)
4507 intel_cleanup_ring_buffer(ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004508}
4509
4510int
Eric Anholt673a3942008-07-30 12:06:12 -07004511i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4512 struct drm_file *file_priv)
4513{
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004514 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004515 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004516
Jesse Barnes79e53942008-11-07 14:24:08 -08004517 if (drm_core_check_feature(dev, DRIVER_MODESET))
4518 return 0;
4519
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004520 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004521 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004522 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004523 }
4524
Eric Anholt673a3942008-07-30 12:06:12 -07004525 mutex_lock(&dev->struct_mutex);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004526 dev_priv->ums.mm_suspended = 0;
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004527
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004528 ret = i915_gem_init_hw(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004529 if (ret != 0) {
4530 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004531 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004532 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004533
Ben Widawsky5cef07e2013-07-16 16:50:08 -07004534 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004535 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004536
Chris Wilson5f353082010-06-07 14:03:03 +01004537 ret = drm_irq_install(dev);
4538 if (ret)
4539 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004540
Eric Anholt673a3942008-07-30 12:06:12 -07004541 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01004542
4543cleanup_ringbuffer:
4544 mutex_lock(&dev->struct_mutex);
4545 i915_gem_cleanup_ringbuffer(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004546 dev_priv->ums.mm_suspended = 1;
Chris Wilson5f353082010-06-07 14:03:03 +01004547 mutex_unlock(&dev->struct_mutex);
4548
4549 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004550}
4551
4552int
4553i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4554 struct drm_file *file_priv)
4555{
Jesse Barnes79e53942008-11-07 14:24:08 -08004556 if (drm_core_check_feature(dev, DRIVER_MODESET))
4557 return 0;
4558
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004559 drm_irq_uninstall(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004560
Chris Wilson45c5f202013-10-16 11:50:01 +01004561 return i915_gem_suspend(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004562}
4563
4564void
4565i915_gem_lastclose(struct drm_device *dev)
4566{
4567 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004568
Eric Anholte806b492009-01-22 09:56:58 -08004569 if (drm_core_check_feature(dev, DRIVER_MODESET))
4570 return;
4571
Chris Wilson45c5f202013-10-16 11:50:01 +01004572 ret = i915_gem_suspend(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004573 if (ret)
4574 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004575}
4576
Chris Wilson64193402010-10-24 12:38:05 +01004577static void
4578init_ring_lists(struct intel_ring_buffer *ring)
4579{
4580 INIT_LIST_HEAD(&ring->active_list);
4581 INIT_LIST_HEAD(&ring->request_list);
Chris Wilson64193402010-10-24 12:38:05 +01004582}
4583
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004584static void i915_init_vm(struct drm_i915_private *dev_priv,
4585 struct i915_address_space *vm)
4586{
4587 vm->dev = dev_priv->dev;
4588 INIT_LIST_HEAD(&vm->active_list);
4589 INIT_LIST_HEAD(&vm->inactive_list);
4590 INIT_LIST_HEAD(&vm->global_link);
4591 list_add(&vm->global_link, &dev_priv->vm_list);
4592}
4593
Eric Anholt673a3942008-07-30 12:06:12 -07004594void
4595i915_gem_load(struct drm_device *dev)
4596{
4597 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson42dcedd2012-11-15 11:32:30 +00004598 int i;
4599
4600 dev_priv->slab =
4601 kmem_cache_create("i915_gem_object",
4602 sizeof(struct drm_i915_gem_object), 0,
4603 SLAB_HWCACHE_ALIGN,
4604 NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07004605
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004606 INIT_LIST_HEAD(&dev_priv->vm_list);
4607 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4608
Ben Widawskya33afea2013-09-17 21:12:45 -07004609 INIT_LIST_HEAD(&dev_priv->context_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02004610 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4611 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004612 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004613 for (i = 0; i < I915_NUM_RINGS; i++)
4614 init_ring_lists(&dev_priv->ring[i]);
Daniel Vetter4b9de732011-10-09 21:52:02 +02004615 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
Daniel Vetter007cc8a2010-04-28 11:02:31 +02004616 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004617 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4618 i915_gem_retire_work_handler);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004619 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4620 i915_gem_idle_work_handler);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004621 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01004622
Dave Airlie94400122010-07-20 13:15:31 +10004623 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4624 if (IS_GEN3(dev)) {
Daniel Vetter50743292012-04-26 22:02:54 +02004625 I915_WRITE(MI_ARB_STATE,
4626 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
Dave Airlie94400122010-07-20 13:15:31 +10004627 }
4628
Chris Wilson72bfa192010-12-19 11:42:05 +00004629 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4630
Jesse Barnesde151cf2008-11-12 10:03:55 -08004631 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08004632 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4633 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004634
Ville Syrjälä42b5aea2013-04-09 13:02:47 +03004635 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4636 dev_priv->num_fence_regs = 32;
4637 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004638 dev_priv->num_fence_regs = 16;
4639 else
4640 dev_priv->num_fence_regs = 8;
4641
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004642 /* Initialize fence registers to zero */
Chris Wilson19b2dbd2013-06-12 10:15:12 +01004643 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4644 i915_gem_restore_fences(dev);
Eric Anholt10ed13e2011-05-06 13:53:49 -07004645
Eric Anholt673a3942008-07-30 12:06:12 -07004646 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004647 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01004648
Chris Wilsonce453d82011-02-21 14:43:56 +00004649 dev_priv->mm.interruptible = true;
4650
Dave Chinner7dc19d52013-08-28 10:18:11 +10004651 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4652 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
Chris Wilson17250b72010-10-28 12:51:39 +01004653 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4654 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07004655}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004656
4657/*
4658 * Create a physically contiguous memory object for this object
4659 * e.g. for cursor + overlay regs
4660 */
Chris Wilson995b6762010-08-20 13:23:26 +01004661static int i915_gem_init_phys_object(struct drm_device *dev,
4662 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004663{
4664 drm_i915_private_t *dev_priv = dev->dev_private;
4665 struct drm_i915_gem_phys_object *phys_obj;
4666 int ret;
4667
4668 if (dev_priv->mm.phys_objs[id - 1] || !size)
4669 return 0;
4670
Daniel Vetterb14c5672013-09-19 12:18:32 +02004671 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004672 if (!phys_obj)
4673 return -ENOMEM;
4674
4675 phys_obj->id = id;
4676
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004677 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004678 if (!phys_obj->handle) {
4679 ret = -ENOMEM;
4680 goto kfree_obj;
4681 }
4682#ifdef CONFIG_X86
4683 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4684#endif
4685
4686 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4687
4688 return 0;
4689kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004690 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004691 return ret;
4692}
4693
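/*
 * Tear down one phys object slot: detach any object still bound to it,
 * restore write-back caching on the buffer and free the contiguous DMA
 * memory allocated by i915_gem_init_phys_object().
 */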
Chris Wilson995b6762010-08-20 13:23:26 +01004694static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004695{
4696 drm_i915_private_t *dev_priv = dev->dev_private;
4697 struct drm_i915_gem_phys_object *phys_obj;
4698
4699 if (!dev_priv->mm.phys_objs[id - 1])
4700 return;
4701
4702 phys_obj = dev_priv->mm.phys_objs[id - 1];
 4703	if (phys_obj->cur_obj)
 4704		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4706
4707#ifdef CONFIG_X86
4708 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4709#endif
4710 drm_pci_free(dev, phys_obj->handle);
4711 kfree(phys_obj);
4712 dev_priv->mm.phys_objs[id - 1] = NULL;
4713}
4714
4715void i915_gem_free_all_phys_object(struct drm_device *dev)
4716{
4717 int i;
4718
Dave Airlie260883c2009-01-22 17:58:49 +10004719 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004720 i915_gem_free_phys_object(dev, i);
4721}
4722
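/*
 * Copy the contents of the contiguous phys backing store back into the
 * object's shmem pages, flush caches, and drop the association so the
 * object reverts to its ordinary shmem-backed life cycle.
 */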
4723void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004724 struct drm_i915_gem_object *obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004725{
Al Viro496ad9a2013-01-23 17:07:38 -05004726 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsone5281cc2010-10-28 13:45:36 +01004727 char *vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004728 int i;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004729 int page_count;
4730
Chris Wilson05394f32010-11-08 19:18:58 +00004731 if (!obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004732 return;
Chris Wilson05394f32010-11-08 19:18:58 +00004733 vaddr = obj->phys_obj->handle->vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004734
Chris Wilson05394f32010-11-08 19:18:58 +00004735 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004736 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07004737 struct page *page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004738 if (!IS_ERR(page)) {
4739 char *dst = kmap_atomic(page);
4740 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4741 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004742
Chris Wilsone5281cc2010-10-28 13:45:36 +01004743 drm_clflush_pages(&page, 1);
4744
4745 set_page_dirty(page);
4746 mark_page_accessed(page);
4747 page_cache_release(page);
4748 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004749 }
Ben Widawskye76e9ae2012-11-04 09:21:27 -08004750 i915_gem_chipset_flush(dev);
Chris Wilsond78b47b2009-06-17 21:52:49 +01004751
Chris Wilson05394f32010-11-08 19:18:58 +00004752 obj->phys_obj->cur_obj = NULL;
4753 obj->phys_obj = NULL;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004754}
4755
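/*
 * Bind an object to one of the fixed phys object slots, allocating the
 * contiguous DMA buffer on first use and copying the object's current
 * shmem contents into it.  Used for objects that the hardware must read
 * from physically contiguous memory (e.g. cursor and overlay registers).
 */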
4756int
4757i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004758 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004759 int id,
4760 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004761{
Al Viro496ad9a2013-01-23 17:07:38 -05004762 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004763 drm_i915_private_t *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004764 int ret = 0;
4765 int page_count;
4766 int i;
4767
4768 if (id > I915_MAX_PHYS_OBJECT)
4769 return -EINVAL;
4770
Chris Wilson05394f32010-11-08 19:18:58 +00004771 if (obj->phys_obj) {
4772 if (obj->phys_obj->id == id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004773 return 0;
4774 i915_gem_detach_phys_object(dev, obj);
4775 }
4776
Dave Airlie71acb5e2008-12-30 20:31:46 +10004777 /* create a new object */
4778 if (!dev_priv->mm.phys_objs[id - 1]) {
4779 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson05394f32010-11-08 19:18:58 +00004780 obj->base.size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004781 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00004782 DRM_ERROR("failed to init phys object %d size: %zu\n",
4783 id, obj->base.size);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004784 return ret;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004785 }
4786 }
4787
4788 /* bind to the object */
Chris Wilson05394f32010-11-08 19:18:58 +00004789 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4790 obj->phys_obj->cur_obj = obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004791
Chris Wilson05394f32010-11-08 19:18:58 +00004792 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004793
4794 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01004795 struct page *page;
4796 char *dst, *src;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004797
Hugh Dickins5949eac2011-06-27 16:18:18 -07004798 page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004799 if (IS_ERR(page))
4800 return PTR_ERR(page);
4801
Chris Wilsonff75b9b2010-10-30 22:52:31 +01004802 src = kmap_atomic(page);
Chris Wilson05394f32010-11-08 19:18:58 +00004803 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004804 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004805 kunmap_atomic(src);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004806
4807 mark_page_accessed(page);
4808 page_cache_release(page);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004809 }
4810
4811 return 0;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004812}
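
/*
 * Illustrative sketch of a caller of i915_gem_attach_phys_object() above
 * (hypothetical code, not an actual call site in this file): a cursor
 * update on hardware that needs a physical cursor base might do roughly
 *
 *	ret = i915_gem_attach_phys_object(dev, obj,
 *					  I915_GEM_PHYS_CURSOR_0 + pipe,
 *					  align);
 *	if (ret == 0)
 *		cursor_base = obj->phys_obj->handle->busaddr;
 *
 * i.e. attach the object to a per-pipe slot and then program the display
 * engine with the bus address of the contiguous backing store.
 */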
4813
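/*
 * pwrite fast path for phys objects: try an atomic (non-blocking) copy
 * into the kernel mapping first, and only if that fails drop struct_mutex
 * and retry with an ordinary (possibly faulting) copy_from_user().
 */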
4814static int
Chris Wilson05394f32010-11-08 19:18:58 +00004815i915_gem_phys_pwrite(struct drm_device *dev,
4816 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +10004817 struct drm_i915_gem_pwrite *args,
4818 struct drm_file *file_priv)
4819{
Chris Wilson05394f32010-11-08 19:18:58 +00004820 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
Ville Syrjälä2bb46292013-02-22 16:12:51 +02004821 char __user *user_data = to_user_ptr(args->data_ptr);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004822
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004823 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4824 unsigned long unwritten;
4825
4826 /* The physical object once assigned is fixed for the lifetime
4827 * of the obj, so we can safely drop the lock and continue
4828 * to access vaddr.
4829 */
4830 mutex_unlock(&dev->struct_mutex);
4831 unwritten = copy_from_user(vaddr, user_data, args->size);
4832 mutex_lock(&dev->struct_mutex);
4833 if (unwritten)
4834 return -EFAULT;
4835 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004836
Ben Widawskye76e9ae2012-11-04 09:21:27 -08004837 i915_gem_chipset_flush(dev);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004838 return 0;
4839}
Eric Anholtb9624422009-06-03 07:27:35 +00004840
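/*
 * Per-file teardown on close: cancel the client's idle work before
 * disconnecting its outstanding requests (see the comment below).
 */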
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004841void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004842{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004843 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00004844
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004845 cancel_delayed_work_sync(&file_priv->mm.idle_work);
4846
Eric Anholtb9624422009-06-03 07:27:35 +00004847 /* Clean up our request list when the client is going away, so that
4848 * later retire_requests won't dereference our soon-to-be-gone
4849 * file_priv.
4850 */
Chris Wilson1c255952010-09-26 11:03:27 +01004851 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004852 while (!list_empty(&file_priv->mm.request_list)) {
4853 struct drm_i915_gem_request *request;
4854
4855 request = list_first_entry(&file_priv->mm.request_list,
4856 struct drm_i915_gem_request,
4857 client_list);
4858 list_del(&request->client_list);
4859 request->file_priv = NULL;
4860 }
Chris Wilson1c255952010-09-26 11:03:27 +01004861 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00004862}
Chris Wilson31169712009-09-14 16:50:28 +01004863
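/*
 * Once a client has been idle for a while, clear its RPS wait-boost flag
 * so that a later stalled wait may boost the GPU frequency again.
 */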
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004864static void
4865i915_gem_file_idle_work_handler(struct work_struct *work)
4866{
4867 struct drm_i915_file_private *file_priv =
4868 container_of(work, typeof(*file_priv), mm.idle_work.work);
4869
4870 atomic_set(&file_priv->rps_wait_boost, false);
4871}
4872
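/*
 * Per-file GEM setup on open: allocate the drm_i915_file_private,
 * initialise its request list and idle work, and set up per-file context
 * state; the allocation is freed again if context setup fails.
 */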
4873int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4874{
4875 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08004876 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004877
4878 DRM_DEBUG_DRIVER("\n");
4879
4880 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4881 if (!file_priv)
4882 return -ENOMEM;
4883
4884 file->driver_priv = file_priv;
4885 file_priv->dev_priv = dev->dev_private;
4886
4887 spin_lock_init(&file_priv->mm.lock);
4888 INIT_LIST_HEAD(&file_priv->mm.request_list);
4889 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4890 i915_gem_file_idle_work_handler);
4891
Ben Widawskye422b882013-12-06 14:10:58 -08004892 ret = i915_gem_context_open(dev, file);
4893 if (ret)
4894 kfree(file_priv);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004895
Ben Widawskye422b882013-12-06 14:10:58 -08004896 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004897}
4898
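/*
 * Helper for the shrinker callbacks below: detect whether struct_mutex is
 * already held by the current task, so that a shrink invoked from an
 * allocation made under the lock does not deadlock.  Owner tracking only
 * exists on SMP or with mutex debugging, so UP builds conservatively
 * report false.
 */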
Chris Wilson57745062012-11-21 13:04:04 +00004899static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4900{
4901 if (!mutex_is_locked(mutex))
4902 return false;
4903
4904#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4905 return mutex->owner == task;
4906#else
4907 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4908 return false;
4909#endif
4910}
4911
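/*
 * Shrinker "count" callback: report how many pages could plausibly be
 * reclaimed, i.e. unbound objects whose pages are unpinned, plus bound
 * objects that are idle and not pinned.
 */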
Dave Chinner7dc19d52013-08-28 10:18:11 +10004912static unsigned long
4913i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
Chris Wilson31169712009-09-14 16:50:28 +01004914{
Chris Wilson17250b72010-10-28 12:51:39 +01004915 struct drm_i915_private *dev_priv =
4916 container_of(shrinker,
4917 struct drm_i915_private,
4918 mm.inactive_shrinker);
4919 struct drm_device *dev = dev_priv->dev;
Chris Wilson6c085a72012-08-20 11:40:46 +02004920 struct drm_i915_gem_object *obj;
Chris Wilson57745062012-11-21 13:04:04 +00004921 bool unlock = true;
Dave Chinner7dc19d52013-08-28 10:18:11 +10004922 unsigned long count;
Chris Wilson17250b72010-10-28 12:51:39 +01004923
Chris Wilson57745062012-11-21 13:04:04 +00004924 if (!mutex_trylock(&dev->struct_mutex)) {
4925 if (!mutex_is_locked_by(&dev->struct_mutex, current))
Daniel Vetterd3227042013-09-25 14:00:02 +02004926 return 0;
Chris Wilson57745062012-11-21 13:04:04 +00004927
Daniel Vetter677feac2012-12-19 14:33:45 +01004928 if (dev_priv->mm.shrinker_no_lock_stealing)
Daniel Vetterd3227042013-09-25 14:00:02 +02004929 return 0;
Daniel Vetter677feac2012-12-19 14:33:45 +01004930
Chris Wilson57745062012-11-21 13:04:04 +00004931 unlock = false;
4932 }
Chris Wilson31169712009-09-14 16:50:28 +01004933
Dave Chinner7dc19d52013-08-28 10:18:11 +10004934 count = 0;
Ben Widawsky35c20a62013-05-31 11:28:48 -07004935 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
Chris Wilsona5570172012-09-04 21:02:54 +01004936 if (obj->pages_pin_count == 0)
Dave Chinner7dc19d52013-08-28 10:18:11 +10004937 count += obj->base.size >> PAGE_SHIFT;
Ben Widawskyfcb4a572013-07-31 16:59:57 -07004938
4939 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4940 if (obj->active)
4941 continue;
4942
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004943 if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
Dave Chinner7dc19d52013-08-28 10:18:11 +10004944 count += obj->base.size >> PAGE_SHIFT;
Ben Widawskyfcb4a572013-07-31 16:59:57 -07004945 }
Chris Wilson31169712009-09-14 16:50:28 +01004946
Chris Wilson57745062012-11-21 13:04:04 +00004947 if (unlock)
4948 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9973b42013-10-04 10:33:00 +01004949
Dave Chinner7dc19d52013-08-28 10:18:11 +10004950 return count;
Chris Wilson31169712009-09-14 16:50:28 +01004951}
Ben Widawskya70a3142013-07-31 16:59:56 -07004952
 4953/* Helpers for querying an object's bindings (VMAs) in a given address space */
4954unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4955 struct i915_address_space *vm)
4956{
4957 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4958 struct i915_vma *vma;
4959
Ben Widawsky6f425322013-12-06 14:10:48 -08004960 if (!dev_priv->mm.aliasing_ppgtt ||
4961 vm == &dev_priv->mm.aliasing_ppgtt->base)
Ben Widawskya70a3142013-07-31 16:59:56 -07004962 vm = &dev_priv->gtt.base;
4963
4964 BUG_ON(list_empty(&o->vma_list));
4965 list_for_each_entry(vma, &o->vma_list, vma_link) {
4966 if (vma->vm == vm)
4967 return vma->node.start;
4969 }
4970 return -1;
4971}
4972
4973bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4974 struct i915_address_space *vm)
4975{
4976 struct i915_vma *vma;
4977
4978 list_for_each_entry(vma, &o->vma_list, vma_link)
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004979 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
Ben Widawskya70a3142013-07-31 16:59:56 -07004980 return true;
4981
4982 return false;
4983}
4984
4985bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4986{
Chris Wilson5a1d5eb2013-09-10 11:27:37 +01004987 struct i915_vma *vma;
Ben Widawskya70a3142013-07-31 16:59:56 -07004988
Chris Wilson5a1d5eb2013-09-10 11:27:37 +01004989 list_for_each_entry(vma, &o->vma_list, vma_link)
4990 if (drm_mm_node_allocated(&vma->node))
Ben Widawskya70a3142013-07-31 16:59:56 -07004991 return true;
4992
4993 return false;
4994}
4995
4996unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4997 struct i915_address_space *vm)
4998{
4999 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5000 struct i915_vma *vma;
5001
Ben Widawsky6f425322013-12-06 14:10:48 -08005002 if (!dev_priv->mm.aliasing_ppgtt ||
5003 vm == &dev_priv->mm.aliasing_ppgtt->base)
Ben Widawskya70a3142013-07-31 16:59:56 -07005004 vm = &dev_priv->gtt.base;
5005
5006 BUG_ON(list_empty(&o->vma_list));
5007
5008 list_for_each_entry(vma, &o->vma_list, vma_link)
5009 if (vma->vm == vm)
5010 return vma->node.size;
5011
5012 return 0;
5013}
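
/*
 * Callers that only care about the global GTT normally use the
 * i915_gem_obj_ggtt_*() convenience wrappers in i915_drv.h, which pass
 * obj_to_ggtt(obj) as the address space to the helpers above.
 */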
5014
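/*
 * Shrinker "scan" callback: reclaim in increasing order of cost - first
 * purgeable objects, then any other unpinned objects, and as a last
 * resort wait for the GPU to idle and evict everything unused.  Returns
 * SHRINK_STOP when struct_mutex cannot be taken safely.
 */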
Dave Chinner7dc19d52013-08-28 10:18:11 +10005015static unsigned long
5016i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
5017{
5018 struct drm_i915_private *dev_priv =
5019 container_of(shrinker,
5020 struct drm_i915_private,
5021 mm.inactive_shrinker);
5022 struct drm_device *dev = dev_priv->dev;
Dave Chinner7dc19d52013-08-28 10:18:11 +10005023 unsigned long freed;
5024 bool unlock = true;
5025
5026 if (!mutex_trylock(&dev->struct_mutex)) {
5027 if (!mutex_is_locked_by(&dev->struct_mutex, current))
Daniel Vetterd3227042013-09-25 14:00:02 +02005028 return SHRINK_STOP;
Dave Chinner7dc19d52013-08-28 10:18:11 +10005029
5030 if (dev_priv->mm.shrinker_no_lock_stealing)
Daniel Vetterd3227042013-09-25 14:00:02 +02005031 return SHRINK_STOP;
Dave Chinner7dc19d52013-08-28 10:18:11 +10005032
5033 unlock = false;
5034 }
5035
Chris Wilsond9973b42013-10-04 10:33:00 +01005036 freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
5037 if (freed < sc->nr_to_scan)
5038 freed += __i915_gem_shrink(dev_priv,
5039 sc->nr_to_scan - freed,
5040 false);
5041 if (freed < sc->nr_to_scan)
Dave Chinner7dc19d52013-08-28 10:18:11 +10005042 freed += i915_gem_shrink_all(dev_priv);
5043
5044 if (unlock)
5045 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9973b42013-10-04 10:33:00 +01005046
Dave Chinner7dc19d52013-08-28 10:18:11 +10005047 return freed;
5048}
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07005049
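/*
 * Return the VMA that binds this object into the global GTT, relying on
 * the convention that the GGTT VMA is always first in the object's
 * vma_list; NULL if the object has no such binding.
 */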
5050struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5051{
5052 struct i915_vma *vma;
5053
5054 if (WARN_ON(list_empty(&obj->vma_list)))
5055 return NULL;
5056
5057 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
Ben Widawsky6e164c32013-12-06 14:10:49 -08005058 if (vma->vm != obj_to_ggtt(obj))
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07005059 return NULL;
5060
5061 return vma;
5062}