/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
			   bool map_and_fenceable,
			   bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

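/* The CPU cache is coherent with GPU access whenever the platform has an
 * LLC or the object uses a cacheable (snooped) mapping; only uncached
 * objects on non-LLC parts need explicit clflushes.
 */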
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

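/* Forget the fence register this object used to own; tiled objects also
 * drop their GTT mmaps, since unfenced GTT access would no longer be
 * detiled by the hardware.
 */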
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

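/* Copy helpers for bit17-swizzled pages: callers only use these for pages
 * whose physical address has bit 17 set, where data is stored with address
 * bit 6 flipped. Hence we copy at most one 64-byte cacheline per iteration
 * and apply the "^ 64" to the GPU-side offset of each chunk.
 */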
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915_prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915_prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

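/* Timer callback for the wait loop below: kicks a sleeping waiter out of
 * io_schedule() so it can re-sample the seqno even if the interrupt was
 * missed.
 */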
static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

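/* Only the first waiter per file gets to boost the GPU frequency;
 * atomic_xchg() returns the old value, so this is true exactly once until
 * rps_wait_boost is cleared again.
 */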
static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now;
	DEFINE_WAIT(wait);
	long timeout_jiffies;
	int ret;

	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;

	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
	    WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;
		unsigned long expire;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout_jiffies <= 0) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = jiffies + (missed_irq(dev_priv, ring) ? 1 : timeout_jiffies);
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timeout)
			timeout_jiffies = expire - jiffies;

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

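/* Common tail for the blocking and nonblocking wait_rendering paths below. */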
Chris Wilsond26e3af2013-06-29 22:05:26 +01001140static int
1141i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1142 struct intel_ring_buffer *ring)
1143{
1144 i915_gem_retire_requests_ring(ring);
1145
1146 /* Manually manage the write flush as we may have not yet
1147 * retired the buffer.
1148 *
1149 * Note that the last_write_seqno is always the earlier of
1150 * the two (read/write) seqno, so if we haved successfully waited,
1151 * we know we have passed the last write.
1152 */
1153 obj->last_write_seqno = 0;
1154 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1155
1156 return 0;
1157}
1158
Chris Wilsonb3612372012-08-24 09:35:08 +01001159/**
1160 * Ensures that all rendering to the object has completed and the object is
1161 * safe to unbind from the GTT or access from the CPU.
1162 */
1163static __must_check int
1164i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1165 bool readonly)
1166{
1167 struct intel_ring_buffer *ring = obj->ring;
1168 u32 seqno;
1169 int ret;
1170
1171 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1172 if (seqno == 0)
1173 return 0;
1174
1175 ret = i915_wait_seqno(ring, seqno);
1176 if (ret)
1177 return ret;
1178
Chris Wilsond26e3af2013-06-29 22:05:26 +01001179 return i915_gem_object_wait_rendering__tail(obj, ring);
Chris Wilsonb3612372012-08-24 09:35:08 +01001180}
1181
Chris Wilson3236f572012-08-24 09:35:09 +01001182/* A nonblocking variant of the above wait. This is a highly dangerous routine
1183 * as the object state may change during this call.
1184 */
1185static __must_check int
1186i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001187 struct drm_file *file,
Chris Wilson3236f572012-08-24 09:35:09 +01001188 bool readonly)
1189{
1190 struct drm_device *dev = obj->base.dev;
1191 struct drm_i915_private *dev_priv = dev->dev_private;
1192 struct intel_ring_buffer *ring = obj->ring;
Daniel Vetterf69061b2012-12-06 09:01:42 +01001193 unsigned reset_counter;
Chris Wilson3236f572012-08-24 09:35:09 +01001194 u32 seqno;
1195 int ret;
1196
1197 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1198 BUG_ON(!dev_priv->mm.interruptible);
1199
1200 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1201 if (seqno == 0)
1202 return 0;
1203
Daniel Vetter33196de2012-11-14 17:14:05 +01001204 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
Chris Wilson3236f572012-08-24 09:35:09 +01001205 if (ret)
1206 return ret;
1207
1208 ret = i915_gem_check_olr(ring, seqno);
1209 if (ret)
1210 return ret;
1211
Daniel Vetterf69061b2012-12-06 09:01:42 +01001212 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson3236f572012-08-24 09:35:09 +01001213 mutex_unlock(&dev->struct_mutex);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001214 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
Chris Wilson3236f572012-08-24 09:35:09 +01001215 mutex_lock(&dev->struct_mutex);
Chris Wilsond26e3af2013-06-29 22:05:26 +01001216 if (ret)
1217 return ret;
Chris Wilson3236f572012-08-24 09:35:09 +01001218
Chris Wilsond26e3af2013-06-29 22:05:26 +01001219 return i915_gem_object_wait_rendering__tail(obj, ring);
Chris Wilson3236f572012-08-24 09:35:09 +01001220}
1221
Eric Anholt673a3942008-07-30 12:06:12 -07001222/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001223 * Called when user space prepares to use an object with the CPU, either
1224 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -07001225 */
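/*
 * Hypothetical userspace sketch (not part of this file): before writing
 * through a GTT mmap, a client would typically do
 *
 *	struct drm_i915_gem_set_domain arg = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
 *
 * so the kernel can flush caches and wait for outstanding rendering.
 */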
1226int
1227i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001228 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001229{
1230 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001231 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001232 uint32_t read_domains = args->read_domains;
1233 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001234 int ret;
1235
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001236 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +01001237 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001238 return -EINVAL;
1239
Chris Wilson21d509e2009-06-06 09:46:02 +01001240 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001241 return -EINVAL;
1242
1243 /* Having something in the write domain implies it's in the read
1244 * domain, and only that read domain. Enforce that in the request.
1245 */
1246 if (write_domain != 0 && read_domains != write_domain)
1247 return -EINVAL;
1248
Chris Wilson76c1dec2010-09-25 11:22:51 +01001249 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001250 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001251 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001252
Chris Wilson05394f32010-11-08 19:18:58 +00001253 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001254 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001255 ret = -ENOENT;
1256 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +01001257 }
Jesse Barnes652c3932009-08-17 13:31:43 -07001258
Chris Wilson3236f572012-08-24 09:35:09 +01001259 /* Try to flush the object off the GPU without holding the lock.
1260 * We will repeat the flush holding the lock in the normal manner
1261 * to catch cases where we are gazumped.
1262 */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001263 ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
Chris Wilson3236f572012-08-24 09:35:09 +01001264 if (ret)
1265 goto unref;
1266
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001267 if (read_domains & I915_GEM_DOMAIN_GTT) {
1268 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001269
1270 /* Silently promote "you're not bound, there was nothing to do"
1271 * to success, since the client was just asking us to
1272 * make sure everything was done.
1273 */
1274 if (ret == -EINVAL)
1275 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001276 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001277 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001278 }
1279
Chris Wilson3236f572012-08-24 09:35:09 +01001280unref:
Chris Wilson05394f32010-11-08 19:18:58 +00001281 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001282unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001283 mutex_unlock(&dev->struct_mutex);
1284 return ret;
1285}
1286
1287/**
1288 * Called when user space has done writes to this buffer
1289 */
1290int
1291i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001292 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001293{
1294 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001295 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001296 int ret = 0;
1297
Chris Wilson76c1dec2010-09-25 11:22:51 +01001298 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001299 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001300 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001301
Chris Wilson05394f32010-11-08 19:18:58 +00001302 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001303 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001304 ret = -ENOENT;
1305 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001306 }
1307
Eric Anholt673a3942008-07-30 12:06:12 -07001308 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson2c225692013-08-09 12:26:45 +01001309 if (obj->pin_display)
1310 i915_gem_object_flush_cpu_write_domain(obj, true);
Eric Anholte47c68e2008-11-14 13:35:19 -08001311
Chris Wilson05394f32010-11-08 19:18:58 +00001312 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001313unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001314 mutex_unlock(&dev->struct_mutex);
1315 return ret;
1316}
1317
1318/**
1319 * Maps the contents of an object, returning the address it is mapped
1320 * into.
1321 *
1322 * While the mapping holds a reference on the contents of the object, it doesn't
1323 * imply a ref on the object itself.
1324 */
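/*
 * Hypothetical userspace sketch: fill a struct drm_i915_gem_mmap with the
 * object handle, an offset of 0 and the object size, call
 * DRM_IOCTL_I915_GEM_MMAP, then read the CPU pointer back from addr_ptr.
 */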
1325int
1326i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001327 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001328{
1329 struct drm_i915_gem_mmap *args = data;
1330 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001331 unsigned long addr;
1332
Chris Wilson05394f32010-11-08 19:18:58 +00001333 obj = drm_gem_object_lookup(dev, file, args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07001334 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001335 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001336
Daniel Vetter1286ff72012-05-10 15:25:09 +02001337 /* prime objects have no backing filp to GEM mmap
1338 * pages from.
1339 */
1340 if (!obj->filp) {
1341 drm_gem_object_unreference_unlocked(obj);
1342 return -EINVAL;
1343 }
1344
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001345 addr = vm_mmap(obj->filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001346 PROT_READ | PROT_WRITE, MAP_SHARED,
1347 args->offset);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001348 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001349 if (IS_ERR((void *)addr))
1350 return addr;
1351
1352 args->addr_ptr = (uint64_t) addr;
1353
1354 return 0;
1355}
1356
Jesse Barnesde151cf2008-11-12 10:03:55 -08001357/**
1358 * i915_gem_fault - fault a page into the GTT
1359 * @vma: VMA in question
1360 * @vmf: fault info
1361 *
1362 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1363 * from userspace. The fault handler takes care of binding the object to
1364 * the GTT (if needed), allocating and programming a fence register (again,
1365 * only if needed based on whether the old reg is still valid or the object
1366 * is tiled) and inserting a new PTE into the faulting process.
1367 *
1368 * Note that the faulting process may involve evicting existing objects
1369 * from the GTT and/or fence registers to make room. So performance may
1370 * suffer if the GTT working set is large or there are few fence registers
1371 * left.
1372 */
1373int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1374{
Chris Wilson05394f32010-11-08 19:18:58 +00001375 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1376 struct drm_device *dev = obj->base.dev;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001377 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001378 pgoff_t page_offset;
1379 unsigned long pfn;
1380 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001381 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001382
1383 /* We don't use vmf->pgoff since that has the fake offset */
1384 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1385 PAGE_SHIFT;
1386
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001387 ret = i915_mutex_lock_interruptible(dev);
1388 if (ret)
1389 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001390
Chris Wilsondb53a302011-02-03 11:57:46 +00001391 trace_i915_gem_object_fault(obj, page_offset, true, write);
1392
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001393 /* Access to snoopable pages through the GTT is incoherent. */
1394 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1395 ret = -EINVAL;
1396 goto unlock;
1397 }
1398
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001399 /* Now bind it into the GTT if needed */
Ben Widawskyc37e2202013-07-31 16:59:58 -07001400 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001401 if (ret)
1402 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001403
Chris Wilsonc9839302012-11-20 10:45:17 +00001404 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1405 if (ret)
1406 goto unpin;
1407
1408 ret = i915_gem_object_get_fence(obj);
1409 if (ret)
1410 goto unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001411
Chris Wilson6299f992010-11-24 12:23:44 +00001412 obj->fault_mappable = true;
1413
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001414 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1415 pfn >>= PAGE_SHIFT;
1416 pfn += page_offset;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001417
1418 /* Finally, remap it using the new GTT offset */
1419 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc9839302012-11-20 10:45:17 +00001420unpin:
1421 i915_gem_object_unpin(obj);
Chris Wilsonc7150892009-09-23 00:43:56 +01001422unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001423 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001424out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001425 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001426 case -EIO:
Daniel Vettera9340cc2012-07-04 22:18:42 +02001427 /* If this -EIO is due to a gpu hang, give the reset code a
1428 * chance to clean up the mess. Otherwise return the proper
1429 * SIGBUS. */
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001430 if (i915_terminally_wedged(&dev_priv->gpu_error))
Daniel Vettera9340cc2012-07-04 22:18:42 +02001431 return VM_FAULT_SIGBUS;
Chris Wilson045e7692010-11-07 09:18:22 +00001432 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001433 /*
1434 * EAGAIN means the gpu is hung and we'll wait for the error
1435 * handler to reset everything when re-faulting in
1436 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001437 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001438 case 0:
1439 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001440 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001441 case -EBUSY:
1442 /*
1443 * EBUSY is ok: this just means that another thread
1444 * already did the job.
1445 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001446 return VM_FAULT_NOPAGE;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001447 case -ENOMEM:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001448 return VM_FAULT_OOM;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001449 case -ENOSPC:
1450 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001451 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001452 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Chris Wilsonc7150892009-09-23 00:43:56 +01001453 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001454 }
1455}
1456
1457/**
Chris Wilson901782b2009-07-10 08:18:50 +01001458 * i915_gem_release_mmap - remove physical page mappings
1459 * @obj: obj in question
1460 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001461 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001462 * relinquish ownership of the pages back to the system.
1463 *
1464 * It is vital that we remove the page mapping if we have mapped a tiled
1465 * object through the GTT and then lose the fence register due to
1466 * resource pressure. Similarly if the object has been moved out of the
1467 * aperture, then pages mapped into userspace must be revoked. Removing the
1468 * mapping will then trigger a page fault on the next user access, allowing
1469 * fixup by i915_gem_fault().
1470 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001471void
Chris Wilson05394f32010-11-08 19:18:58 +00001472i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001473{
Chris Wilson6299f992010-11-24 12:23:44 +00001474 if (!obj->fault_mappable)
1475 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001476
David Herrmann51335df2013-07-24 21:10:03 +02001477 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
Chris Wilson6299f992010-11-24 12:23:44 +00001478 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001479}
1480
Imre Deak0fa87792013-01-07 21:47:35 +02001481uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001482i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001483{
Chris Wilsone28f8712011-07-18 13:11:49 -07001484 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001485
1486 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001487 tiling_mode == I915_TILING_NONE)
1488 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001489
1490 /* Previous chips need a power-of-two fence region when tiling */
1491 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001492 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001493 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001494 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001495
Chris Wilsone28f8712011-07-18 13:11:49 -07001496 while (gtt_size < size)
1497 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001498
Chris Wilsone28f8712011-07-18 13:11:49 -07001499 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001500}
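/*
 * Worked example of the rounding above: a 700KiB tiled object on gen3
 * starts from the 1MiB minimum and already fits, so it claims a 1MiB
 * fence region, while a 1.5MiB object is doubled up to 2MiB. On gen4+
 * the object size is used unchanged.
 */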
1501
Jesse Barnesde151cf2008-11-12 10:03:55 -08001502/**
1503 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1504 * @obj: object to check
1505 *
1506 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001507 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001508 */
Imre Deakd8651102013-01-07 21:47:33 +02001509uint32_t
1510i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1511 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001512{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001513 /*
1514 * Minimum alignment is 4k (GTT page size), but might be greater
1515 * if a fence register is needed for the object.
1516 */
Imre Deakd8651102013-01-07 21:47:33 +02001517 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001518 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001519 return 4096;
1520
1521 /*
1522 * Previous chips need to be aligned to the size of the smallest
1523 * fence register that can contain the object.
1524 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001525 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001526}
1527
Chris Wilsond8cb5082012-08-11 15:41:03 +01001528static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1529{
1530 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1531 int ret;
1532
David Herrmann0de23972013-07-24 21:07:52 +02001533 if (drm_vma_node_has_offset(&obj->base.vma_node))
Chris Wilsond8cb5082012-08-11 15:41:03 +01001534 return 0;
1535
Daniel Vetterda494d72012-12-20 15:11:16 +01001536 dev_priv->mm.shrinker_no_lock_stealing = true;
1537
Chris Wilsond8cb5082012-08-11 15:41:03 +01001538 ret = drm_gem_create_mmap_offset(&obj->base);
1539 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001540 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001541
1542 /* Badly fragmented mmap space? The only way we can recover
1543 * space is by destroying unwanted objects. We can't randomly release
1544 * mmap_offsets as userspace expects them to be persistent for the
1545 * lifetime of the objects. The closest we can get is to release the
1546 * offsets on purgeable objects by truncating them and marking them purged,
1547 * which prevents userspace from ever using that object again.
1548 */
1549 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1550 ret = drm_gem_create_mmap_offset(&obj->base);
1551 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001552 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001553
1554 i915_gem_shrink_all(dev_priv);
Daniel Vetterda494d72012-12-20 15:11:16 +01001555 ret = drm_gem_create_mmap_offset(&obj->base);
1556out:
1557 dev_priv->mm.shrinker_no_lock_stealing = false;
1558
1559 return ret;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001560}
1561
1562static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1563{
Chris Wilsond8cb5082012-08-11 15:41:03 +01001564 drm_gem_free_mmap_offset(&obj->base);
1565}
1566
Jesse Barnesde151cf2008-11-12 10:03:55 -08001567int
Dave Airlieff72145b2011-02-07 12:16:14 +10001568i915_gem_mmap_gtt(struct drm_file *file,
1569 struct drm_device *dev,
1570 uint32_t handle,
1571 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001572{
Chris Wilsonda761a62010-10-27 17:37:08 +01001573 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001574 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001575 int ret;
1576
Chris Wilson76c1dec2010-09-25 11:22:51 +01001577 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001578 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001579 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001580
Dave Airlieff72145b2011-02-07 12:16:14 +10001581 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001582 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001583 ret = -ENOENT;
1584 goto unlock;
1585 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001586
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001587 if (obj->base.size > dev_priv->gtt.mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001588 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001589 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001590 }
1591
Chris Wilson05394f32010-11-08 19:18:58 +00001592 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001593 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001594 ret = -EINVAL;
1595 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001596 }
1597
Chris Wilsond8cb5082012-08-11 15:41:03 +01001598 ret = i915_gem_object_create_mmap_offset(obj);
1599 if (ret)
1600 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001601
David Herrmann0de23972013-07-24 21:07:52 +02001602 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001603
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001604out:
Chris Wilson05394f32010-11-08 19:18:58 +00001605 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001606unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001607 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001608 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001609}
1610
Dave Airlieff72145b2011-02-07 12:16:14 +10001611/**
1612 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1613 * @dev: DRM device
1614 * @data: GTT mapping ioctl data
1615 * @file: GEM object info
1616 *
1617 * Simply returns the fake offset to userspace so it can mmap it.
1618 * The mmap call will end up in drm_gem_mmap(), which will set things
1619 * up so we can get faults in the handler above.
1620 *
1621 * The fault handler will take care of binding the object into the GTT
1622 * (since it may have been evicted to make room for something), allocating
1623 * a fence register, and mapping the appropriate aperture address into
1624 * userspace.
1625 */
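/*
 * Hypothetical userspace sketch: obtain the fake offset with
 * DRM_IOCTL_I915_GEM_MMAP_GTT, then hand it to mmap() on the DRM fd:
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, args.offset);
 */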
1626int
1627i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1628 struct drm_file *file)
1629{
1630 struct drm_i915_gem_mmap_gtt *args = data;
1631
Dave Airlieff72145b2011-02-07 12:16:14 +10001632 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1633}
1634
Daniel Vetter225067e2012-08-20 10:23:20 +02001635/* Immediately discard the backing storage */
1636static void
1637i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001638{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001639 struct inode *inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001640
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001641 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001642
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001643 if (obj->base.filp == NULL)
1644 return;
1645
Daniel Vetter225067e2012-08-20 10:23:20 +02001646 /* Our goal here is to return as much of the memory as
1647 * possible back to the system, as we are called from OOM.
1648 * To do this we must instruct the shmfs to drop all of its
1649 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01001650 */
Al Viro496ad9a2013-01-23 17:07:38 -05001651 inode = file_inode(obj->base.filp);
Daniel Vetter225067e2012-08-20 10:23:20 +02001652 shmem_truncate_range(inode, 0, (loff_t)-1);
Hugh Dickins5949eac2011-06-27 16:18:18 -07001653
Daniel Vetter225067e2012-08-20 10:23:20 +02001654 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001655}
Chris Wilsone5281cc2010-10-28 13:45:36 +01001656
Daniel Vetter225067e2012-08-20 10:23:20 +02001657static inline int
1658i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1659{
1660 return obj->madv == I915_MADV_DONTNEED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001661}
1662
Chris Wilson5cdf5882010-09-27 15:51:07 +01001663static void
Chris Wilson05394f32010-11-08 19:18:58 +00001664i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001665{
Imre Deak90797e62013-02-18 19:28:03 +02001666 struct sg_page_iter sg_iter;
1667 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02001668
Chris Wilson05394f32010-11-08 19:18:58 +00001669 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001670
Chris Wilson6c085a72012-08-20 11:40:46 +02001671 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1672 if (ret) {
1673 /* In the event of a disaster, abandon all caches and
1674 * hope for the best.
1675 */
1676 WARN_ON(ret != -EIO);
Chris Wilson2c225692013-08-09 12:26:45 +01001677 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02001678 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1679 }
1680
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001681 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001682 i915_gem_object_save_bit_17_swizzle(obj);
1683
Chris Wilson05394f32010-11-08 19:18:58 +00001684 if (obj->madv == I915_MADV_DONTNEED)
1685 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001686
Imre Deak90797e62013-02-18 19:28:03 +02001687 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
Imre Deak2db76d72013-03-26 15:14:18 +02001688 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +01001689
Chris Wilson05394f32010-11-08 19:18:58 +00001690 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01001691 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001692
Chris Wilson05394f32010-11-08 19:18:58 +00001693 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01001694 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001695
Chris Wilson9da3da62012-06-01 15:20:22 +01001696 page_cache_release(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001697 }
Chris Wilson05394f32010-11-08 19:18:58 +00001698 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001699
Chris Wilson9da3da62012-06-01 15:20:22 +01001700 sg_free_table(obj->pages);
1701 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01001702}
1703
Chris Wilsondd624af2013-01-15 12:39:35 +00001704int
Chris Wilson37e680a2012-06-07 15:38:42 +01001705i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1706{
1707 const struct drm_i915_gem_object_ops *ops = obj->ops;
1708
Chris Wilson2f745ad2012-09-04 21:02:58 +01001709 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01001710 return 0;
1711
Chris Wilsona5570172012-09-04 21:02:54 +01001712 if (obj->pages_pin_count)
1713 return -EBUSY;
1714
Ben Widawsky98438772013-07-31 17:00:12 -07001715 BUG_ON(i915_gem_obj_bound_any(obj));
Ben Widawsky3e123022013-07-31 17:00:04 -07001716
Chris Wilsona2165e32012-12-03 11:49:00 +00001717 /* ->put_pages might need to allocate memory for the bit17 swizzle
1718 * array, hence protect them from being reaped by removing them from gtt
1719 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07001720 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00001721
Chris Wilson37e680a2012-06-07 15:38:42 +01001722 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001723 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02001724
Chris Wilson6c085a72012-08-20 11:40:46 +02001725 if (i915_gem_object_is_purgeable(obj))
1726 i915_gem_object_truncate(obj);
1727
1728 return 0;
1729}
1730
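/*
 * Reclaim backing pages until roughly @target pages have been freed:
 * first sweep the cheap unbound list, then unbind objects on the bound
 * list and strip their pages. With @purgeable_only set, only objects
 * marked I915_MADV_DONTNEED are considered.
 */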
Chris Wilsond9973b42013-10-04 10:33:00 +01001731static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001732__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1733 bool purgeable_only)
Chris Wilson6c085a72012-08-20 11:40:46 +02001734{
Chris Wilson57094f82013-09-04 10:45:50 +01001735 struct list_head still_bound_list;
Chris Wilson6c085a72012-08-20 11:40:46 +02001736 struct drm_i915_gem_object *obj, *next;
Chris Wilsond9973b42013-10-04 10:33:00 +01001737 unsigned long count = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001738
1739 list_for_each_entry_safe(obj, next,
1740 &dev_priv->mm.unbound_list,
Ben Widawsky35c20a62013-05-31 11:28:48 -07001741 global_list) {
Daniel Vetter93927ca2013-01-10 18:03:00 +01001742 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
Chris Wilson37e680a2012-06-07 15:38:42 +01001743 i915_gem_object_put_pages(obj) == 0) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001744 count += obj->base.size >> PAGE_SHIFT;
1745 if (count >= target)
1746 return count;
1747 }
1748 }
1749
Chris Wilson57094f82013-09-04 10:45:50 +01001750 /*
1751 * As we may completely rewrite the bound list whilst unbinding
1752 * (due to retiring requests) we have to strictly process only
1753 * one element of the list at a time, and recheck the list
1754 * on every iteration.
1755 */
1756 INIT_LIST_HEAD(&still_bound_list);
1757 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001758 struct i915_vma *vma, *v;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001759
Chris Wilson57094f82013-09-04 10:45:50 +01001760 obj = list_first_entry(&dev_priv->mm.bound_list,
1761 typeof(*obj), global_list);
1762 list_move_tail(&obj->global_list, &still_bound_list);
1763
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001764 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1765 continue;
1766
Chris Wilson57094f82013-09-04 10:45:50 +01001767 /*
1768 * Hold a reference whilst we unbind this object, as we may
1769 * end up waiting for and retiring requests. This might
1770 * release the final reference (held by the active list)
1771 * and result in the object being freed from under us.
1773 *
1774 * Note 1: Shrinking the bound list is special since only active
1775 * (and hence bound objects) can contain such limbo objects, so
1776 * we don't need special tricks for shrinking the unbound list.
1777 * The only other place where we have to be careful with active
1778 * objects suddenly disappearing due to retiring requests is the
1779 * eviction code.
1780 *
1781 * Note 2: Even though the bound list doesn't hold a reference
1782 * to the object we can safely grab one here: The final object
1783 * unreferencing and the bound_list are both protected by the
1784 * dev->struct_mutex and so we won't ever be able to observe an
1785 * object on the bound_list with a reference count of 0.
1786 */
1787 drm_gem_object_reference(&obj->base);
1788
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001789 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1790 if (i915_vma_unbind(vma))
1791 break;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001792
Chris Wilson57094f82013-09-04 10:45:50 +01001793 if (i915_gem_object_put_pages(obj) == 0)
Chris Wilson6c085a72012-08-20 11:40:46 +02001794 count += obj->base.size >> PAGE_SHIFT;
Chris Wilson57094f82013-09-04 10:45:50 +01001795
1796 drm_gem_object_unreference(&obj->base);
Chris Wilson6c085a72012-08-20 11:40:46 +02001797 }
Chris Wilson57094f82013-09-04 10:45:50 +01001798 list_splice(&still_bound_list, &dev_priv->mm.bound_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02001799
1800 return count;
1801}
1802
Chris Wilsond9973b42013-10-04 10:33:00 +01001803static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001804i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1805{
1806 return __i915_gem_shrink(dev_priv, target, true);
1807}
1808
Chris Wilsond9973b42013-10-04 10:33:00 +01001809static unsigned long
Chris Wilson6c085a72012-08-20 11:40:46 +02001810i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1811{
1812 struct drm_i915_gem_object *obj, *next;
Dave Chinner7dc19d52013-08-28 10:18:11 +10001813 long freed = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001814
1815 i915_gem_evict_everything(dev_priv->dev);
1816
Ben Widawsky35c20a62013-05-31 11:28:48 -07001817 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
Dave Chinner7dc19d52013-08-28 10:18:11 +10001818 global_list) {
Chris Wilsond9973b42013-10-04 10:33:00 +01001819 if (i915_gem_object_put_pages(obj) == 0)
Dave Chinner7dc19d52013-08-28 10:18:11 +10001820 freed += obj->base.size >> PAGE_SHIFT;
Dave Chinner7dc19d52013-08-28 10:18:11 +10001821 }
1822 return freed;
Daniel Vetter225067e2012-08-20 10:23:20 +02001823}
1824
Chris Wilson37e680a2012-06-07 15:38:42 +01001825static int
Chris Wilson6c085a72012-08-20 11:40:46 +02001826i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001827{
Chris Wilson6c085a72012-08-20 11:40:46 +02001828 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001829 int page_count, i;
1830 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01001831 struct sg_table *st;
1832 struct scatterlist *sg;
Imre Deak90797e62013-02-18 19:28:03 +02001833 struct sg_page_iter sg_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07001834 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02001835 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson6c085a72012-08-20 11:40:46 +02001836 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07001837
Chris Wilson6c085a72012-08-20 11:40:46 +02001838 /* Assert that the object is not currently in any GPU domain. As it
1839 * wasn't in the GTT, there shouldn't be any way it could have been in
1840 * a GPU cache
1841 */
1842 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1843 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1844
Chris Wilson9da3da62012-06-01 15:20:22 +01001845 st = kmalloc(sizeof(*st), GFP_KERNEL);
1846 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07001847 return -ENOMEM;
1848
Chris Wilson9da3da62012-06-01 15:20:22 +01001849 page_count = obj->base.size / PAGE_SIZE;
1850 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01001851 kfree(st);
1852 return -ENOMEM;
1853 }
1854
1855 /* Get the list of pages out of our struct file. They'll be pinned
1856 * at this point until we release them.
1857 *
1858 * Fail silently without starting the shrinker
1859 */
Al Viro496ad9a2013-01-23 17:07:38 -05001860 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilson6c085a72012-08-20 11:40:46 +02001861 gfp = mapping_gfp_mask(mapping);
Linus Torvaldscaf49192012-12-10 10:51:16 -08001862 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001863 gfp &= ~(__GFP_IO | __GFP_WAIT);
Imre Deak90797e62013-02-18 19:28:03 +02001864 sg = st->sgl;
1865 st->nents = 0;
1866 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001867 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1868 if (IS_ERR(page)) {
1869 i915_gem_purge(dev_priv, page_count);
1870 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1871 }
1872 if (IS_ERR(page)) {
1873 /* We've tried hard to allocate the memory by reaping
1874 * our own buffer, now let the real VM do its job and
1875 * go down in flames if truly OOM.
1876 */
Linus Torvaldscaf49192012-12-10 10:51:16 -08001877 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
Chris Wilson6c085a72012-08-20 11:40:46 +02001878 gfp |= __GFP_IO | __GFP_WAIT;
1879
1880 i915_gem_shrink_all(dev_priv);
1881 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1882 if (IS_ERR(page))
1883 goto err_pages;
1884
Linus Torvaldscaf49192012-12-10 10:51:16 -08001885 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001886 gfp &= ~(__GFP_IO | __GFP_WAIT);
1887 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001888#ifdef CONFIG_SWIOTLB
1889 if (swiotlb_nr_tbl()) {
1890 st->nents++;
1891 sg_set_page(sg, page, PAGE_SIZE, 0);
1892 sg = sg_next(sg);
1893 continue;
1894 }
1895#endif
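		/* Coalesce physically contiguous pages into a single
		 * scatterlist segment; start a new segment whenever the
		 * pfn run breaks.
		 */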
Imre Deak90797e62013-02-18 19:28:03 +02001896 if (!i || page_to_pfn(page) != last_pfn + 1) {
1897 if (i)
1898 sg = sg_next(sg);
1899 st->nents++;
1900 sg_set_page(sg, page, PAGE_SIZE, 0);
1901 } else {
1902 sg->length += PAGE_SIZE;
1903 }
1904 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03001905
1906 /* Check that the i965g/gm workaround works. */
1907 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07001908 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001909#ifdef CONFIG_SWIOTLB
1910 if (!swiotlb_nr_tbl())
1911#endif
1912 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01001913 obj->pages = st;
1914
Eric Anholt673a3942008-07-30 12:06:12 -07001915 if (i915_gem_object_needs_bit17_swizzle(obj))
1916 i915_gem_object_do_bit_17_swizzle(obj);
1917
1918 return 0;
1919
1920err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02001921 sg_mark_end(sg);
1922 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
Imre Deak2db76d72013-03-26 15:14:18 +02001923 page_cache_release(sg_page_iter_page(&sg_iter));
Chris Wilson9da3da62012-06-01 15:20:22 +01001924 sg_free_table(st);
1925 kfree(st);
Eric Anholt673a3942008-07-30 12:06:12 -07001926 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07001927}
1928
Chris Wilson37e680a2012-06-07 15:38:42 +01001929/* Ensure that the associated pages are gathered from the backing storage
1930 * and pinned into our object. i915_gem_object_get_pages() may be called
1931 * multiple times before they are released by a single call to
1932 * i915_gem_object_put_pages() - once the pages are no longer referenced
1933 * either as a result of memory pressure (reaping pages under the shrinker)
1934 * or as the object is itself released.
1935 */
1936int
1937i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1938{
1939 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1940 const struct drm_i915_gem_object_ops *ops = obj->ops;
1941 int ret;
1942
Chris Wilson2f745ad2012-09-04 21:02:58 +01001943 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01001944 return 0;
1945
Chris Wilson43e28f02013-01-08 10:53:09 +00001946 if (obj->madv != I915_MADV_WILLNEED) {
1947 DRM_ERROR("Attempting to obtain a purgeable object\n");
1948 return -EINVAL;
1949 }
1950
Chris Wilsona5570172012-09-04 21:02:54 +01001951 BUG_ON(obj->pages_pin_count);
1952
Chris Wilson37e680a2012-06-07 15:38:42 +01001953 ret = ops->get_pages(obj);
1954 if (ret)
1955 return ret;
1956
Ben Widawsky35c20a62013-05-31 11:28:48 -07001957 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilson37e680a2012-06-07 15:38:42 +01001958 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001959}
1960
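/*
 * Mark the object busy on @ring: take the active-list reference on first
 * use and record the seqnos that retirement will later wait upon.
 */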
Ben Widawskye2d05a82013-09-24 09:57:58 -07001961static void
Chris Wilson05394f32010-11-08 19:18:58 +00001962i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson9d7730912012-11-27 16:22:52 +00001963 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001964{
Chris Wilson05394f32010-11-08 19:18:58 +00001965 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001966 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9d7730912012-11-27 16:22:52 +00001967 u32 seqno = intel_ring_get_seqno(ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001968
Zou Nan hai852835f2010-05-21 09:08:56 +08001969 BUG_ON(ring == NULL);
Chris Wilson02978ff2013-07-09 09:22:39 +01001970 if (obj->ring != ring && obj->last_write_seqno) {
1971 /* Keep the seqno relative to the current ring */
1972 obj->last_write_seqno = seqno;
1973 }
Chris Wilson05394f32010-11-08 19:18:58 +00001974 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001975
1976 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001977 if (!obj->active) {
1978 drm_gem_object_reference(&obj->base);
1979 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001980 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001981
Chris Wilson05394f32010-11-08 19:18:58 +00001982 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001983
Chris Wilson0201f1e2012-07-20 12:41:01 +01001984 obj->last_read_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00001985
Chris Wilsoncaea7472010-11-12 13:53:37 +00001986 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00001987 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001988
Chris Wilson7dd49062012-03-21 10:48:18 +00001989 /* Bump MRU to take account of the delayed flush */
1990 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1991 struct drm_i915_fence_reg *reg;
1992
1993 reg = &dev_priv->fence_regs[obj->fence_reg];
1994 list_move_tail(&reg->lru_list,
1995 &dev_priv->mm.fence_list);
1996 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00001997 }
1998}
1999
Ben Widawskye2d05a82013-09-24 09:57:58 -07002000void i915_vma_move_to_active(struct i915_vma *vma,
2001 struct intel_ring_buffer *ring)
2002{
2003 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2004 return i915_gem_object_move_to_active(vma->obj, ring);
2005}
2006
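/*
 * Drop the object from the ring's active list once the GPU is done with
 * it: move it back to the inactive GGTT list, clear the seqno tracking
 * and release the active-list reference.
 */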
Chris Wilsoncaea7472010-11-12 13:53:37 +00002007static void
Chris Wilsoncaea7472010-11-12 13:53:37 +00002008i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2009{
Ben Widawskyca191b12013-07-31 17:00:14 -07002010 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2011 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
2012 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002013
Chris Wilson65ce3022012-07-20 12:41:02 +01002014 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002015 BUG_ON(!obj->active);
Chris Wilson65ce3022012-07-20 12:41:02 +01002016
Ben Widawskyca191b12013-07-31 17:00:14 -07002017 list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002018
Chris Wilson65ce3022012-07-20 12:41:02 +01002019 list_del_init(&obj->ring_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002020 obj->ring = NULL;
2021
Chris Wilson65ce3022012-07-20 12:41:02 +01002022 obj->last_read_seqno = 0;
2023 obj->last_write_seqno = 0;
2024 obj->base.write_domain = 0;
2025
2026 obj->last_fenced_seqno = 0;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002027 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002028
2029 obj->active = 0;
2030 drm_gem_object_unreference(&obj->base);
2031
2032 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08002033}
Eric Anholt673a3942008-07-30 12:06:12 -07002034
Chris Wilson9d7730912012-11-27 16:22:52 +00002035static int
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002036i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002037{
Chris Wilson9d7730912012-11-27 16:22:52 +00002038 struct drm_i915_private *dev_priv = dev->dev_private;
2039 struct intel_ring_buffer *ring;
2040 int ret, i, j;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002041
Chris Wilson107f27a52012-12-10 13:56:17 +02002042 /* Carefully retire all requests without writing to the rings */
Chris Wilson9d7730912012-11-27 16:22:52 +00002043 for_each_ring(ring, dev_priv, i) {
Chris Wilson107f27a52012-12-10 13:56:17 +02002044 ret = intel_ring_idle(ring);
2045 if (ret)
2046 return ret;
Chris Wilson9d7730912012-11-27 16:22:52 +00002047 }
Chris Wilson9d7730912012-11-27 16:22:52 +00002048 i915_gem_retire_requests(dev);
Chris Wilson107f27a52012-12-10 13:56:17 +02002049
2050 /* Finally reset hw state */
Chris Wilson9d7730912012-11-27 16:22:52 +00002051 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002052 intel_ring_init_seqno(ring, seqno);
Mika Kuoppala498d2ac2012-12-04 15:12:04 +02002053
Chris Wilson9d7730912012-11-27 16:22:52 +00002054 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2055 ring->sync_seqno[j] = 0;
2056 }
2057
2058 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002059}
2060
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002061int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2062{
2063 struct drm_i915_private *dev_priv = dev->dev_private;
2064 int ret;
2065
2066 if (seqno == 0)
2067 return -EINVAL;
2068
2069 /* HWS page needs to be set less than what we
2070	 * will inject into the ring
2071 */
2072 ret = i915_gem_init_seqno(dev, seqno - 1);
2073 if (ret)
2074 return ret;
2075
2076 /* Carefully set the last_seqno value so that wrap
2077 * detection still works
2078 */
2079 dev_priv->next_seqno = seqno;
2080 dev_priv->last_seqno = seqno - 1;
2081 if (dev_priv->last_seqno == 0)
2082 dev_priv->last_seqno--;
2083
2084 return 0;
2085}
2086
Chris Wilson9d7730912012-11-27 16:22:52 +00002087int
2088i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002089{
Chris Wilson9d7730912012-11-27 16:22:52 +00002090 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002091
Chris Wilson9d7730912012-11-27 16:22:52 +00002092 /* reserve 0 for non-seqno */
2093 if (dev_priv->next_seqno == 0) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002094 int ret = i915_gem_init_seqno(dev, 0);
Chris Wilson9d7730912012-11-27 16:22:52 +00002095 if (ret)
2096 return ret;
2097
2098 dev_priv->next_seqno = 1;
2099 }
2100
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02002101 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
Chris Wilson9d7730912012-11-27 16:22:52 +00002102 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002103}
2104
Mika Kuoppala0025c072013-06-12 12:35:30 +03002105int __i915_add_request(struct intel_ring_buffer *ring,
2106 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002107 struct drm_i915_gem_object *obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002108 u32 *out_seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002109{
Chris Wilsondb53a302011-02-03 11:57:46 +00002110 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Chris Wilsonacb868d2012-09-26 13:47:30 +01002111 struct drm_i915_gem_request *request;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002112 u32 request_ring_position, request_start;
Eric Anholt673a3942008-07-30 12:06:12 -07002113 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01002114 int ret;
2115
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002116 request_start = intel_ring_get_tail(ring);
Daniel Vettercc889e02012-06-13 20:45:19 +02002117 /*
2118 * Emit any outstanding flushes - execbuf can fail to emit the flush
2119 * after having emitted the batchbuffer command. Hence we need to fix
2120 * things up similar to emitting the lazy request. The difference here
2121 * is that the flush _must_ happen before the next request, no matter
2122 * what.
2123 */
Chris Wilsona7b97612012-07-20 12:41:08 +01002124 ret = intel_ring_flush_all_caches(ring);
2125 if (ret)
2126 return ret;
Daniel Vettercc889e02012-06-13 20:45:19 +02002127
Chris Wilson3c0e2342013-09-04 10:45:52 +01002128 request = ring->preallocated_lazy_request;
2129 if (WARN_ON(request == NULL))
Chris Wilsonacb868d2012-09-26 13:47:30 +01002130 return -ENOMEM;
Daniel Vettercc889e02012-06-13 20:45:19 +02002131
Chris Wilsona71d8d92012-02-15 11:25:36 +00002132 /* Record the position of the start of the request so that
2133 * should we detect the updated seqno part-way through the
2134 * GPU processing the request, we never over-estimate the
2135 * position of the head.
2136 */
2137 request_ring_position = intel_ring_get_tail(ring);
2138
Chris Wilson9d7730912012-11-27 16:22:52 +00002139 ret = ring->add_request(ring);
Chris Wilson3c0e2342013-09-04 10:45:52 +01002140 if (ret)
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002141 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002142
Chris Wilson9d7730912012-11-27 16:22:52 +00002143 request->seqno = intel_ring_get_seqno(ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08002144 request->ring = ring;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002145 request->head = request_start;
Chris Wilsona71d8d92012-02-15 11:25:36 +00002146 request->tail = request_ring_position;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002147
2148 /* Whilst this request exists, batch_obj will be on the
2149 * active_list, and so will hold the active reference. Only when this
2150	 * request is retired will the batch_obj be moved onto the
2151 * inactive_list and lose its active reference. Hence we do not need
2152 * to explicitly hold another reference here.
2153 */
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002154 request->batch_obj = obj;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002155
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002156 /* Hold a reference to the current context so that we can inspect
2157 * it later in case a hangcheck error event fires.
2158 */
2159 request->ctx = ring->last_context;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002160 if (request->ctx)
2161 i915_gem_context_reference(request->ctx);
2162
Eric Anholt673a3942008-07-30 12:06:12 -07002163 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08002164 was_empty = list_empty(&ring->request_list);
2165 list_add_tail(&request->list, &ring->request_list);
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002166 request->file_priv = NULL;
Zou Nan hai852835f2010-05-21 09:08:56 +08002167
Chris Wilsondb53a302011-02-03 11:57:46 +00002168 if (file) {
2169 struct drm_i915_file_private *file_priv = file->driver_priv;
2170
Chris Wilson1c255952010-09-26 11:03:27 +01002171 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002172 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002173 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002174 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01002175 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00002176 }
Eric Anholt673a3942008-07-30 12:06:12 -07002177
Chris Wilson9d7730912012-11-27 16:22:52 +00002178 trace_i915_gem_request_add(ring, request->seqno);
Chris Wilson18235212013-09-04 10:45:51 +01002179 ring->outstanding_lazy_seqno = 0;
Chris Wilson3c0e2342013-09-04 10:45:52 +01002180 ring->preallocated_lazy_request = NULL;
Chris Wilsondb53a302011-02-03 11:57:46 +00002181
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002182 if (!dev_priv->ums.mm_suspended) {
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002183 i915_queue_hangcheck(ring->dev);
2184
Chris Wilsonf047e392012-07-21 12:31:41 +01002185 if (was_empty) {
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002186 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
Chris Wilsonb3b079d2010-09-13 23:44:34 +01002187 queue_delayed_work(dev_priv->wq,
Chris Wilsonbcb45082012-10-05 17:02:57 +01002188 &dev_priv->mm.retire_work,
2189 round_jiffies_up_relative(HZ));
Chris Wilsonf047e392012-07-21 12:31:41 +01002190 intel_mark_busy(dev_priv->dev);
2191 }
Ben Gamarif65d9422009-09-14 17:48:44 -04002192 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002193
Chris Wilsonacb868d2012-09-26 13:47:30 +01002194 if (out_seqno)
Chris Wilson9d7730912012-11-27 16:22:52 +00002195 *out_seqno = request->seqno;
Chris Wilson3cce4692010-10-27 16:11:02 +01002196 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002197}
2198
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002199static inline void
2200i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07002201{
Chris Wilson1c255952010-09-26 11:03:27 +01002202 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002203
Chris Wilson1c255952010-09-26 11:03:27 +01002204 if (!file_priv)
2205 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002206
Chris Wilson1c255952010-09-26 11:03:27 +01002207 spin_lock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002208 list_del(&request->client_list);
2209 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01002210 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002211}
2212
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002213static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2214 struct i915_address_space *vm)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002215{
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002216 if (acthd >= i915_gem_obj_offset(obj, vm) &&
2217 acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002218 return true;
2219
2220 return false;
2221}
2222
2223static bool i915_head_inside_request(const u32 acthd_unmasked,
2224 const u32 request_start,
2225 const u32 request_end)
2226{
2227 const u32 acthd = acthd_unmasked & HEAD_ADDR;
2228
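	/* A request's contents may wrap past the end of the ring, in which
	 * case its start offset is numerically above its end offset and the
	 * occupied range straddles the wrap point.
	 */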
2229 if (request_start < request_end) {
2230 if (acthd >= request_start && acthd < request_end)
2231 return true;
2232 } else if (request_start > request_end) {
2233 if (acthd >= request_start || acthd < request_end)
2234 return true;
2235 }
2236
2237 return false;
2238}
2239
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002240static struct i915_address_space *
2241request_to_vm(struct drm_i915_gem_request *request)
2242{
2243 struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2244 struct i915_address_space *vm;
2245
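	/* Hang analysis currently considers only the global GTT address
	 * space, hence the unconditional choice here.
	 */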
2246 vm = &dev_priv->gtt.base;
2247
2248 return vm;
2249}
2250
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002251static bool i915_request_guilty(struct drm_i915_gem_request *request,
2252 const u32 acthd, bool *inside)
2253{
2254	 /* There is a possibility that the unmasked head address,
2255	 * while pointing inside the ring, matches the batch_obj address range.
2256 * However this is extremely unlikely.
2257 */
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002258 if (request->batch_obj) {
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002259 if (i915_head_inside_object(acthd, request->batch_obj,
2260 request_to_vm(request))) {
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002261 *inside = true;
2262 return true;
2263 }
2264 }
2265
2266 if (i915_head_inside_request(acthd, request->head, request->tail)) {
2267 *inside = false;
2268 return true;
2269 }
2270
2271 return false;
2272}
2273
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002274static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
2275{
2276 const unsigned long elapsed = get_seconds() - hs->guilty_ts;
2277
2278 if (hs->banned)
2279 return true;
2280
2281 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2282 DRM_ERROR("context hanging too fast, declaring banned!\n");
2283 return true;
2284 }
2285
2286 return false;
2287}
2288
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002289static void i915_set_reset_status(struct intel_ring_buffer *ring,
2290 struct drm_i915_gem_request *request,
2291 u32 acthd)
2292{
2293 struct i915_ctx_hang_stats *hs = NULL;
2294 bool inside, guilty;
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002295 unsigned long offset = 0;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002296
2297 /* Innocent until proven guilty */
2298 guilty = false;
2299
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002300 if (request->batch_obj)
2301 offset = i915_gem_obj_offset(request->batch_obj,
2302 request_to_vm(request));
2303
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002304 if (ring->hangcheck.action != HANGCHECK_WAIT &&
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002305 i915_request_guilty(request, acthd, &inside)) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002306 DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002307 ring->name,
2308 inside ? "inside" : "flushing",
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002309 offset,
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002310 request->ctx ? request->ctx->id : 0,
2311 acthd);
2312
2313 guilty = true;
2314 }
2315
2316 /* If contexts are disabled or this is the default context, use
2317	 * file_priv->hang_stats
2318 */
2319 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2320 hs = &request->ctx->hang_stats;
2321 else if (request->file_priv)
2322 hs = &request->file_priv->hang_stats;
2323
2324 if (hs) {
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002325 if (guilty) {
2326 hs->banned = i915_context_is_banned(hs);
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002327 hs->batch_active++;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002328 hs->guilty_ts = get_seconds();
2329 } else {
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002330 hs->batch_pending++;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002331 }
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002332 }
2333}
2334
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002335static void i915_gem_free_request(struct drm_i915_gem_request *request)
2336{
2337 list_del(&request->list);
2338 i915_gem_request_remove_from_client(request);
2339
2340 if (request->ctx)
2341 i915_gem_context_unreference(request->ctx);
2342
2343 kfree(request);
2344}
2345
Chris Wilson4db080f2013-12-04 11:37:09 +00002346static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2347 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01002348{
Chris Wilson4db080f2013-12-04 11:37:09 +00002349 u32 completed_seqno = ring->get_seqno(ring, false);
2350 u32 acthd = intel_ring_get_active_head(ring);
2351 struct drm_i915_gem_request *request;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002352
Chris Wilson4db080f2013-12-04 11:37:09 +00002353 list_for_each_entry(request, &ring->request_list, list) {
2354 if (i915_seqno_passed(completed_seqno, request->seqno))
2355 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002356
Chris Wilson4db080f2013-12-04 11:37:09 +00002357 i915_set_reset_status(ring, request, acthd);
2358 }
2359}
2360
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
					struct intel_ring_buffer *ring)
{
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		i915_gem_free_request(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		i915_gem_object_move_to_inactive(obj);
	}
}

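/*
 * Rewrite every fence register from the software tracking state, for
 * use after events such as a GPU reset where the hardware contents can
 * no longer be trusted.
 */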
void i915_gem_restore_fences(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		/*
		 * Commit delayed tiling changes if we have an object still
		 * attached to the fence, otherwise just clear the fence.
		 */
		if (reg->obj) {
			i915_gem_object_update_fence(reg->obj, reg,
						     reg->obj->tiling_mode);
		} else {
			i915_gem_write_fence(dev, i, NULL);
		}
	}
}

void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	/*
	 * Before we free the objects from the requests, we need to inspect
	 * them for finding the guilty party. As the requests only borrow
	 * their reference to the objects, the inspection must be done first.
	 */
	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_status(dev_priv, ring);

	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_cleanup(dev_priv, ring);

	i915_gem_cleanup_ringbuffer(dev);

	i915_gem_restore_fences(dev);
}

/**
 * Retire requests on @ring whose seqnos the GPU has already passed,
 * freeing them and moving the objects they kept active onto the
 * inactive list.
 */
void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
	uint32_t seqno;

	if (list_empty(&ring->request_list))
		return;

	WARN_ON(i915_verify_lists(ring->dev));

	seqno = ring->get_seqno(ring, true);

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_seqno_passed(seqno, request->seqno))
			break;

		trace_i915_gem_request_retire(ring, request->seqno);
		/* We know the GPU must have read the request to have
		 * sent us the seqno + interrupt, so use the position
		 * of tail of the request to update the last known position
		 * of the GPU head.
		 */
		ring->last_retired_head = request->tail;

		i915_gem_free_request(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
			break;

		i915_gem_object_move_to_inactive(obj);
	}

	if (unlikely(ring->trace_irq_seqno &&
		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
		ring->irq_put(ring);
		ring->trace_irq_seqno = 0;
	}

	WARN_ON(i915_verify_lists(ring->dev));
}

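/*
 * Retire completed requests on all rings. Returns true if every ring is
 * now idle, in which case the deferred idle handler is scheduled to run
 * after a short grace period.
 */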
bool
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	bool idle = true;
	int i;

	for_each_ring(ring, dev_priv, i) {
		i915_gem_retire_requests_ring(ring);
		idle &= list_empty(&ring->request_list);
	}

	if (idle)
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->mm.idle_work,
				 msecs_to_jiffies(100));

	return idle;
}

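/*
 * Periodic housekeeping run from the driver workqueue. struct_mutex is
 * only trylocked so that a busy device never stalls the workqueue; if
 * retirement could not complete, the work simply rearms itself.
 */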
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), mm.retire_work.work);
	struct drm_device *dev = dev_priv->dev;
	bool idle;

	/* Come back later if the device is busy... */
	idle = false;
	if (mutex_trylock(&dev->struct_mutex)) {
		idle = i915_gem_retire_requests(dev);
		mutex_unlock(&dev->struct_mutex);
	}
	if (!idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
}

static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), mm.idle_work.work);

	intel_mark_idle(dev_priv->dev);
}

/**
 * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring any
 * completed requests.
 */
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->active) {
		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
		if (ret)
			return ret;

		i915_gem_retire_requests_ring(obj->ring);
	}

	return 0;
}

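/*
 * A minimal sketch of how userspace might drive the wait ioctl below,
 * assuming libdrm's drmIoctl() wrapper (the fd and handle here are
 * hypothetical):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */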
/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the
 * busy ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_ring_buffer *ring = NULL;
	struct timespec timeout_stack, *timeout = NULL;
	unsigned reset_counter;
	u32 seqno = 0;
	int ret = 0;

	if (args->timeout_ns >= 0) {
		timeout_stack = ns_to_timespec(args->timeout_ns);
		timeout = &timeout_stack;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
	if (&obj->base == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	/* Need to make sure the object gets inactive eventually. */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto out;

	if (obj->active) {
		seqno = obj->last_read_seqno;
		ring = obj->ring;
	}

	if (seqno == 0)
		goto out;

	/* Do this after OLR check to make sure we make forward progress polling
	 * on this IOCTL with a 0 timeout (like busy ioctl)
	 */
	if (!args->timeout_ns) {
		ret = -ETIME;
		goto out;
	}

	drm_gem_object_unreference(&obj->base);
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);

	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
	if (timeout)
		args->timeout_ns = timespec_to_ns(timeout);
	return ret;

out:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_object_sync - sync an object to a ring.
 *
 * @obj: object which may be in use on another ring.
 * @to: ring we wish to use the object on. May be NULL.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Calling with NULL implies synchronizing the object with the CPU
 * rather than a particular GPU ring.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct intel_ring_buffer *to)
{
	struct intel_ring_buffer *from = obj->ring;
	u32 seqno;
	int ret, idx;

	if (from == NULL || to == from)
		return 0;

	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
		return i915_gem_object_wait_rendering(obj, false);

	idx = intel_ring_sync_index(from, to);

	seqno = obj->last_read_seqno;
	if (seqno <= from->sync_seqno[idx])
		return 0;

	ret = i915_gem_check_olr(obj->ring, seqno);
	if (ret)
		return ret;

	trace_i915_gem_ring_sync_to(from, to, seqno);
	ret = to->sync_to(to, from, seqno);
	if (!ret)
		/* We use last_read_seqno because sync_to()
		 * might have just caused seqno wrap under
		 * the radar.
		 */
		from->sync_seqno[idx] = obj->last_read_seqno;

	return ret;
}

static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
	u32 old_write_domain, old_read_domains;

	/* Force a pagefault for domain tracking on next user access */
	i915_gem_release_mmap(obj);

	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		return;

	/* Wait for any direct GTT access to complete */
	mb();

	old_read_domains = obj->base.read_domains;
	old_write_domain = obj->base.write_domain;

	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
}

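/*
 * Unbind a VMA from its address space: wait for outstanding GPU access,
 * drop any fence, tear down the GTT and aliasing PPGTT mappings and
 * release the drm_mm node. Once its last VMA is gone, the object moves
 * onto the unbound list.
 */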
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	int ret;

	/* For now we only ever use 1 vma per object */
	WARN_ON(!list_is_singular(&obj->vma_list));

	if (list_empty(&vma->vma_link))
		return 0;

	if (!drm_mm_node_allocated(&vma->node)) {
		i915_gem_vma_destroy(vma);

		return 0;
	}

	if (obj->pin_count)
		return -EBUSY;

	BUG_ON(obj->pages == NULL);

	ret = i915_gem_object_finish_gpu(obj);
	if (ret)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */

	i915_gem_object_finish_gtt(obj);

	/* release the fence reg _after_ flushing */
	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	trace_i915_vma_unbind(vma);

	if (obj->has_global_gtt_mapping)
		i915_gem_gtt_unbind_object(obj);
	if (obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
		obj->has_aliasing_ppgtt_mapping = 0;
	}
	i915_gem_gtt_finish_object(obj);
	i915_gem_object_unpin_pages(obj);

	list_del(&vma->mm_list);
	/* Avoid an unnecessary call to unbind on rebind. */
	if (i915_is_ggtt(vma->vm))
		obj->map_and_fenceable = true;

	drm_mm_remove_node(&vma->node);

	i915_gem_vma_destroy(vma);

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist. */
	if (list_empty(&obj->vma_list))
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	return 0;
}

/**
 * Unbinds an object from the global GTT aperture.
 */
int
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_address_space *ggtt = &dev_priv->gtt.base;

	if (!i915_gem_obj_ggtt_bound(obj))
		return 0;

	if (obj->pin_count)
		return -EBUSY;

	BUG_ON(obj->pages == NULL);

	return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
}

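/*
 * Wait for the whole GPU to drain. Each ring is switched back to the
 * default context first, so no user context stays pinned while the
 * device idles.
 */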
int i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	/* Flush everything onto the inactive list. */
	for_each_ring(ring, dev_priv, i) {
		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
		if (ret)
			return ret;

		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}

	return 0;
}

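/*
 * Gen4+ fence registers are 64 bits wide and encode the start and end
 * of the fenced range along with the pitch and tiling mode; see the
 * comment below for why the update is split into 32-bit writes.
 */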
static void i965_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int fence_reg;
	int fence_pitch_shift;

	if (INTEL_INFO(dev)->gen >= 6) {
		fence_reg = FENCE_REG_SANDYBRIDGE_0;
		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
	} else {
		fence_reg = FENCE_REG_965_0;
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
	}

	fence_reg += reg * 8;

	/* To w/a incoherency with non-atomic 64-bit register updates,
	 * we split the 64-bit update into two 32-bit writes. In order
	 * for a partial fence not to be evaluated between writes, we
	 * precede the update with a write to turn off the fence register,
	 * and only enable the fence as the last step.
	 *
	 * For extra levels of paranoia, we make sure each step lands
	 * before applying the next step.
	 */
	I915_WRITE(fence_reg, 0);
	POSTING_READ(fence_reg);

	if (obj) {
		u32 size = i915_gem_obj_ggtt_size(obj);
		uint64_t val;

		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
				 0xfffff000) << 32;
		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
		val |= I965_FENCE_REG_VALID;

		I915_WRITE(fence_reg + 4, val >> 32);
		POSTING_READ(fence_reg + 4);

		I915_WRITE(fence_reg + 0, val);
		POSTING_READ(fence_reg);
	} else {
		I915_WRITE(fence_reg + 4, 0);
		POSTING_READ(fence_reg + 4);
	}
}

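/*
 * Gen3 fences are a single 32-bit register: start address, encoded
 * size, log2 pitch in tile widths and the tiling bit. The WARN below
 * enforces the power-of-two size and alignment the hardware requires.
 */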
static void i915_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 val;

	if (obj) {
		u32 size = i915_gem_obj_ggtt_size(obj);
		int pitch_val;
		int tile_width;

		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);

		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
			tile_width = 128;
		else
			tile_width = 512;

		/* Note: pitch better be a power of two tile widths */
		pitch_val = obj->stride / tile_width;
		pitch_val = ffs(pitch_val) - 1;

		val = i915_gem_obj_ggtt_offset(obj);
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I915_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;

	if (reg < 8)
		reg = FENCE_REG_830_0 + reg * 4;
	else
		reg = FENCE_REG_945_8 + (reg - 8) * 4;

	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

static void i830_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t val;

	if (obj) {
		u32 size = i915_gem_obj_ggtt_size(obj);
		uint32_t pitch_val;

		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
		     i915_gem_obj_ggtt_offset(obj), size);

		pitch_val = obj->stride / 128;
		pitch_val = ffs(pitch_val) - 1;

		val = i915_gem_obj_ggtt_offset(obj);
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I830_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;

	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
	POSTING_READ(FENCE_REG_830_0 + reg * 4);
}

static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
{
	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
}

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Ensure that all CPU reads are completed before installing a fence
	 * and all writes before removing the fence.
	 */
	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
		mb();

	WARN(obj && (!obj->stride || !obj->tiling_mode),
	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
	     obj->stride, obj->tiling_mode);

	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6:
	case 5:
	case 4: i965_write_fence_reg(dev, reg, obj); break;
	case 3: i915_write_fence_reg(dev, reg, obj); break;
	case 2: i830_write_fence_reg(dev, reg, obj); break;
	default: BUG();
	}

	/* And similarly be paranoid that no direct access to this region
	 * is reordered to before the fence is installed.
	 */
	if (i915_gem_object_needs_mb(obj))
		mb();
}

static inline int fence_number(struct drm_i915_private *dev_priv,
			       struct drm_i915_fence_reg *fence)
{
	return fence - dev_priv->fence_regs;
}

static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int reg = fence_number(dev_priv, fence);

	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);

	if (enable) {
		obj->fence_reg = reg;
		fence->obj = obj;
		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
	} else {
		obj->fence_reg = I915_FENCE_REG_NONE;
		fence->obj = NULL;
		list_del_init(&fence->lru_list);
	}
	obj->fence_dirty = false;
}

static int
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
	if (obj->last_fenced_seqno) {
		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
		if (ret)
			return ret;

		obj->last_fenced_seqno = 0;
	}

	obj->fenced_gpu_access = false;
	return 0;
}

int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_fence_reg *fence;
	int ret;

	ret = i915_gem_object_wait_fence(obj);
	if (ret)
		return ret;

	if (obj->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	fence = &dev_priv->fence_regs[obj->fence_reg];

	i915_gem_object_fence_lost(obj);
	i915_gem_object_update_fence(obj, fence, false);

	return 0;
}

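/*
 * Pick a fence register for reuse: prefer a completely free register,
 * otherwise steal the least-recently-used one that is not pinned.
 * Returns NULL if every register is pinned.
 */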
static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg, *avail;
	int i;

	/* First try to find a free reg */
	avail = NULL;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return reg;

		if (!reg->pin_count)
			avail = reg;
	}

	if (avail == NULL)
		return NULL;

	/* None available, try to steal one or wait for a user to finish */
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
		if (reg->pin_count)
			continue;

		return reg;
	}

	return NULL;
}

/**
 * i915_gem_object_get_fence - set up fencing for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 */
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool enable = obj->tiling_mode != I915_TILING_NONE;
	struct drm_i915_fence_reg *reg;
	int ret;

	/* Have we updated the tiling parameters upon the object and so
	 * will need to serialise the write to the associated fence register?
	 */
	if (obj->fence_dirty) {
		ret = i915_gem_object_wait_fence(obj);
		if (ret)
			return ret;
	}

	/* Just update our place in the LRU if our fence is getting reused. */
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj->fence_reg];
		if (!obj->fence_dirty) {
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
			return 0;
		}
	} else if (enable) {
		reg = i915_find_fence_reg(dev);
		if (reg == NULL)
			return -EDEADLK;

		if (reg->obj) {
			struct drm_i915_gem_object *old = reg->obj;

			ret = i915_gem_object_wait_fence(old);
			if (ret)
				return ret;

			i915_gem_object_fence_lost(old);
		}
	} else
		return 0;

	i915_gem_object_update_fence(obj, reg, enable);

	return 0;
}

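/*
 * Check whether a GTT node may hold an object of the given cache level.
 * Without LLC, neighbouring nodes must share a colour (cache level)
 * unless a guard hole separates them, to stop the prefetcher straying
 * across snoop domains.
 */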
static bool i915_gem_valid_gtt_space(struct drm_device *dev,
				     struct drm_mm_node *gtt_space,
				     unsigned long cache_level)
{
	struct drm_mm_node *other;

	/* On non-LLC machines we have to be careful when putting differing
	 * types of snoopable memory together to avoid the prefetcher
	 * crossing memory domains and dying.
	 */
	if (HAS_LLC(dev))
		return true;

	if (!drm_mm_node_allocated(gtt_space))
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}

static void i915_gem_verify_gtt(struct drm_device *dev)
{
#if WATCH_GTT
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int err = 0;

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
		if (obj->gtt_space == NULL) {
			printk(KERN_ERR "object found on GTT list with no space reserved\n");
			err++;
			continue;
		}

		if (obj->cache_level != obj->gtt_space->color) {
			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
			       i915_gem_obj_ggtt_offset(obj),
			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
			       obj->cache_level,
			       obj->gtt_space->color);
			err++;
			continue;
		}

		if (!i915_gem_valid_gtt_space(dev,
					      obj->gtt_space,
					      obj->cache_level)) {
			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
			       i915_gem_obj_ggtt_offset(obj),
			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
			       obj->cache_level);
			err++;
			continue;
		}
	}

	WARN_ON(err);
#endif
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
			   bool map_and_fenceable,
			   bool nonblocking)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
	size_t gtt_max =
		map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
	struct i915_vma *vma;
	int ret;

	fence_size = i915_gem_get_gtt_size(dev,
					   obj->base.size,
					   obj->tiling_mode);
	fence_alignment = i915_gem_get_gtt_alignment(dev,
						     obj->base.size,
						     obj->tiling_mode, true);
	unfenced_alignment =
		i915_gem_get_gtt_alignment(dev,
					   obj->base.size,
					   obj->tiling_mode, false);

	if (alignment == 0)
		alignment = map_and_fenceable ? fence_alignment :
						unfenced_alignment;
	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	size = map_and_fenceable ? fence_size : obj->base.size;

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->base.size > gtt_max) {
		DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
			  obj->base.size,
			  map_and_fenceable ? "mappable" : "total",
			  gtt_max);
		return -E2BIG;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	BUG_ON(!i915_is_ggtt(vm));

	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unpin;
	}

	/* For now we only ever use 1 vma per object */
	WARN_ON(!list_is_singular(&obj->vma_list));

search_free:
	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
						  size, alignment,
						  obj->cache_level, 0, gtt_max,
						  DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		ret = i915_gem_evict_something(dev, vm, size, alignment,
					       obj->cache_level,
					       map_and_fenceable,
					       nonblocking);
		if (ret == 0)
			goto search_free;

		goto err_free_vma;
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
					      obj->cache_level))) {
		ret = -EINVAL;
		goto err_remove_node;
	}

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		goto err_remove_node;

	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &vm->inactive_list);

	if (i915_is_ggtt(vm)) {
		bool mappable, fenceable;

		fenceable = (vma->node.size == fence_size &&
			     (vma->node.start & (fence_alignment - 1)) == 0);

		mappable = (vma->node.start + obj->base.size <=
			    dev_priv->gtt.mappable_end);

		obj->map_and_fenceable = mappable && fenceable;
	}

	WARN_ON(map_and_fenceable && !obj->map_and_fenceable);

	trace_i915_vma_bind(vma, map_and_fenceable);
	i915_gem_verify_gtt(dev);
	return 0;

err_remove_node:
	drm_mm_remove_node(&vma->node);
err_free_vma:
	i915_gem_vma_destroy(vma);
err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

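/*
 * Flush CPU cachelines for the object's backing pages. Returns true if
 * a flush was actually performed, letting the caller decide whether a
 * chipset flush is also required. Stolen memory is always coherent and
 * never flushed; snooped objects are skipped unless @force is set.
 */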
bool
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			bool force)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return false;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen)
		return false;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return false;

	trace_i915_gem_object_clflush(obj);
	drm_clflush_sg(obj->pages);

	return true;
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
				       bool force)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	if (i915_gem_clflush_object(obj, force))
		i915_gem_chipset_flush(obj->base.dev);

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (!i915_gem_obj_bound_any(obj))
		return -EINVAL;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj, false);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	if (i915_gem_object_is_inactive(obj)) {
		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
		if (vma)
			list_move_tail(&vma->mm_list,
				       &dev_priv->gtt.base.inactive_list);
	}

	return 0;
}

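/*
 * Change an object's caching mode: VMAs whose placement is invalid for
 * the new level are unbound first, any remaining GTT/PPGTT entries are
 * rewritten with the new level, and pre-gen6 fences are relinquished
 * since they cannot cover snooped memory.
 */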
Chris Wilsone4ffd172011-04-04 09:44:39 +01003484int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3485 enum i915_cache_level cache_level)
3486{
Daniel Vetter7bddb012012-02-09 17:15:47 +01003487 struct drm_device *dev = obj->base.dev;
3488 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003489 struct i915_vma *vma;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003490 int ret;
3491
3492 if (obj->cache_level == cache_level)
3493 return 0;
3494
3495 if (obj->pin_count) {
3496 DRM_DEBUG("can not change the cache level of pinned objects\n");
3497 return -EBUSY;
3498 }
3499
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003500 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3501 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003502 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003503 if (ret)
3504 return ret;
3505
3506 break;
3507 }
Chris Wilson42d6ab42012-07-26 11:49:32 +01003508 }
3509
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003510 if (i915_gem_obj_bound_any(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003511 ret = i915_gem_object_finish_gpu(obj);
3512 if (ret)
3513 return ret;
3514
3515 i915_gem_object_finish_gtt(obj);
3516
3517 /* Before SandyBridge, you could not use tiling or fence
3518 * registers with snooped memory, so relinquish any fences
3519 * currently pointing to our region in the aperture.
3520 */
Chris Wilson42d6ab42012-07-26 11:49:32 +01003521 if (INTEL_INFO(dev)->gen < 6) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003522 ret = i915_gem_object_put_fence(obj);
3523 if (ret)
3524 return ret;
3525 }
3526
Daniel Vetter74898d72012-02-15 23:50:22 +01003527 if (obj->has_global_gtt_mapping)
3528 i915_gem_gtt_bind_object(obj, cache_level);
Daniel Vetter7bddb012012-02-09 17:15:47 +01003529 if (obj->has_aliasing_ppgtt_mapping)
3530 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3531 obj, cache_level);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003532 }
3533
Chris Wilson2c225692013-08-09 12:26:45 +01003534 list_for_each_entry(vma, &obj->vma_list, vma_link)
3535 vma->node.color = cache_level;
3536 obj->cache_level = cache_level;
3537
3538 if (cpu_write_needs_clflush(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003539 u32 old_read_domains, old_write_domain;
3540
3541 /* If we're coming from LLC cached, then we haven't
3542 * actually been tracking whether the data is in the
3543 * CPU cache or not, since we only allow one bit set
3544 * in obj->write_domain and have been skipping the clflushes.
3545 * Just set it to the CPU cache for now.
3546 */
3547 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003548
3549 old_read_domains = obj->base.read_domains;
3550 old_write_domain = obj->base.write_domain;
3551
3552 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3553 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3554
3555 trace_i915_gem_object_change_domain(obj,
3556 old_read_domains,
3557 old_write_domain);
3558 }
3559
Chris Wilson42d6ab42012-07-26 11:49:32 +01003560 i915_gem_verify_gtt(dev);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003561 return 0;
3562}
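
/*
 * Illustrative sketch (not part of the original file): a kernel-side
 * caller switching an unpinned object to LLC caching before sustained
 * CPU access; falls back to uncached on parts without an LLC. The
 * wrapper name is hypothetical and struct_mutex must be held.
 */
#if 0	/* example only; not compiled */
static int example_make_cpu_cached(struct drm_i915_gem_object *obj)
{
	enum i915_cache_level level =
		HAS_LLC(obj->base.dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	/* Returns -EBUSY while the object is pinned. */
	return i915_gem_object_set_cache_level(obj, level);
}
#endif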

int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
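
/*
 * Illustrative userspace sketch (not part of the original file):
 * exercising the two caching ioctls above on an existing GEM handle.
 * The fd/handle values are assumed to come from the caller, and error
 * handling is abbreviated.
 */
#if 0	/* example only; not compiled */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_set_then_get_caching(int fd, uint32_t handle)
{
	struct drm_i915_gem_caching arg = {
		.handle = handle,
		.caching = I915_CACHING_CACHED,	/* ask for LLC/snooped */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
		return -errno;

	/* Read the level back; the kernel may have picked a fallback. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg))
		return -errno;

	return arg.caching;	/* I915_CACHING_{NONE,CACHED,DISPLAY} */
}
#endif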

static bool is_pin_display(struct drm_i915_gem_object *obj)
{
	/* There are 3 sources that pin objects:
	 * 1. The display engine (scanouts, sprites, cursors);
	 * 2. Reservations for execbuffer;
	 * 3. The user.
	 *
	 * We can ignore reservations as we hold the struct_mutex and
	 * are only called outside of the reservation path. The user
	 * can only increment pin_count once, and so if after
	 * subtracting the potential reference by the user, any pin_count
	 * remains, it must be due to another use by the display engine.
	 */
	return obj->pin_count - !!obj->user_pin_count;
}

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined)
{
	u32 old_read_domains, old_write_domain;
	int ret;

	if (pipelined != obj->ring) {
		ret = i915_gem_object_sync(obj, pipelined);
		if (ret)
			return ret;
	}

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_display = true;

	/* The display engine is not coherent with the LLC cache on gen6. As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is the lowest common denominator for
	 * all chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
	if (ret)
		goto err_unpin_display;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
	if (ret)
		goto err_unpin_display;

	i915_gem_object_flush_cpu_write_domain(obj, true);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;

err_unpin_display:
	obj->pin_display = is_pin_display(obj);
	return ret;
}

void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin(obj);
	obj->pin_display = is_pin_display(obj);
}
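
/*
 * Illustrative sketch (not part of the original file): the pin/unpin
 * pairing a pageflip path would use around the two functions above.
 * 'ring' may be NULL for an unpipelined (blocking) pin; the function
 * name and the 4096-byte alignment are assumptions for the example.
 */
#if 0	/* example only; not compiled */
static int example_flip_to(struct drm_i915_gem_object *new_bo,
			   struct drm_i915_gem_object *old_bo,
			   struct intel_ring_buffer *ring)
{
	int ret;

	ret = i915_gem_object_pin_to_display_plane(new_bo, 4096, ring);
	if (ret)
		return ret;

	/* ... program the plane base with i915_gem_obj_ggtt_offset(new_bo) ... */

	/* Only drop the old buffer once it is no longer being scanned out. */
	if (old_bo)
		i915_gem_object_unpin_from_display_plane(old_bo);

	return 0;
}
#endif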

int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
	int ret;

	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	/* Ensure that we invalidate the GPU's caches and TLBs. */
	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
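
/*
 * Illustrative sketch (not part of the original file): pulling an
 * object into the CPU read domain before a kmap-based readback.
 * Assumes struct_mutex is held and the backing pages are populated and
 * pinned; only the first page is copied to keep the example short.
 */
#if 0	/* example only; not compiled */
static int example_cpu_readback(struct drm_i915_gem_object *obj,
				void *dst, size_t len)
{
	struct page *page;
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	if (ret)
		return ret;

	page = sg_page(obj->pages->sgl);
	vaddr = kmap(page);
	memcpy(dst, vaddr, min_t(size_t, len, PAGE_SIZE));
	kunmap(page);

	return 0;
}
#endif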

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	unsigned reset_counter;
	u32 seqno = 0;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
	if (ret)
		return ret;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}
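
/*
 * Illustrative userspace sketch (not part of the original file): a
 * client render loop using the throttle ioctl so that it never queues
 * more than roughly 20ms of work ahead of the GPU.
 */
#if 0	/* example only; not compiled */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void example_frame_loop(int fd)
{
	for (;;) {
		/* ... emit rendering for the next frame ... */

		/* Block until requests older than the 20ms window retire. */
		if (ioctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL))
			break;	/* e.g. fails after an unrecoverable hang */
	}
}
#endif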

int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
		    bool map_and_fenceable,
		    bool nonblocking)
{
	struct i915_vma *vma;
	int ret;

	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
		return -EBUSY;

	WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));

	vma = i915_gem_obj_to_vma(obj, vm);

	if (vma) {
		if ((alignment &&
		     vma->node.start & (alignment - 1)) ||
		    (map_and_fenceable && !obj->map_and_fenceable)) {
			WARN(obj->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     i915_gem_obj_offset(obj, vm), alignment,
			     map_and_fenceable,
			     obj->map_and_fenceable);
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;
		}
	}

	if (!i915_gem_obj_bound(obj, vm)) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

		ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
						 map_and_fenceable,
						 nonblocking);
		if (ret)
			return ret;

		if (!dev_priv->mm.aliasing_ppgtt)
			i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	if (!obj->has_global_gtt_mapping && map_and_fenceable)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	obj->pin_count++;
	obj->pin_mappable |= map_and_fenceable;

	return 0;
}

void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pin_count == 0);
	BUG_ON(!i915_gem_obj_bound_any(obj));

	if (--obj->pin_count == 0)
		obj->pin_mappable = false;
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (obj->pin_filp != NULL && obj->pin_filp != file) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	if (obj->user_pin_count == ULONG_MAX) {
		ret = -EBUSY;
		goto out;
	}

	if (obj->user_pin_count == 0) {
		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
		if (ret)
			goto out;
	}

	obj->user_pin_count++;
	obj->pin_filp = file;

	args->offset = i915_gem_obj_ggtt_offset(obj);
out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_filp != file) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj->user_pin_count--;
	if (obj->user_pin_count == 0) {
		obj->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not
	 * used by the gpu. Users of this interface expect objects to
	 * eventually become non-busy without any further actions, therefore
	 * emit any necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);

	args->busy = obj->active;
	if (obj->ring) {
		BUILD_BUG_ON(I915_NUM_RINGS > 16);
		args->busy |= intel_ring_flag(obj->ring) << 16;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
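
/*
 * Illustrative userspace sketch (not part of the original file):
 * decoding the result of the busy ioctl above. Bit 0 reports whether
 * the object is still active; the upper 16 bits carry the flag of the
 * last ring it was used on.
 */
#if 0	/* example only; not compiled */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_is_busy(int fd, uint32_t handle, unsigned int *ring_flag)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return -errno;

	*ring_flag = busy.busy >> 16;	/* 0 when idle */
	return busy.busy & 1;
}
#endif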

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_count) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
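
/*
 * Illustrative userspace sketch (not part of the original file): a
 * buffer-cache reclaim pattern built on the madvise ioctl. A cached
 * buffer is marked DONTNEED while idle; on reuse, 'retained' tells the
 * client whether the kernel purged the contents in the meantime.
 */
#if 0	/* example only; not compiled */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_reuse_cached_bo(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = I915_MADV_WILLNEED,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -errno;

	/* retained == 0: the pages were discarded; reinitialise them. */
	return madv.retained;
}
#endif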

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	INIT_LIST_HEAD(&obj->global_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;

	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		i915_gem_object_free(obj);
		return NULL;
	}

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = file_inode(obj->base.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;
}
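
/*
 * Illustrative sketch (not part of the original file): allocating a
 * page-aligned, shmem-backed object from inside the driver, as the
 * context and ringbuffer code does. The wrapper name is hypothetical.
 */
#if 0	/* example only; not compiled */
static struct drm_i915_gem_object *
example_alloc_bo(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
	if (obj == NULL)
		return NULL;

	/* Fresh objects start in the CPU domain, LLC-cached where possible. */
	return obj;
}
#endif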

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_vma *vma, *next;

	trace_i915_gem_object_destroy(obj);

	if (obj->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	obj->pin_count = 0;
	/* NB: 0 or 1 elements */
	WARN_ON(!list_empty(&obj->vma_list) &&
		!list_is_singular(&obj->vma_list));
	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
		int ret = i915_vma_unbind(vma);
		if (WARN_ON(ret == -ERESTARTSYS)) {
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			WARN_ON(i915_vma_unbind(vma));

			dev_priv->mm.interruptible = was_interruptible;
		}
	}

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);
	i915_gem_object_release_stolen(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);
}

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == vm)
			return vma;

	return NULL;
}

static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
					      struct i915_address_space *vm)
{
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->vma_link);
	INIT_LIST_HEAD(&vma->mm_list);
	INIT_LIST_HEAD(&vma->exec_list);
	vma->vm = vm;
	vma->obj = obj;

	/* Keep GGTT vmas first to make debug easier */
	if (i915_is_ggtt(vm))
		list_add(&vma->vma_link, &obj->vma_list);
	else
		list_add_tail(&vma->vma_link, &obj->vma_list);

	return vma;
}

struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm)
{
	struct i915_vma *vma;

	vma = i915_gem_obj_to_vma(obj, vm);
	if (!vma)
		vma = __i915_gem_vma_create(obj, vm);

	return vma;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

	list_del(&vma->vma_link);

	kfree(vma);
}
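
/*
 * Illustrative sketch (not part of the original file): resolving an
 * object's offset in the global GTT via the vma list, roughly what the
 * i915_gem_obj_ggtt_offset() helper does. Returns 0 when unbound; the
 * function name is hypothetical.
 */
#if 0	/* example only; not compiled */
static unsigned long example_ggtt_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base);

	return vma ? vma->node.start : 0;
}
#endif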

int
i915_gem_suspend(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->ums.mm_suspended)
		goto err;

	ret = i915_gpu_idle(dev);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev);

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_gem_evict_everything(dev);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	/* Hack! Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound ums.mm_suspended!
	 */
	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
							     DRIVER_MODESET);
	mutex_unlock(&dev->struct_mutex);

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	cancel_delayed_work_sync(&dev_priv->mm.idle_work);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
	int i, ret;

	if (!HAS_L3_DPF(dev) || !remap_info)
		return 0;

	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, reg_base + i);
		intel_ring_emit(ring, remap_info[i/4]);
	}

	intel_ring_advance(ring);

	return ret;
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static bool
intel_enable_blt(struct drm_device *dev)
{
	if (!HAS_BLT(dev))
		return false;

	/* The blitter was dysfunctional on early prototypes */
	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
		DRM_INFO("BLT not supported on this pre-production hardware;"
			 " graphics performance will be degraded.\n");
		return false;
	}

	return true;
}

static int i915_gem_init_rings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (intel_enable_blt(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_vebox_ring;

	return 0;

cleanup_vebox_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

	return ret;
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;

	if (dev_priv->ellc_size)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		u32 temp = I915_READ(GEN7_MSG_CTL);
		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
		I915_WRITE(GEN7_MSG_CTL, temp);
	}

	i915_gem_init_swizzling(dev);

	ret = i915_gem_init_rings(dev);
	if (ret)
		return ret;

	for (i = 0; i < NUM_L3_SLICES(dev); i++)
		i915_gem_l3_remap(&dev_priv->ring[RCS], i);

	/*
	 * XXX: There was some w/a described somewhere suggesting loading
	 * contexts before PPGTT.
	 */
	i915_gem_context_init(dev);
	if (dev_priv->mm.aliasing_ppgtt) {
		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
		if (ret) {
			i915_gem_cleanup_aliasing_ppgtt(dev);
			DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
		}
	}

	return 0;
}

int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (IS_VALLEYVIEW(dev)) {
		/* VLVA0 (potential hack), BIOS isn't actually waking us */
		I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
	}

	i915_gem_init_global_gtt(dev);

	ret = i915_gem_init_hw(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		i915_gem_cleanup_aliasing_ppgtt(dev);
		return ret;
	}

	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->dri1.allow_batchbuffer = 1;
	return 0;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		intel_cleanup_ring_buffer(ring);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->ums.mm_suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->ums.mm_suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);

	return i915_gem_suspend(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_suspend(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
}

static void i915_init_vm(struct drm_i915_private *dev_priv,
			 struct i915_address_space *vm)
{
	vm->dev = dev_priv->dev;
	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->global_link);
	list_add(&vm->global_link, &dev_priv->vm_list);
}

void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	dev_priv->slab =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	i915_init_vm(dev_priv, &dev_priv->gtt.base);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
	dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004680
/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

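/*
 * Tear down a phys object: any GEM object still bound to it is detached
 * first (which copies the contents back to shmem), the pages are flipped
 * back to write-back caching, and only then is the contiguous DMA memory
 * returned via drm_pci_free().
 */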
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	i915_gem_chipset_flush(dev);

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}

int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

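/*
 * pwrite fast path for phys objects: try a non-faulting, uncached copy
 * under struct_mutex first. __copy_from_user_inatomic_nocache() returns
 * the number of bytes it could not copy, so a non-zero result means the
 * user pages were not resident and we take the slow path: drop the lock,
 * do a normal faulting copy_from_user(), and relock. That is safe
 * because the phys backing store is fixed for the object's lifetime.
 */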
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}

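/*
 * File-close housekeeping: outstanding requests are not cancelled here,
 * they merely lose their back-pointer to the file. They still complete
 * and get retired through the normal path; only the per-file accounting
 * is torn down.
 */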
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	cancel_delayed_work_sync(&file_priv->mm.idle_work);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

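/*
 * Per-file idle work: once a client has been idle for a while, clear its
 * rps_wait_boost flag so that a later stall on the GPU may boost the
 * render clock frequency again on that client's behalf.
 */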
static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	idr_init(&file_priv->context_idr);

	return 0;
}

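/*
 * The shrinker can be invoked while this thread already holds
 * struct_mutex, e.g. when a shmem allocation inside get_pages triggers
 * direct reclaim. mutex_is_locked_by() detects that recursion so the
 * shrinker callbacks can steal the lock, roughly:
 *
 *	if (!mutex_trylock(&dev->struct_mutex)) {
 *		if (!mutex_is_locked_by(&dev->struct_mutex, current))
 *			return 0;	(held by someone else: back off)
 *		...
 *		unlock = false;	(we already hold it: don't unlock again)
 *	}
 *
 * The owner field is only maintained with CONFIG_SMP or
 * CONFIG_DEBUG_MUTEXES; elsewhere we must conservatively report false.
 */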
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

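/*
 * Shrinker accounting pass: report how many pages could plausibly be
 * reclaimed. That is every unbound object whose pages are not pinned,
 * plus every bound object that is idle and neither pinned into the GTT
 * nor has its pages pinned. Returning 0 tells the core VM there is
 * nothing to scan right now.
 */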
static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	bool unlock = true;
	unsigned long count;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return 0;

		unlock = false;
	}

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->active)
			continue;

		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

/* All the new VM stuff: per-address-space (VMA) lookup helpers */
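/*
 * Look up where an object is bound inside a given address space. The
 * aliasing ppgtt mirrors the global GTT layout, so queries against it
 * are redirected to the global GTT. If the object has no VMA in that VM
 * this returns -1, which on an unsigned long deliberately reads as
 * ULONG_MAX, an offset no real binding can have.
 */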
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));
	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.start;

	return -1;
}

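/*
 * An object is "bound" in a VM only once its VMA actually owns drm_mm
 * address space, hence the drm_mm_node_allocated() checks below: a VMA
 * can sit on the list before any space has been allocated for it.
 */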
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

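/*
 * Size of the object's allocation inside a given VM, or 0 if it has no
 * VMA there. As with i915_gem_obj_offset(), aliasing-ppgtt queries are
 * answered from the global GTT, which defines the layout for both.
 */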
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}

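/*
 * Shrinker reclaim pass, in order of increasing pain: first purge
 * objects userspace has marked purgeable (i915_gem_purge), then evict
 * and drop the pages of other unpinned objects (__i915_gem_shrink with
 * purgeable_only = false), and as a last resort drop everything idle
 * (i915_gem_shrink_all). SHRINK_STOP tells the core VM not to keep
 * calling us when the lock cannot be taken.
 */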
static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock = true;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return SHRINK_STOP;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return SHRINK_STOP;

		unlock = false;
	}

	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
	if (freed < sc->nr_to_scan)
		freed += __i915_gem_shrink(dev_priv,
					   sc->nr_to_scan - freed,
					   false);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink_all(dev_priv);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}

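/*
 * Fetch the object's global-GTT VMA. This leans on the invariant that
 * the GGTT VMA, when present, is always the first entry on vma_list;
 * the WARN_ONs catch callers using it on objects where that does not
 * hold.
 */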
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
	if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
		return NULL;

	return vma;
}