/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
			   bool map_and_fenceable,
			   bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

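/*
 * Cache coherency helpers: on platforms with a shared last-level cache
 * (HAS_LLC) the CPU and GPU caches stay coherent for every cache level,
 * so manual clflushing is only required for uncached (I915_CACHE_NONE)
 * objects on non-LLC parts, or for pinned display surfaces, which the
 * display engine reads without snooping any CPU cache.
 */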
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

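/*
 * Before taking struct_mutex on behalf of userspace we wait for any
 * pending GPU reset to complete (bounded to 10 seconds below), so that
 * ioctls do not race against the reset handler, which also needs the
 * lock.
 */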
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

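/*
 * Legacy user-mode-setting entry point: carves out the page-aligned GTT
 * range [gtt_start, gtt_end) that GEM may manage and clamps the mappable
 * aperture accordingly. It is rejected both under kernel mode-setting
 * (DRIVER_MODESET) and on gen5+ hardware, where UMS was never supported.
 */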
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

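/*
 * Dumb buffers are the driver-agnostic allocation path used by generic
 * scanout code; the pitch is rounded up to 64 bytes, the stride alignment
 * the display hardware requires for linear surfaces.
 */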
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
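
/*
 * Illustrative userspace sketch (not part of this file): creating a GEM
 * object and receiving its handle through the ioctl above; error handling
 * is elided.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	// create.handle now names a 4096-byte shmem-backed object
 */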
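
/*
 * On machines with bit-17 swizzling, the hardware swaps the two 64-byte
 * halves of every 128-byte span depending on bit 17 of the page's
 * physical address. The copy helpers below therefore walk the buffer one
 * 64-byte cacheline at a time, XORing bit 6 of the GPU offset
 * (gpu_offset ^ 64) to address the half the hardware actually stored the
 * data in.
 */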
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

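/*
 * The shmem pread path below tries the atomic fastpath first: pages are
 * copied with kmap_atomic() while struct_mutex is held. If that copy
 * faults (e.g. the user buffer is not resident), the mutex is dropped,
 * the user pages are prefaulted, and the sleeping slowpath with kmap()
 * finishes the job page by page.
 */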
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915_prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

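/*
 * The GTT write fastpath below runs in atomic context
 * (io_mapping_map_atomic_wc), so it must not fault on the source buffer;
 * i915_gem_pwrite_ioctl() prefaults the user pages beforehand, and any
 * remaining fault simply fails the fastpath so the shmem path can retry.
 */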
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

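/*
 * pwrite path selection (see below): objects backed by a physical object
 * use i915_gem_phys_pwrite(); untiled objects whose writes would need a
 * clflush anyway try the uncached GTT fastpath; everything else, and any
 * fastpath failure with -EFAULT or -ENOSPC, falls back to the shmem path
 * above.
 */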
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915_prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

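/*
 * Each client may boost the GPU frequency at most once at a time:
 * rps_wait_boost is claimed with atomic_xchg(), so repeated waits from
 * the same file do not stack frequency boosts until the flag is reset;
 * kernel-internal waits (file_priv == NULL) may always boost.
 */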
static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
	    WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_file *file,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
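
/*
 * Illustrative userspace usage of the ioctl above -- a minimal sketch,
 * assuming a libdrm-style drmIoctl() wrapper and an already-created GEM
 * handle (the snippet is not part of this file):
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *
 * Passing a GPU domain in either field is rejected with -EINVAL, as is a
 * non-zero write_domain that does not match read_domains.
 */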

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj, true);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
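
/*
 * Illustrative userspace usage -- a minimal sketch assuming a libdrm-style
 * drmIoctl() wrapper (not part of this file): map the whole object and read
 * the CPU address back from addr_ptr.
 *
 *	struct drm_i915_gem_mmap m = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = obj_size,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &m) == 0)
 *		ptr = (void *)(uintptr_t)m.addr_ptr;
 *
 * This maps the shmem backing store, not the GTT aperture; see the
 * mmap_gtt path below for tiled/aperture access.
 */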

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* Now bind it into the GTT if needed */
	ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	obj->fault_mappable = true;

	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
	pfn >>= PAGE_SHIFT;
	pfn += page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unpin:
	i915_gem_object_unpin(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}

	intel_runtime_pm_put(dev_priv);
	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
	obj->fault_mappable = false;
}

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}
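
/*
 * Worked example of the loop above: a 700KiB tiled object on gen3 starts
 * from the 1MiB minimum and already fits, so the fence region stays 1MiB;
 * a 3MiB object doubles 1MiB -> 2MiB -> 4MiB. On gen2 the minimum is
 * 512KiB, so the same 700KiB object rounds up to 1MiB.
 */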

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @dev: DRM device
 * @size: object size
 * @tiling_mode: object tiling mode
 * @fenced: whether the object may be accessed through a fence register
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
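
/*
 * Example: on gen4+ (or for any untiled object) the required alignment is
 * always 4096 bytes; on gen3 a 700KiB tiled object that may be fenced must
 * be aligned to its 1MiB fence region as computed by i915_gem_get_gtt_size()
 * above.
 */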

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (drm_vma_node_has_offset(&obj->base.vma_node))
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can do is to release the
	 * offsets on purgeable objects by truncating them and marking them
	 * purged, which prevents userspace from ever using those objects again.
	 */
	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->gtt.mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
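
/*
 * Illustrative userspace usage of the two-step GTT mapping -- a minimal
 * sketch assuming a libdrm-style drmIoctl() wrapper (not part of this
 * file): fetch the fake offset, then mmap the DRM fd at that offset so
 * i915_gem_fault() above populates the PTEs on first access.
 *
 *	struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg);
 *	ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, mg.offset);
 */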

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	struct inode *inode;

	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	inode = file_inode(obj->base.filp);
	shmem_truncate_range(inode, 0, (loff_t)-1);

	obj->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	BUG_ON(i915_gem_obj_bound_any(obj));

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect the pages from being reaped by removing the
	 * object from the gtt lists early. */
	list_del(&obj->global_list);

	ops->put_pages(obj);
	obj->pages = NULL;

	if (i915_gem_object_is_purgeable(obj))
		i915_gem_object_truncate(obj);

	return 0;
}

static unsigned long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
{
	struct list_head still_bound_list;
	struct drm_i915_gem_object *obj, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.unbound_list,
				 global_list) {
		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
		    i915_gem_object_put_pages(obj) == 0) {
			count += obj->base.size >> PAGE_SHIFT;
			if (count >= target)
				return count;
		}
	}

	/*
	 * As we may completely rewrite the bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 */
	INIT_LIST_HEAD(&still_bound_list);
	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
		struct i915_vma *vma, *v;

		obj = list_first_entry(&dev_priv->mm.bound_list,
				       typeof(*obj), global_list);
		list_move_tail(&obj->global_list, &still_bound_list);

		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
			continue;

		/*
		 * Hold a reference whilst we unbind this object, as we may
		 * end up waiting for and retiring requests. This might
		 * release the final reference (held by the active list)
		 * and result in the object being freed from under us.
		 *
		 * Note 1: Shrinking the bound list is special since only active
		 * (and hence bound objects) can contain such limbo objects, so
		 * we don't need special tricks for shrinking the unbound list.
		 * The only other place where we have to be careful with active
		 * objects suddenly disappearing due to retiring requests is the
		 * eviction code.
		 *
		 * Note 2: Even though the bound list doesn't hold a reference
		 * to the object we can safely grab one here: The final object
		 * unreferencing and the bound_list are both protected by the
		 * dev->struct_mutex and so we won't ever be able to observe an
		 * object on the bound_list with a reference count equal to 0.
		 */
		drm_gem_object_reference(&obj->base);

		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
			if (i915_vma_unbind(vma))
				break;

		if (i915_gem_object_put_pages(obj) == 0)
			count += obj->base.size >> PAGE_SHIFT;

		drm_gem_object_unreference(&obj->base);
	}
	list_splice(&still_bound_list, &dev_priv->mm.bound_list);

	return count;
}
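
/*
 * Note on units: both the target and the returned count are in pages,
 * which is why callers convert byte sizes first -- e.g. the call in
 * i915_gem_object_create_mmap_offset() above,
 * i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT), asks the
 * shrinker to free roughly one object's worth of backing pages.
 */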

static unsigned long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
	return __i915_gem_shrink(dev_priv, target, true);
}

static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *next;
	long freed = 0;

	i915_gem_evict_everything(dev_priv->dev);

	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
				 global_list) {
		if (i915_gem_object_put_pages(obj) == 0)
			freed += obj->base.size >> PAGE_SHIFT;
	}
	return freed;
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter sg_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_mask(mapping);
	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_purge(dev_priv, page_count);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
			gfp |= __GFP_IO | __GFP_WAIT;

			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (IS_ERR(page))
				goto err_pages;

			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
			gfp &= ~(__GFP_IO | __GFP_WAIT);
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		page_cache_release(sg_page_iter_page(&sg_iter));
	sg_free_table(st);
	kfree(st);
	return PTR_ERR(page);
}
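
/*
 * The non-swiotlb branch above coalesces physically contiguous pages into
 * a single scatterlist entry. Worked example: shmem hands back pages with
 * pfns 100, 101, 102 and 200 -- the first three merge into one 12KiB sg
 * entry (sg->length grows by PAGE_SIZE twice) and the pfn-200 page starts
 * a second entry, so st->nents ends up as 2 rather than 4.
 */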

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to obtain a purgeable object\n");
		return -EINVAL;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
	return 0;
}
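
/*
 * Typical caller pattern -- a sketch, assuming the inline pages_pin_count
 * helpers from i915_drv.h:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... use obj->pages ...
 *	i915_gem_object_unpin_pages(obj);
 *
 * While pages_pin_count is elevated, i915_gem_object_put_pages() above
 * refuses with -EBUSY, so the sg_table cannot be reaped by the shrinker.
 */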

static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = intel_ring_get_seqno(ring);

	BUG_ON(ring == NULL);
	if (obj->ring != ring && obj->last_write_seqno) {
		/* Keep the seqno relative to the current ring */
		obj->last_write_seqno = seqno;
	}
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_read_seqno = seqno;

	if (obj->fenced_gpu_access) {
		obj->last_fenced_seqno = seqno;

		/* Bump MRU to take account of the delayed flush */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg;

			reg = &dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}
	}
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_ring_buffer *ring)
{
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	return i915_gem_object_move_to_active(vma->obj, ring);
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);

	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
	BUG_ON(!obj->active);

	list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);

	list_del_init(&obj->ring_list);
	obj->ring = NULL;

	obj->last_read_seqno = 0;
	obj->last_write_seqno = 0;
	obj->base.write_domain = 0;

	obj->last_fenced_seqno = 0;
	obj->fenced_gpu_access = false;

	obj->active = 0;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}

static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, j;

	/* Carefully retire all requests without writing to the rings */
	for_each_ring(ring, dev_priv, i) {
		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev);

	/* Finally reset hw state */
	for_each_ring(ring, dev_priv, i) {
		intel_ring_init_seqno(ring, seqno);

		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
			ring->sync_seqno[j] = 0;
	}

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	ret = i915_gem_init_seqno(dev, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}
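
/*
 * Worked example of the wrap handling above: i915_gem_set_seqno(dev, 1)
 * first initialises the rings with seqno - 1 = 0, then sets next_seqno = 1
 * and last_seqno = 0; since 0 is reserved, last_seqno is decremented to
 * 0xffffffff, so the signed-delta comparison in i915_seqno_passed() still
 * orders the next request as "after" the last one across the wrap.
 */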

int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* reserve 0 for non-seqno */
	if (dev_priv->next_seqno == 0) {
		int ret = i915_gem_init_seqno(dev, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}

int __i915_add_request(struct intel_ring_buffer *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *obj,
		       u32 *out_seqno)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_request *request;
	u32 request_ring_position, request_start;
	int was_empty;
	int ret;

	request_start = intel_ring_get_tail(ring);
	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;

	request = ring->preallocated_lazy_request;
	if (WARN_ON(request == NULL))
		return -ENOMEM;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request_ring_position = intel_ring_get_tail(ring);

	ret = ring->add_request(ring);
	if (ret)
		return ret;

	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
	request->head = request_start;
	request->tail = request_ring_position;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	request->batch_obj = obj;

	/* Hold a reference to the current context so that we can inspect
	 * it later in case a hangcheck error event fires.
	 */
	request->ctx = ring->last_context;
	if (request->ctx)
		i915_gem_context_reference(request->ctx);

	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);
	request->file_priv = NULL;

	if (file) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	trace_i915_gem_request_add(ring, request->seqno);
	ring->outstanding_lazy_seqno = 0;
	ring->preallocated_lazy_request = NULL;

	if (!dev_priv->ums.mm_suspended) {
		i915_queue_hangcheck(ring->dev);

		if (was_empty) {
			cancel_delayed_work_sync(&dev_priv->mm.idle_work);
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work,
					   round_jiffies_up_relative(HZ));
			intel_mark_busy(dev_priv->dev);
		}
	}

	if (out_seqno)
		*out_seqno = request->seqno;
	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
				    struct i915_address_space *vm)
{
	if (acthd >= i915_gem_obj_offset(obj, vm) &&
	    acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
		return true;

	return false;
}

static bool i915_head_inside_request(const u32 acthd_unmasked,
				     const u32 request_start,
				     const u32 request_end)
{
	const u32 acthd = acthd_unmasked & HEAD_ADDR;

	if (request_start < request_end) {
		if (acthd >= request_start && acthd < request_end)
			return true;
	} else if (request_start > request_end) {
		if (acthd >= request_start || acthd < request_end)
			return true;
	}

	return false;
}
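
/*
 * The second branch above handles a request that wraps the end of the
 * ring. Example: request_start = 0xf000 and request_end = 0x0100 -- the
 * live range is [0xf000, top of ring) plus [0, 0x0100), so both
 * acthd = 0xf800 and acthd = 0x0040 count as inside, while acthd = 0x8000
 * does not.
 */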

static struct i915_address_space *
request_to_vm(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
	struct i915_address_space *vm;

	vm = &dev_priv->gtt.base;

	return vm;
}

static bool i915_request_guilty(struct drm_i915_gem_request *request,
				const u32 acthd, bool *inside)
{
	/* There is a possibility that the unmasked head address, while
	 * pointing inside the ring, matches the batch_obj address range.
	 * However this is extremely unlikely.
	 */
	if (request->batch_obj) {
		if (i915_head_inside_object(acthd, request->batch_obj,
					    request_to_vm(request))) {
			*inside = true;
			return true;
		}
	}

	if (i915_head_inside_request(acthd, request->head, request->tail)) {
		*inside = false;
		return true;
	}

	return false;
}

static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
{
	const unsigned long elapsed = get_seconds() - hs->guilty_ts;

	if (hs->banned)
		return true;

	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
		DRM_ERROR("context hanging too fast, declaring banned!\n");
		return true;
	}

	return false;
}
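
/*
 * Example of the ban policy above: a context whose batch is found guilty
 * gets guilty_ts stamped in i915_set_reset_status() below; if one of its
 * batches hangs the GPU again within DRM_I915_CTX_BAN_PERIOD seconds of
 * that stamp, the context is declared banned.
 */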

static void i915_set_reset_status(struct intel_ring_buffer *ring,
				  struct drm_i915_gem_request *request,
				  u32 acthd)
{
	struct i915_ctx_hang_stats *hs = NULL;
	bool inside, guilty;
	unsigned long offset = 0;

	/* Innocent until proven guilty */
	guilty = false;

	if (request->batch_obj)
		offset = i915_gem_obj_offset(request->batch_obj,
					     request_to_vm(request));

	if (ring->hangcheck.action != HANGCHECK_WAIT &&
	    i915_request_guilty(request, acthd, &inside)) {
		DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
			  ring->name,
			  inside ? "inside" : "flushing",
			  offset,
			  request->ctx ? request->ctx->id : 0,
			  acthd);

		guilty = true;
	}

	/* If contexts are disabled or this is the default context, use
	 * file_priv->hang_stats.
	 */
	if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
		hs = &request->ctx->hang_stats;
	else if (request->file_priv)
		hs = &request->file_priv->hang_stats;

	if (hs) {
		if (guilty) {
			hs->banned = i915_context_is_banned(hs);
			hs->batch_active++;
			hs->guilty_ts = get_seconds();
		} else {
			hs->batch_pending++;
		}
	}
}

static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
	list_del(&request->list);
	i915_gem_request_remove_from_client(request);

	if (request->ctx)
		i915_gem_context_unreference(request->ctx);

	kfree(request);
}

static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				      struct intel_ring_buffer *ring)
{
	u32 completed_seqno;
	u32 acthd;

	acthd = intel_ring_get_active_head(ring);
	completed_seqno = ring->get_seqno(ring, false);

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (request->seqno > completed_seqno)
			i915_set_reset_status(ring, request, acthd);

		i915_gem_free_request(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		i915_gem_object_move_to_inactive(obj);
	}
}

Chris Wilson19b2dbd2013-06-12 10:15:12 +01002388void i915_gem_restore_fences(struct drm_device *dev)
Chris Wilson312817a2010-11-22 11:50:11 +00002389{
2390 struct drm_i915_private *dev_priv = dev->dev_private;
2391 int i;
2392
Daniel Vetter4b9de732011-10-09 21:52:02 +02002393 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00002394 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00002395
Daniel Vetter94a335d2013-07-17 14:51:28 +02002396 /*
2397 * Commit delayed tiling changes if we have an object still
2398 * attached to the fence, otherwise just clear the fence.
2399 */
2400 if (reg->obj) {
2401 i915_gem_object_update_fence(reg->obj, reg,
2402 reg->obj->tiling_mode);
2403 } else {
2404 i915_gem_write_fence(dev, i, NULL);
2405 }
Chris Wilson312817a2010-11-22 11:50:11 +00002406 }
2407}
2408
Chris Wilson069efc12010-09-30 16:53:18 +01002409void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002410{
Chris Wilsondfaae392010-09-22 10:31:52 +01002411 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002412 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002413 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002414
Chris Wilsonb4519512012-05-11 14:29:30 +01002415 for_each_ring(ring, dev_priv, i)
2416 i915_gem_reset_ring_lists(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01002417
Ben Widawsky3d57e5b2013-10-14 10:01:36 -07002418 i915_gem_cleanup_ringbuffer(dev);
2419
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002420 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002421}
2422
2423/**
2424 * This function clears the request list as sequence numbers are passed.
2425 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00002426void
Chris Wilsondb53a302011-02-03 11:57:46 +00002427i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002428{
Eric Anholt673a3942008-07-30 12:06:12 -07002429 uint32_t seqno;
2430
Chris Wilsondb53a302011-02-03 11:57:46 +00002431 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01002432 return;
2433
Chris Wilsondb53a302011-02-03 11:57:46 +00002434 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002435
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01002436 seqno = ring->get_seqno(ring, true);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002437
Zou Nan hai852835f2010-05-21 09:08:56 +08002438 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002439 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07002440
Zou Nan hai852835f2010-05-21 09:08:56 +08002441 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07002442 struct drm_i915_gem_request,
2443 list);
Eric Anholt673a3942008-07-30 12:06:12 -07002444
Chris Wilsondfaae392010-09-22 10:31:52 +01002445 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07002446 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002447
Chris Wilsondb53a302011-02-03 11:57:46 +00002448 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002449 /* We know the GPU must have read the request to have
2450 * sent us the seqno + interrupt, so use the position
 2451 * of the tail of the request to update the last known position
2452 * of the GPU head.
2453 */
2454 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002455
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002456 i915_gem_free_request(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002457 }
2458
2459 /* Move any buffers on the active list that are no longer referenced
2460 * by the ringbuffer to the flushing/inactive lists as appropriate.
2461 */
2462 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002463 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002464
Akshay Joshi0206e352011-08-16 15:34:10 -04002465 obj = list_first_entry(&ring->active_list,
Chris Wilson05394f32010-11-08 19:18:58 +00002466 struct drm_i915_gem_object,
2467 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002468
Chris Wilson0201f1e2012-07-20 12:41:01 +01002469 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002470 break;
2471
Chris Wilson65ce3022012-07-20 12:41:02 +01002472 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002473 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002474
Chris Wilsondb53a302011-02-03 11:57:46 +00002475 if (unlikely(ring->trace_irq_seqno &&
2476 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002477 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00002478 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002479 }
Chris Wilson23bc5982010-09-29 16:10:57 +01002480
Chris Wilsondb53a302011-02-03 11:57:46 +00002481 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002482}
2483
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002484bool
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002485i915_gem_retire_requests(struct drm_device *dev)
2486{
2487 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002488 struct intel_ring_buffer *ring;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002489 bool idle = true;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002490 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002491
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002492 for_each_ring(ring, dev_priv, i) {
Chris Wilsonb4519512012-05-11 14:29:30 +01002493 i915_gem_retire_requests_ring(ring);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002494 idle &= list_empty(&ring->request_list);
2495 }
2496
2497 if (idle)
2498 mod_delayed_work(dev_priv->wq,
2499 &dev_priv->mm.idle_work,
2500 msecs_to_jiffies(100));
2501
2502 return idle;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002503}
2504
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002505static void
Eric Anholt673a3942008-07-30 12:06:12 -07002506i915_gem_retire_work_handler(struct work_struct *work)
2507{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002508 struct drm_i915_private *dev_priv =
2509 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2510 struct drm_device *dev = dev_priv->dev;
Chris Wilson0a587052011-01-09 21:05:44 +00002511 bool idle;
Eric Anholt673a3942008-07-30 12:06:12 -07002512
Chris Wilson891b48c2010-09-29 12:26:37 +01002513 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002514 idle = false;
2515 if (mutex_trylock(&dev->struct_mutex)) {
2516 idle = i915_gem_retire_requests(dev);
2517 mutex_unlock(&dev->struct_mutex);
2518 }
2519 if (!idle)
Chris Wilsonbcb45082012-10-05 17:02:57 +01002520 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2521 round_jiffies_up_relative(HZ));
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002522}
Chris Wilson891b48c2010-09-29 12:26:37 +01002523
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002524static void
2525i915_gem_idle_work_handler(struct work_struct *work)
2526{
2527 struct drm_i915_private *dev_priv =
2528 container_of(work, typeof(*dev_priv), mm.idle_work.work);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002529
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002530 intel_mark_idle(dev_priv->dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002531}
2532
Ben Widawsky5816d642012-04-11 11:18:19 -07002533/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002534 * Ensures that an object will eventually get non-busy by flushing any required
 2535 * write domains, emitting any outstanding lazy request and retiring any
 2536 * completed requests.
2537 */
2538static int
2539i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2540{
2541 int ret;
2542
2543 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002544 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002545 if (ret)
2546 return ret;
2547
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002548 i915_gem_retire_requests_ring(obj->ring);
2549 }
2550
2551 return 0;
2552}
2553
2554/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002555 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2556 * @DRM_IOCTL_ARGS: standard ioctl arguments
2557 *
2558 * Returns 0 if successful, else an error is returned with the remaining time in
2559 * the timeout parameter.
2560 * -ETIME: object is still busy after timeout
2561 * -ERESTARTSYS: signal interrupted the wait
 2562 * -ENOENT: object doesn't exist
2563 * Also possible, but rare:
2564 * -EAGAIN: GPU wedged
 2565 * -ENOMEM: out of memory
2566 * -ENODEV: Internal IRQ fail
2567 * -E?: The add request failed
2568 *
2569 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2570 * non-zero timeout parameter the wait ioctl will wait for the given number of
 2571 * nanoseconds on an object becoming unbusy. Since the wait itself occurs
 2572 * without holding struct_mutex, the object may become re-busied before this
 2573 * function completes. A similar but shorter race condition exists in the
 2574 * busy ioctl.
2575 */
2576int
2577i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2578{
Daniel Vetterf69061b2012-12-06 09:01:42 +01002579 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002580 struct drm_i915_gem_wait *args = data;
2581 struct drm_i915_gem_object *obj;
2582 struct intel_ring_buffer *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002583 struct timespec timeout_stack, *timeout = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01002584 unsigned reset_counter;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002585 u32 seqno = 0;
2586 int ret = 0;
2587
Ben Widawskyeac1f142012-06-05 15:24:24 -07002588 if (args->timeout_ns >= 0) {
2589 timeout_stack = ns_to_timespec(args->timeout_ns);
2590 timeout = &timeout_stack;
2591 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002592
2593 ret = i915_mutex_lock_interruptible(dev);
2594 if (ret)
2595 return ret;
2596
2597 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2598 if (&obj->base == NULL) {
2599 mutex_unlock(&dev->struct_mutex);
2600 return -ENOENT;
2601 }
2602
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002603 /* Need to make sure the object gets inactive eventually. */
2604 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002605 if (ret)
2606 goto out;
2607
2608 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002609 seqno = obj->last_read_seqno;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002610 ring = obj->ring;
2611 }
2612
2613 if (seqno == 0)
2614 goto out;
2615
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002616	/* Do this after the OLR check to make sure we make forward progress when
 2617	 * polling on this IOCTL with a 0 timeout (like the busy ioctl).
2618 */
2619 if (!args->timeout_ns) {
2620 ret = -ETIME;
2621 goto out;
2622 }
2623
2624 drm_gem_object_unreference(&obj->base);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002625 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002626 mutex_unlock(&dev->struct_mutex);
2627
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002628 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03002629 if (timeout)
Ben Widawskyeac1f142012-06-05 15:24:24 -07002630 args->timeout_ns = timespec_to_ns(timeout);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002631 return ret;
2632
2633out:
2634 drm_gem_object_unreference(&obj->base);
2635 mutex_unlock(&dev->struct_mutex);
2636 return ret;
2637}
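
/*
 * Illustrative userspace usage of the wait ioctl above (a sketch, not
 * part of the driver; assumes 'fd' is an open DRM device and 'handle'
 * a GEM buffer handle):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000,
 *	};
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * Failure with ETIME means the object was still busy after one second,
 * with the unused time left in wait.timeout_ns; a timeout_ns of 0
 * turns the call into a non-blocking busy check.
 */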
2638
2639/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002640 * i915_gem_object_sync - sync an object to a ring.
2641 *
2642 * @obj: object which may be in use on another ring.
2643 * @to: ring we wish to use the object on. May be NULL.
2644 *
2645 * This code is meant to abstract object synchronization with the GPU.
2646 * Calling with NULL implies synchronizing the object with the CPU
2647 * rather than a particular GPU ring.
2648 *
2649 * Returns 0 if successful, else propagates up the lower layer error.
2650 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002651int
2652i915_gem_object_sync(struct drm_i915_gem_object *obj,
2653 struct intel_ring_buffer *to)
2654{
2655 struct intel_ring_buffer *from = obj->ring;
2656 u32 seqno;
2657 int ret, idx;
2658
2659 if (from == NULL || to == from)
2660 return 0;
2661
Ben Widawsky5816d642012-04-11 11:18:19 -07002662 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Chris Wilson0201f1e2012-07-20 12:41:01 +01002663 return i915_gem_object_wait_rendering(obj, false);
Ben Widawsky2911a352012-04-05 14:47:36 -07002664
2665 idx = intel_ring_sync_index(from, to);
2666
Chris Wilson0201f1e2012-07-20 12:41:01 +01002667 seqno = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002668 if (seqno <= from->sync_seqno[idx])
2669 return 0;
2670
Ben Widawskyb4aca012012-04-25 20:50:12 -07002671 ret = i915_gem_check_olr(obj->ring, seqno);
2672 if (ret)
2673 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002674
Chris Wilsonb52b89d2013-09-25 11:43:28 +01002675 trace_i915_gem_ring_sync_to(from, to, seqno);
Ben Widawsky1500f7e2012-04-11 11:18:21 -07002676 ret = to->sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002677 if (!ret)
Mika Kuoppala7b01e262012-11-28 17:18:45 +02002678 /* We use last_read_seqno because sync_to()
2679 * might have just caused seqno wrap under
2680 * the radar.
2681 */
2682 from->sync_seqno[idx] = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002683
Ben Widawskye3a5a222012-04-11 11:18:20 -07002684 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002685}
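
/*
 * Typical call site (a sketch, assuming the standard ring array):
 * before using @obj on the blitter,
 *
 *	ret = i915_gem_object_sync(obj, &dev_priv->ring[BCS]);
 *	if (ret)
 *		return ret;
 *
 * which queues a semaphore wait on the blitter ring, or blocks on the
 * CPU when inter-ring semaphores are disabled.
 */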
2686
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002687static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2688{
2689 u32 old_write_domain, old_read_domains;
2690
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002691 /* Force a pagefault for domain tracking on next user access */
2692 i915_gem_release_mmap(obj);
2693
Keith Packardb97c3d92011-06-24 21:02:59 -07002694 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2695 return;
2696
Chris Wilson97c809fd2012-10-09 19:24:38 +01002697 /* Wait for any direct GTT access to complete */
2698 mb();
2699
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002700 old_read_domains = obj->base.read_domains;
2701 old_write_domain = obj->base.write_domain;
2702
2703 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2704 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2705
2706 trace_i915_gem_object_change_domain(obj,
2707 old_read_domains,
2708 old_write_domain);
2709}
2710
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002711int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002712{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002713 struct drm_i915_gem_object *obj = vma->obj;
Daniel Vetter7bddb012012-02-09 17:15:47 +01002714 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson43e28f02013-01-08 10:53:09 +00002715 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002716
Daniel Vetterb93dab62013-08-26 11:23:47 +02002717 /* For now we only ever use 1 vma per object */
2718 WARN_ON(!list_is_singular(&obj->vma_list));
2719
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002720 if (list_empty(&vma->vma_link))
Eric Anholt673a3942008-07-30 12:06:12 -07002721 return 0;
2722
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002723 if (!drm_mm_node_allocated(&vma->node)) {
2724 i915_gem_vma_destroy(vma);
2725
2726 return 0;
2727 }
Ben Widawsky433544b2013-08-13 18:09:06 -07002728
Chris Wilson31d8d652012-05-24 19:11:20 +01002729 if (obj->pin_count)
2730 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002731
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002732 BUG_ON(obj->pages == NULL);
2733
Chris Wilsona8198ee2011-04-13 22:04:09 +01002734 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002735 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002736 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002737	/* Continue on if we fail due to EIO: the GPU is hung, so we
 2738	 * should be safe, and we need to clean up or else we might
2739 * cause memory corruption through use-after-free.
2740 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002741
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002742 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002743
Daniel Vetter96b47b62009-12-15 17:50:00 +01002744 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002745 ret = i915_gem_object_put_fence(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002746 if (ret)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002747 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002748
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002749 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00002750
Daniel Vetter74898d72012-02-15 23:50:22 +01002751 if (obj->has_global_gtt_mapping)
2752 i915_gem_gtt_unbind_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002753 if (obj->has_aliasing_ppgtt_mapping) {
2754 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2755 obj->has_aliasing_ppgtt_mapping = 0;
2756 }
Daniel Vetter74163902012-02-15 23:50:21 +01002757 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002758
Ben Widawskyca191b12013-07-31 17:00:14 -07002759 list_del(&vma->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002760 /* Avoid an unnecessary call to unbind on rebind. */
Ben Widawsky5cacaac2013-07-31 17:00:13 -07002761 if (i915_is_ggtt(vma->vm))
2762 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002763
Ben Widawsky2f633152013-07-17 12:19:03 -07002764 drm_mm_remove_node(&vma->node);
2765 i915_gem_vma_destroy(vma);
2766
2767 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002768 * no more VMAs exist. */
Ben Widawsky2f633152013-07-17 12:19:03 -07002769 if (list_empty(&obj->vma_list))
2770 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002771
Chris Wilson70903c32013-12-04 09:59:09 +00002772	/* And finally, now that the object is completely decoupled from this vma,
2773 * we can drop its hold on the backing storage and allow it to be
2774 * reaped by the shrinker.
2775 */
2776 i915_gem_object_unpin_pages(obj);
2777
Chris Wilson88241782011-01-07 17:09:48 +00002778 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002779}
2780
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002781/**
2782 * Unbinds an object from the global GTT aperture.
2783 */
2784int
2785i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2786{
2787 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2788 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2789
Dan Carpenter58e73e12013-08-09 12:44:11 +03002790 if (!i915_gem_obj_ggtt_bound(obj))
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002791 return 0;
2792
2793 if (obj->pin_count)
2794 return -EBUSY;
2795
2796 BUG_ON(obj->pages == NULL);
2797
2798 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2799}
2800
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002801int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002802{
2803 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002804 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002805 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002806
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002807 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002808 for_each_ring(ring, dev_priv, i) {
Ben Widawskyb6c74882012-08-14 14:35:14 -07002809 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2810 if (ret)
2811 return ret;
2812
Chris Wilson3e960502012-11-27 16:22:54 +00002813 ret = intel_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002814 if (ret)
2815 return ret;
2816 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002817
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002818 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002819}
2820
Chris Wilson9ce079e2012-04-17 15:31:30 +01002821static void i965_write_fence_reg(struct drm_device *dev, int reg,
2822 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002823{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002824 drm_i915_private_t *dev_priv = dev->dev_private;
Imre Deak56c844e2013-01-07 21:47:34 +02002825 int fence_reg;
2826 int fence_pitch_shift;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002827
Imre Deak56c844e2013-01-07 21:47:34 +02002828 if (INTEL_INFO(dev)->gen >= 6) {
2829 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2830 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2831 } else {
2832 fence_reg = FENCE_REG_965_0;
2833 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2834 }
2835
Chris Wilsond18b9612013-07-10 13:36:23 +01002836 fence_reg += reg * 8;
2837
2838 /* To w/a incoherency with non-atomic 64-bit register updates,
2839 * we split the 64-bit update into two 32-bit writes. In order
2840 * for a partial fence not to be evaluated between writes, we
2841 * precede the update with write to turn off the fence register,
2842 * and only enable the fence as the last step.
2843 *
2844 * For extra levels of paranoia, we make sure each step lands
2845 * before applying the next step.
2846 */
2847 I915_WRITE(fence_reg, 0);
2848 POSTING_READ(fence_reg);
2849
Chris Wilson9ce079e2012-04-17 15:31:30 +01002850 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002851 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilsond18b9612013-07-10 13:36:23 +01002852 uint64_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002853
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002854 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
Chris Wilson9ce079e2012-04-17 15:31:30 +01002855 0xfffff000) << 32;
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002856 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
Imre Deak56c844e2013-01-07 21:47:34 +02002857 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002858 if (obj->tiling_mode == I915_TILING_Y)
2859 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2860 val |= I965_FENCE_REG_VALID;
Daniel Vetterc6642782010-11-12 13:46:18 +00002861
Chris Wilsond18b9612013-07-10 13:36:23 +01002862 I915_WRITE(fence_reg + 4, val >> 32);
2863 POSTING_READ(fence_reg + 4);
2864
2865 I915_WRITE(fence_reg + 0, val);
2866 POSTING_READ(fence_reg);
2867 } else {
2868 I915_WRITE(fence_reg + 4, 0);
2869 POSTING_READ(fence_reg + 4);
2870 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002871}
2872
Chris Wilson9ce079e2012-04-17 15:31:30 +01002873static void i915_write_fence_reg(struct drm_device *dev, int reg,
2874 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002875{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002876 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002877 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002878
Chris Wilson9ce079e2012-04-17 15:31:30 +01002879 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002880 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002881 int pitch_val;
2882 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002883
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002884 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002885 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002886 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2887 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2888 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002889
2890 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2891 tile_width = 128;
2892 else
2893 tile_width = 512;
2894
2895 /* Note: pitch better be a power of two tile widths */
2896 pitch_val = obj->stride / tile_width;
2897 pitch_val = ffs(pitch_val) - 1;
2898
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002899 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002900 if (obj->tiling_mode == I915_TILING_Y)
2901 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2902 val |= I915_FENCE_SIZE_BITS(size);
2903 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2904 val |= I830_FENCE_REG_VALID;
2905 } else
2906 val = 0;
2907
2908 if (reg < 8)
2909 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002910 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002911 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002912
Chris Wilson9ce079e2012-04-17 15:31:30 +01002913 I915_WRITE(reg, val);
2914 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002915}
2916
Chris Wilson9ce079e2012-04-17 15:31:30 +01002917static void i830_write_fence_reg(struct drm_device *dev, int reg,
2918 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002919{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002920 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002921 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002922
Chris Wilson9ce079e2012-04-17 15:31:30 +01002923 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002924 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002925 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002926
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002927 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002928 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002929 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2930 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2931 i915_gem_obj_ggtt_offset(obj), size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002932
Chris Wilson9ce079e2012-04-17 15:31:30 +01002933 pitch_val = obj->stride / 128;
2934 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002935
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002936 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002937 if (obj->tiling_mode == I915_TILING_Y)
2938 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2939 val |= I830_FENCE_SIZE_BITS(size);
2940 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2941 val |= I830_FENCE_REG_VALID;
2942 } else
2943 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002944
Chris Wilson9ce079e2012-04-17 15:31:30 +01002945 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2946 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2947}
2948
Chris Wilsond0a57782012-10-09 19:24:37 +01002949static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2950{
2951 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2952}
2953
Chris Wilson9ce079e2012-04-17 15:31:30 +01002954static void i915_gem_write_fence(struct drm_device *dev, int reg,
2955 struct drm_i915_gem_object *obj)
2956{
Chris Wilsond0a57782012-10-09 19:24:37 +01002957 struct drm_i915_private *dev_priv = dev->dev_private;
2958
2959 /* Ensure that all CPU reads are completed before installing a fence
2960 * and all writes before removing the fence.
2961 */
2962 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2963 mb();
2964
Daniel Vetter94a335d2013-07-17 14:51:28 +02002965 WARN(obj && (!obj->stride || !obj->tiling_mode),
2966 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2967 obj->stride, obj->tiling_mode);
2968
Chris Wilson9ce079e2012-04-17 15:31:30 +01002969 switch (INTEL_INFO(dev)->gen) {
Ben Widawsky5ab31332013-11-02 21:07:03 -07002970 case 8:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002971 case 7:
Imre Deak56c844e2013-01-07 21:47:34 +02002972 case 6:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002973 case 5:
2974 case 4: i965_write_fence_reg(dev, reg, obj); break;
2975 case 3: i915_write_fence_reg(dev, reg, obj); break;
2976 case 2: i830_write_fence_reg(dev, reg, obj); break;
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08002977 default: BUG();
Chris Wilson9ce079e2012-04-17 15:31:30 +01002978 }
Chris Wilsond0a57782012-10-09 19:24:37 +01002979
2980 /* And similarly be paranoid that no direct access to this region
2981 * is reordered to before the fence is installed.
2982 */
2983 if (i915_gem_object_needs_mb(obj))
2984 mb();
Jesse Barnesde151cf2008-11-12 10:03:55 -08002985}
2986
Chris Wilson61050802012-04-17 15:31:31 +01002987static inline int fence_number(struct drm_i915_private *dev_priv,
2988 struct drm_i915_fence_reg *fence)
2989{
2990 return fence - dev_priv->fence_regs;
2991}
2992
2993static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2994 struct drm_i915_fence_reg *fence,
2995 bool enable)
2996{
Chris Wilson2dc8aae2013-05-22 17:08:06 +01002997 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson46a0b632013-07-10 13:36:24 +01002998 int reg = fence_number(dev_priv, fence);
Chris Wilson61050802012-04-17 15:31:31 +01002999
Chris Wilson46a0b632013-07-10 13:36:24 +01003000 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
Chris Wilson61050802012-04-17 15:31:31 +01003001
3002 if (enable) {
Chris Wilson46a0b632013-07-10 13:36:24 +01003003 obj->fence_reg = reg;
Chris Wilson61050802012-04-17 15:31:31 +01003004 fence->obj = obj;
3005 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3006 } else {
3007 obj->fence_reg = I915_FENCE_REG_NONE;
3008 fence->obj = NULL;
3009 list_del_init(&fence->lru_list);
3010 }
Daniel Vetter94a335d2013-07-17 14:51:28 +02003011 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +01003012}
3013
Chris Wilsond9e86c02010-11-10 16:40:20 +00003014static int
Chris Wilsond0a57782012-10-09 19:24:37 +01003015i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003016{
Chris Wilson1c293ea2012-04-17 15:31:27 +01003017 if (obj->last_fenced_seqno) {
Chris Wilson86d5bc32012-07-20 12:41:04 +01003018 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01003019 if (ret)
3020 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003021
3022 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003023 }
3024
Chris Wilson86d5bc32012-07-20 12:41:04 +01003025 obj->fenced_gpu_access = false;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003026 return 0;
3027}
3028
3029int
3030i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3031{
Chris Wilson61050802012-04-17 15:31:31 +01003032 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003033 struct drm_i915_fence_reg *fence;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003034 int ret;
3035
Chris Wilsond0a57782012-10-09 19:24:37 +01003036 ret = i915_gem_object_wait_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003037 if (ret)
3038 return ret;
3039
Chris Wilson61050802012-04-17 15:31:31 +01003040 if (obj->fence_reg == I915_FENCE_REG_NONE)
3041 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01003042
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003043 fence = &dev_priv->fence_regs[obj->fence_reg];
3044
Chris Wilson61050802012-04-17 15:31:31 +01003045 i915_gem_object_fence_lost(obj);
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003046 i915_gem_object_update_fence(obj, fence, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003047
3048 return 0;
3049}
3050
3051static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01003052i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01003053{
Daniel Vetterae3db242010-02-19 11:51:58 +01003054 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01003055 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003056 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01003057
3058 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003059 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003060 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3061 reg = &dev_priv->fence_regs[i];
3062 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003063 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003064
Chris Wilson1690e1e2011-12-14 13:57:08 +01003065 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003066 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003067 }
3068
Chris Wilsond9e86c02010-11-10 16:40:20 +00003069 if (avail == NULL)
3070 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003071
3072 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003073 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01003074 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01003075 continue;
3076
Chris Wilson8fe301a2012-04-17 15:31:28 +01003077 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003078 }
3079
Chris Wilson8fe301a2012-04-17 15:31:28 +01003080 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003081}
3082
Jesse Barnesde151cf2008-11-12 10:03:55 -08003083/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003084 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08003085 * @obj: object to map through a fence reg
3086 *
3087 * When mapping objects through the GTT, userspace wants to be able to write
3088 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003089 * This function walks the fence regs looking for a free one for @obj,
3090 * stealing one if it can't find any.
3091 *
3092 * It then sets up the reg based on the object's properties: address, pitch
3093 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003094 *
3095 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003096 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003097int
Chris Wilson06d98132012-04-17 15:31:24 +01003098i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08003099{
Chris Wilson05394f32010-11-08 19:18:58 +00003100 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08003101 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01003102 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003103 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003104 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003105
Chris Wilson14415742012-04-17 15:31:33 +01003106	/* Have we updated the tiling parameters on the object and so
3107 * will need to serialise the write to the associated fence register?
3108 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003109 if (obj->fence_dirty) {
Chris Wilsond0a57782012-10-09 19:24:37 +01003110 ret = i915_gem_object_wait_fence(obj);
Chris Wilson14415742012-04-17 15:31:33 +01003111 if (ret)
3112 return ret;
3113 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003114
Chris Wilsond9e86c02010-11-10 16:40:20 +00003115 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00003116 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3117 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003118 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01003119 list_move_tail(&reg->lru_list,
3120 &dev_priv->mm.fence_list);
3121 return 0;
3122 }
3123 } else if (enable) {
3124 reg = i915_find_fence_reg(dev);
3125 if (reg == NULL)
3126 return -EDEADLK;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003127
Chris Wilson14415742012-04-17 15:31:33 +01003128 if (reg->obj) {
3129 struct drm_i915_gem_object *old = reg->obj;
3130
Chris Wilsond0a57782012-10-09 19:24:37 +01003131 ret = i915_gem_object_wait_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003132 if (ret)
3133 return ret;
3134
Chris Wilson14415742012-04-17 15:31:33 +01003135 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003136 }
Chris Wilson14415742012-04-17 15:31:33 +01003137 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07003138 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07003139
Chris Wilson14415742012-04-17 15:31:33 +01003140 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson14415742012-04-17 15:31:33 +01003141
Chris Wilson9ce079e2012-04-17 15:31:30 +01003142 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003143}
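
/*
 * Sketch of the usual caller pattern (e.g. servicing a GTT fault on a
 * tiled object), assuming @obj is already bound into the mappable
 * aperture:
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret)
 *		return ret;
 *
 * For a tiled object this leaves a fence register covering its GTT
 * range; for an untiled one it removes any stale fence instead.
 */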
3144
Chris Wilson42d6ab42012-07-26 11:49:32 +01003145static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3146 struct drm_mm_node *gtt_space,
3147 unsigned long cache_level)
3148{
3149 struct drm_mm_node *other;
3150
3151 /* On non-LLC machines we have to be careful when putting differing
3152 * types of snoopable memory together to avoid the prefetcher
Damien Lespiau4239ca72012-12-03 16:26:16 +00003153 * crossing memory domains and dying.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003154 */
3155 if (HAS_LLC(dev))
3156 return true;
3157
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003158 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003159 return true;
3160
3161 if (list_empty(&gtt_space->node_list))
3162 return true;
3163
3164 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3165 if (other->allocated && !other->hole_follows && other->color != cache_level)
3166 return false;
3167
3168 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3169 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3170 return false;
3171
3172 return true;
3173}
3174
3175static void i915_gem_verify_gtt(struct drm_device *dev)
3176{
3177#if WATCH_GTT
3178 struct drm_i915_private *dev_priv = dev->dev_private;
3179 struct drm_i915_gem_object *obj;
3180 int err = 0;
3181
Ben Widawsky35c20a62013-05-31 11:28:48 -07003182 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
Chris Wilson42d6ab42012-07-26 11:49:32 +01003183 if (obj->gtt_space == NULL) {
3184 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3185 err++;
3186 continue;
3187 }
3188
3189 if (obj->cache_level != obj->gtt_space->color) {
3190 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003191 i915_gem_obj_ggtt_offset(obj),
3192 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003193 obj->cache_level,
3194 obj->gtt_space->color);
3195 err++;
3196 continue;
3197 }
3198
3199 if (!i915_gem_valid_gtt_space(dev,
3200 obj->gtt_space,
3201 obj->cache_level)) {
3202 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003203 i915_gem_obj_ggtt_offset(obj),
3204 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003205 obj->cache_level);
3206 err++;
3207 continue;
3208 }
3209 }
3210
3211 WARN_ON(err);
3212#endif
3213}
3214
Jesse Barnesde151cf2008-11-12 10:03:55 -08003215/**
Eric Anholt673a3942008-07-30 12:06:12 -07003216 * Finds free space in the GTT aperture and binds the object there.
3217 */
3218static int
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003219i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3220 struct i915_address_space *vm,
3221 unsigned alignment,
3222 bool map_and_fenceable,
3223 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003224{
Chris Wilson05394f32010-11-08 19:18:58 +00003225 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003226 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter5e783302010-11-14 22:32:36 +01003227 u32 size, fence_size, fence_alignment, unfenced_alignment;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003228 size_t gtt_max =
3229 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
Ben Widawsky2f633152013-07-17 12:19:03 -07003230 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003231 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003232
Chris Wilsone28f8712011-07-18 13:11:49 -07003233 fence_size = i915_gem_get_gtt_size(dev,
3234 obj->base.size,
3235 obj->tiling_mode);
3236 fence_alignment = i915_gem_get_gtt_alignment(dev,
3237 obj->base.size,
Imre Deakd8651102013-01-07 21:47:33 +02003238 obj->tiling_mode, true);
Chris Wilsone28f8712011-07-18 13:11:49 -07003239 unfenced_alignment =
Imre Deakd8651102013-01-07 21:47:33 +02003240 i915_gem_get_gtt_alignment(dev,
Chris Wilsone28f8712011-07-18 13:11:49 -07003241 obj->base.size,
Imre Deakd8651102013-01-07 21:47:33 +02003242 obj->tiling_mode, false);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003243
Eric Anholt673a3942008-07-30 12:06:12 -07003244 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01003245 alignment = map_and_fenceable ? fence_alignment :
3246 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003247 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003248 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3249 return -EINVAL;
3250 }
3251
Chris Wilson05394f32010-11-08 19:18:58 +00003252 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003253
Chris Wilson654fc602010-05-27 13:18:21 +01003254 /* If the object is bigger than the entire aperture, reject it early
3255 * before evicting everything in a vain attempt to find space.
3256 */
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003257 if (obj->base.size > gtt_max) {
Jani Nikula3765f302013-06-07 16:03:50 +03003258 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
Chris Wilsona36689c2013-05-21 16:58:49 +01003259 obj->base.size,
3260 map_and_fenceable ? "mappable" : "total",
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003261 gtt_max);
Chris Wilson654fc602010-05-27 13:18:21 +01003262 return -E2BIG;
3263 }
3264
Chris Wilson37e680a2012-06-07 15:38:42 +01003265 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003266 if (ret)
3267 return ret;
3268
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003269 i915_gem_object_pin_pages(obj);
3270
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003271 BUG_ON(!i915_is_ggtt(vm));
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003272
Ben Widawskyaccfef22013-08-14 11:38:35 +02003273 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
Dan Carpenterdb473b32013-07-19 08:45:46 +03003274 if (IS_ERR(vma)) {
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003275 ret = PTR_ERR(vma);
3276 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003277 }
3278
Ben Widawskyaccfef22013-08-14 11:38:35 +02003279 /* For now we only ever use 1 vma per object */
3280 WARN_ON(!list_is_singular(&obj->vma_list));
3281
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003282search_free:
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003283 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003284 size, alignment,
David Herrmann31e5d7c2013-07-27 13:36:27 +02003285 obj->cache_level, 0, gtt_max,
3286 DRM_MM_SEARCH_DEFAULT);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003287 if (ret) {
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07003288 ret = i915_gem_evict_something(dev, vm, size, alignment,
Chris Wilson42d6ab42012-07-26 11:49:32 +01003289 obj->cache_level,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003290 map_and_fenceable,
3291 nonblocking);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003292 if (ret == 0)
3293 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003294
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003295 goto err_free_vma;
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003296 }
Ben Widawsky2f633152013-07-17 12:19:03 -07003297 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003298 obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003299 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003300 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003301 }
3302
Daniel Vetter74163902012-02-15 23:50:21 +01003303 ret = i915_gem_gtt_prepare_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003304 if (ret)
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003305 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003306
Ben Widawsky35c20a62013-05-31 11:28:48 -07003307 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -07003308 list_add_tail(&vma->mm_list, &vm->inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003309
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003310 if (i915_is_ggtt(vm)) {
3311 bool mappable, fenceable;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003312
Daniel Vetter49987092013-08-14 10:21:23 +02003313 fenceable = (vma->node.size == fence_size &&
3314 (vma->node.start & (fence_alignment - 1)) == 0);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003315
Daniel Vetter49987092013-08-14 10:21:23 +02003316 mappable = (vma->node.start + obj->base.size <=
3317 dev_priv->gtt.mappable_end);
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003318
Ben Widawsky5cacaac2013-07-31 17:00:13 -07003319 obj->map_and_fenceable = mappable && fenceable;
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003320 }
Daniel Vetter75e9e912010-11-04 17:11:09 +01003321
Ben Widawsky7ace7ef2013-08-09 22:12:12 -07003322 WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003323
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003324 trace_i915_vma_bind(vma, map_and_fenceable);
Chris Wilson42d6ab42012-07-26 11:49:32 +01003325 i915_gem_verify_gtt(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003326 return 0;
Ben Widawsky2f633152013-07-17 12:19:03 -07003327
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003328err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003329 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003330err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003331 i915_gem_vma_destroy(vma);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003332err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003333 i915_gem_object_unpin_pages(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003334 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003335}
3336
Chris Wilson000433b2013-08-08 14:41:09 +01003337bool
Chris Wilson2c225692013-08-09 12:26:45 +01003338i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3339 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003340{
Eric Anholt673a3942008-07-30 12:06:12 -07003341 /* If we don't have a page list set up, then we're not pinned
 3342	 * to the GPU, and we can ignore the cache flush because it'll happen
3343 * again at bind time.
3344 */
Chris Wilson05394f32010-11-08 19:18:58 +00003345 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003346 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003347
Imre Deak769ce462013-02-13 21:56:05 +02003348 /*
3349 * Stolen memory is always coherent with the GPU as it is explicitly
3350 * marked as wc by the system, or the system is cache-coherent.
3351 */
3352 if (obj->stolen)
Chris Wilson000433b2013-08-08 14:41:09 +01003353 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003354
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003355 /* If the GPU is snooping the contents of the CPU cache,
3356 * we do not need to manually clear the CPU cache lines. However,
3357 * the caches are only snooped when the render cache is
3358 * flushed/invalidated. As we always have to emit invalidations
3359 * and flushes when moving into and out of the RENDER domain, correct
3360 * snooping behaviour occurs naturally as the result of our domain
3361 * tracking.
3362 */
Chris Wilson2c225692013-08-09 12:26:45 +01003363 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
Chris Wilson000433b2013-08-08 14:41:09 +01003364 return false;
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003365
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003366 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003367 drm_clflush_sg(obj->pages);
Chris Wilson000433b2013-08-08 14:41:09 +01003368
3369 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003370}
3371
3372/** Flushes the GTT write domain for the object if it's dirty. */
3373static void
Chris Wilson05394f32010-11-08 19:18:58 +00003374i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003375{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003376 uint32_t old_write_domain;
3377
Chris Wilson05394f32010-11-08 19:18:58 +00003378 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003379 return;
3380
Chris Wilson63256ec2011-01-04 18:42:07 +00003381 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003382 * to it immediately go to main memory as far as we know, so there's
3383 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003384 *
3385 * However, we do have to enforce the order so that all writes through
3386 * the GTT land before any writes to the device, such as updates to
3387 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08003388 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003389 wmb();
3390
Chris Wilson05394f32010-11-08 19:18:58 +00003391 old_write_domain = obj->base.write_domain;
3392 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003393
3394 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003395 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003396 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003397}
3398
3399/** Flushes the CPU write domain for the object if it's dirty. */
3400static void
Chris Wilson2c225692013-08-09 12:26:45 +01003401i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3402 bool force)
Eric Anholte47c68e2008-11-14 13:35:19 -08003403{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003404 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08003405
Chris Wilson05394f32010-11-08 19:18:58 +00003406 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003407 return;
3408
Chris Wilson000433b2013-08-08 14:41:09 +01003409 if (i915_gem_clflush_object(obj, force))
3410 i915_gem_chipset_flush(obj->base.dev);
3411
Chris Wilson05394f32010-11-08 19:18:58 +00003412 old_write_domain = obj->base.write_domain;
3413 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003414
3415 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003416 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003417 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003418}
3419
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003420/**
3421 * Moves a single object to the GTT read, and possibly write domain.
3422 *
3423 * This function returns when the move is complete, including waiting on
3424 * flushes to occur.
3425 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003426int
Chris Wilson20217462010-11-23 15:26:33 +00003427i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003428{
Chris Wilson8325a092012-04-24 15:52:35 +01003429 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003430 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003431 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003432
Eric Anholt02354392008-11-26 13:58:13 -08003433 /* Not valid to be called on unbound objects. */
Ben Widawsky98438772013-07-31 17:00:12 -07003434 if (!i915_gem_obj_bound_any(obj))
Eric Anholt02354392008-11-26 13:58:13 -08003435 return -EINVAL;
3436
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003437 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3438 return 0;
3439
Chris Wilson0201f1e2012-07-20 12:41:01 +01003440 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003441 if (ret)
3442 return ret;
3443
Chris Wilson2c225692013-08-09 12:26:45 +01003444 i915_gem_object_flush_cpu_write_domain(obj, false);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003445
Chris Wilsond0a57782012-10-09 19:24:37 +01003446 /* Serialise direct access to this object with the barriers for
3447 * coherent writes from the GPU, by effectively invalidating the
3448 * GTT domain upon first access.
3449 */
3450 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3451 mb();
3452
Chris Wilson05394f32010-11-08 19:18:58 +00003453 old_write_domain = obj->base.write_domain;
3454 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003455
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003456 /* It should now be out of any other write domains, and we can update
3457 * the domain values for our changes.
3458 */
Chris Wilson05394f32010-11-08 19:18:58 +00003459 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3460 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003461 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003462 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3463 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3464 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003465 }
3466
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003467 trace_i915_gem_object_change_domain(obj,
3468 old_read_domains,
3469 old_write_domain);
3470
Chris Wilson8325a092012-04-24 15:52:35 +01003471 /* And bump the LRU for this access */
Ben Widawskyca191b12013-07-31 17:00:14 -07003472 if (i915_gem_object_is_inactive(obj)) {
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07003473 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Ben Widawskyca191b12013-07-31 17:00:14 -07003474 if (vma)
3475 list_move_tail(&vma->mm_list,
3476 &dev_priv->gtt.base.inactive_list);
3477
3478 }
Chris Wilson8325a092012-04-24 15:52:35 +01003479
Eric Anholte47c68e2008-11-14 13:35:19 -08003480 return 0;
3481}
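
/*
 * Sketch of a typical caller (e.g. a pwrite through the aperture),
 * assuming the object is already bound:
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *
 * after which the object is in the GTT write domain, marked dirty, and
 * safe to write through the mappable aperture.
 */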
3482
Chris Wilsone4ffd172011-04-04 09:44:39 +01003483int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3484 enum i915_cache_level cache_level)
3485{
Daniel Vetter7bddb012012-02-09 17:15:47 +01003486 struct drm_device *dev = obj->base.dev;
3487 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003488 struct i915_vma *vma;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003489 int ret;
3490
3491 if (obj->cache_level == cache_level)
3492 return 0;
3493
3494 if (obj->pin_count) {
3495 DRM_DEBUG("can not change the cache level of pinned objects\n");
3496 return -EBUSY;
3497 }
3498
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003499 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3500 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003501 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003502 if (ret)
3503 return ret;
3504
3505 break;
3506 }
Chris Wilson42d6ab42012-07-26 11:49:32 +01003507 }
3508
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003509 if (i915_gem_obj_bound_any(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003510 ret = i915_gem_object_finish_gpu(obj);
3511 if (ret)
3512 return ret;
3513
3514 i915_gem_object_finish_gtt(obj);
3515
3516 /* Before SandyBridge, you could not use tiling or fence
3517 * registers with snooped memory, so relinquish any fences
3518 * currently pointing to our region in the aperture.
3519 */
Chris Wilson42d6ab42012-07-26 11:49:32 +01003520 if (INTEL_INFO(dev)->gen < 6) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003521 ret = i915_gem_object_put_fence(obj);
3522 if (ret)
3523 return ret;
3524 }
3525
Daniel Vetter74898d72012-02-15 23:50:22 +01003526 if (obj->has_global_gtt_mapping)
3527 i915_gem_gtt_bind_object(obj, cache_level);
Daniel Vetter7bddb012012-02-09 17:15:47 +01003528 if (obj->has_aliasing_ppgtt_mapping)
3529 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3530 obj, cache_level);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003531 }
3532
Chris Wilson2c225692013-08-09 12:26:45 +01003533 list_for_each_entry(vma, &obj->vma_list, vma_link)
3534 vma->node.color = cache_level;
3535 obj->cache_level = cache_level;
3536
3537 if (cpu_write_needs_clflush(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003538 u32 old_read_domains, old_write_domain;
3539
3540 /* If we're coming from LLC cached, then we haven't
3541 * actually been tracking whether the data is in the
3542 * CPU cache or not, since we only allow one bit set
3543 * in obj->write_domain and have been skipping the clflushes.
3544 * Just set it to the CPU cache for now.
3545 */
3546 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003547
3548 old_read_domains = obj->base.read_domains;
3549 old_write_domain = obj->base.write_domain;
3550
3551 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3552 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3553
3554 trace_i915_gem_object_change_domain(obj,
3555 old_read_domains,
3556 old_write_domain);
3557 }
3558
Chris Wilson42d6ab42012-07-26 11:49:32 +01003559 i915_gem_verify_gtt(dev);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003560 return 0;
3561}
3562
Ben Widawsky199adf42012-09-21 17:01:20 -07003563int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3564 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003565{
Ben Widawsky199adf42012-09-21 17:01:20 -07003566 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003567 struct drm_i915_gem_object *obj;
3568 int ret;
3569
3570 ret = i915_mutex_lock_interruptible(dev);
3571 if (ret)
3572 return ret;
3573
3574 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3575 if (&obj->base == NULL) {
3576 ret = -ENOENT;
3577 goto unlock;
3578 }
3579
Chris Wilson651d7942013-08-08 14:41:10 +01003580 switch (obj->cache_level) {
3581 case I915_CACHE_LLC:
3582 case I915_CACHE_L3_LLC:
3583 args->caching = I915_CACHING_CACHED;
3584 break;
3585
Chris Wilson4257d3b2013-08-08 14:41:11 +01003586 case I915_CACHE_WT:
3587 args->caching = I915_CACHING_DISPLAY;
3588 break;
3589
Chris Wilson651d7942013-08-08 14:41:10 +01003590 default:
3591 args->caching = I915_CACHING_NONE;
3592 break;
3593 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003594
3595 drm_gem_object_unreference(&obj->base);
3596unlock:
3597 mutex_unlock(&dev->struct_mutex);
3598 return ret;
3599}
3600
Ben Widawsky199adf42012-09-21 17:01:20 -07003601int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3602 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003603{
Ben Widawsky199adf42012-09-21 17:01:20 -07003604 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003605 struct drm_i915_gem_object *obj;
3606 enum i915_cache_level level;
3607 int ret;
3608
Ben Widawsky199adf42012-09-21 17:01:20 -07003609 switch (args->caching) {
3610 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003611 level = I915_CACHE_NONE;
3612 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003613 case I915_CACHING_CACHED:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003614 level = I915_CACHE_LLC;
3615 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003616 case I915_CACHING_DISPLAY:
3617 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3618 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003619 default:
3620 return -EINVAL;
3621 }
3622
Ben Widawsky3bc29132012-09-26 16:15:20 -07003623 ret = i915_mutex_lock_interruptible(dev);
3624 if (ret)
3625 return ret;
3626
Chris Wilsone6994ae2012-07-10 10:27:08 +01003627 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3628 if (&obj->base == NULL) {
3629 ret = -ENOENT;
3630 goto unlock;
3631 }
3632
3633 ret = i915_gem_object_set_cache_level(obj, level);
3634
3635 drm_gem_object_unreference(&obj->base);
3636unlock:
3637 mutex_unlock(&dev->struct_mutex);
3638 return ret;
3639}
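
/*
 * Illustrative sketch (editor's addition): exercising the caching
 * ioctls above from userspace, assuming "fd" and "handle" from earlier
 * setup and the I915_CACHING_* constants from i915_drm.h. Note the set
 * may legitimately fail with -EBUSY for pinned objects.
 *
 *	struct drm_i915_gem_caching c = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &c) == 0) {
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &c);
 *		assert(c.caching == I915_CACHING_CACHED);
 *	}
 */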
3640
Chris Wilsoncc98b412013-08-09 12:25:09 +01003641static bool is_pin_display(struct drm_i915_gem_object *obj)
3642{
3643 /* There are 3 sources that pin objects:
3644 * 1. The display engine (scanouts, sprites, cursors);
3645 * 2. Reservations for execbuffer;
3646 * 3. The user.
3647 *
3648 * We can ignore reservations as we hold the struct_mutex and
3649 * are only called outside of the reservation path. The user
3650 * can only increment pin_count once, and so if after
3651 * subtracting the potential reference by the user, any pin_count
3652 * remains, it must be due to another use by the display engine.
3653 */
3654 return obj->pin_count - !!obj->user_pin_count;
3655}
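
/*
 * Worked example (editor's note): with pin_count == 2 and
 * user_pin_count == 1, the expression yields 2 - 1 == 1, i.e. a pin
 * other than the user's remains and, reservations being excluded,
 * must belong to the display engine.
 */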
3656
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003657/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003658 * Prepare a buffer for the display plane (scanout, cursors, etc.).
3659 * Can be called from an uninterruptible phase (modesetting) and allows
3660 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003661 */
3662int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003663i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3664 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00003665 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003666{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003667 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003668 int ret;
3669
Chris Wilson0be73282010-12-06 14:36:27 +00003670 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07003671 ret = i915_gem_object_sync(obj, pipelined);
3672 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003673 return ret;
3674 }
3675
Chris Wilsoncc98b412013-08-09 12:25:09 +01003676 /* Mark the pin_display early so that we account for the
3677 * display coherency whilst setting up the cache domains.
3678 */
3679 obj->pin_display = true;
3680
Eric Anholta7ef0642011-03-29 16:59:54 -07003681 /* The display engine is not coherent with the LLC cache on gen6. As
3682 * a result, we make sure that the pinning that is about to occur is
 3683 * done with uncached PTEs. This is the lowest common denominator for all
3684 * chipsets.
3685 *
3686 * However for gen6+, we could do better by using the GFDT bit instead
3687 * of uncaching, which would allow us to flush all the LLC-cached data
3688 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3689 */
Chris Wilson651d7942013-08-08 14:41:10 +01003690 ret = i915_gem_object_set_cache_level(obj,
3691 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Eric Anholta7ef0642011-03-29 16:59:54 -07003692 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003693 goto err_unpin_display;
Eric Anholta7ef0642011-03-29 16:59:54 -07003694
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003695 /* As the user may map the buffer once pinned in the display plane
3696 * (e.g. libkms for the bootup splash), we have to ensure that we
3697 * always use map_and_fenceable for all scanout buffers.
3698 */
Ben Widawskyc37e2202013-07-31 16:59:58 -07003699 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003700 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003701 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003702
Chris Wilson2c225692013-08-09 12:26:45 +01003703 i915_gem_object_flush_cpu_write_domain(obj, true);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003704
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003705 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003706 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003707
3708 /* It should now be out of any other write domains, and we can update
3709 * the domain values for our changes.
3710 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003711 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003712 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003713
3714 trace_i915_gem_object_change_domain(obj,
3715 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003716 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003717
3718 return 0;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003719
3720err_unpin_display:
3721 obj->pin_display = is_pin_display(obj);
3722 return ret;
3723}
3724
3725void
3726i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3727{
3728 i915_gem_object_unpin(obj);
3729 obj->pin_display = is_pin_display(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003730}
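
/*
 * Typical pairing (editor's sketch, assuming a display consumer such
 * as the cursor or sprite code):
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, align, pipelined);
 *	if (ret == 0) {
 *		(program the plane to scan out of obj)
 *		i915_gem_object_unpin_from_display_plane(obj);
 *	}
 */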
3731
Chris Wilson85345512010-11-13 09:49:11 +00003732int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003733i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003734{
Chris Wilson88241782011-01-07 17:09:48 +00003735 int ret;
3736
Chris Wilsona8198ee2011-04-13 22:04:09 +01003737 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003738 return 0;
3739
Chris Wilson0201f1e2012-07-20 12:41:01 +01003740 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsonc501ae72011-12-14 13:57:23 +01003741 if (ret)
3742 return ret;
3743
Chris Wilsona8198ee2011-04-13 22:04:09 +01003744 /* Ensure that we invalidate the GPU's caches and TLBs. */
3745 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003746 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003747}
3748
Eric Anholte47c68e2008-11-14 13:35:19 -08003749/**
 3750 * Moves a single object to the CPU read, and possibly write, domain.
3751 *
3752 * This function returns when the move is complete, including waiting on
3753 * flushes to occur.
3754 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003755int
Chris Wilson919926a2010-11-12 13:42:53 +00003756i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003757{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003758 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003759 int ret;
3760
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003761 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3762 return 0;
3763
Chris Wilson0201f1e2012-07-20 12:41:01 +01003764 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003765 if (ret)
3766 return ret;
3767
Eric Anholte47c68e2008-11-14 13:35:19 -08003768 i915_gem_object_flush_gtt_write_domain(obj);
3769
Chris Wilson05394f32010-11-08 19:18:58 +00003770 old_write_domain = obj->base.write_domain;
3771 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003772
Eric Anholte47c68e2008-11-14 13:35:19 -08003773 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003774 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003775 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003776
Chris Wilson05394f32010-11-08 19:18:58 +00003777 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003778 }
3779
3780 /* It should now be out of any other write domains, and we can update
3781 * the domain values for our changes.
3782 */
Chris Wilson05394f32010-11-08 19:18:58 +00003783 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003784
3785 /* If we're writing through the CPU, then the GPU read domains will
3786 * need to be invalidated at next use.
3787 */
3788 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003789 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3790 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003791 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003792
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003793 trace_i915_gem_object_change_domain(obj,
3794 old_read_domains,
3795 old_write_domain);
3796
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003797 return 0;
3798}
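
/*
 * Illustrative sketch (editor's addition): userspace reaches this path
 * with a set-domain to CPU before reading back through the shmem mmap;
 * "fd" and "handle" are assumed from earlier setup.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = 0,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */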
3799
Eric Anholt673a3942008-07-30 12:06:12 -07003800/* Throttle our rendering by waiting until the ring has completed our requests
3801 * emitted over 20 msec ago.
3802 *
Eric Anholtb9624422009-06-03 07:27:35 +00003803 * Note that if we were to use the current jiffies each time around the loop,
3804 * we wouldn't escape the function with any frames outstanding if the time to
3805 * render a frame was over 20ms.
3806 *
Eric Anholt673a3942008-07-30 12:06:12 -07003807 * This should get us reasonable parallelism between CPU and GPU but also
3808 * relatively low latency when blocking on a particular request to finish.
3809 */
3810static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003811i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003812{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003813 struct drm_i915_private *dev_priv = dev->dev_private;
3814 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003815 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003816 struct drm_i915_gem_request *request;
3817 struct intel_ring_buffer *ring = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01003818 unsigned reset_counter;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003819 u32 seqno = 0;
3820 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003821
Daniel Vetter308887a2012-11-14 17:14:06 +01003822 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3823 if (ret)
3824 return ret;
3825
3826 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3827 if (ret)
3828 return ret;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003829
Chris Wilson1c255952010-09-26 11:03:27 +01003830 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003831 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003832 if (time_after_eq(request->emitted_jiffies, recent_enough))
3833 break;
3834
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003835 ring = request->ring;
3836 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003837 }
Daniel Vetterf69061b2012-12-06 09:01:42 +01003838 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson1c255952010-09-26 11:03:27 +01003839 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003840
3841 if (seqno == 0)
3842 return 0;
3843
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003844 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003845 if (ret == 0)
3846 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003847
Eric Anholt673a3942008-07-30 12:06:12 -07003848 return ret;
3849}
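
/*
 * Illustrative sketch (editor's addition): the throttle ioctl carries
 * no payload, so a client simply invokes it once per frame to avoid
 * queueing much more than 20ms of rendering.
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 */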
3850
Eric Anholt673a3942008-07-30 12:06:12 -07003851int
Chris Wilson05394f32010-11-08 19:18:58 +00003852i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07003853 struct i915_address_space *vm,
Chris Wilson05394f32010-11-08 19:18:58 +00003854 uint32_t alignment,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003855 bool map_and_fenceable,
3856 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003857{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003858 struct i915_vma *vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003859 int ret;
3860
Chris Wilson7e81a422012-09-15 09:41:57 +01003861 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3862 return -EBUSY;
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003863
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003864 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3865
3866 vma = i915_gem_obj_to_vma(obj, vm);
3867
3868 if (vma) {
3869 if ((alignment &&
3870 vma->node.start & (alignment - 1)) ||
Chris Wilson05394f32010-11-08 19:18:58 +00003871 (map_and_fenceable && !obj->map_and_fenceable)) {
3872 WARN(obj->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003873 "bo is already pinned with incorrect alignment:"
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003874 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003875 " obj->map_and_fenceable=%d\n",
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003876 i915_gem_obj_offset(obj, vm), alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003877 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003878 obj->map_and_fenceable);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003879 ret = i915_vma_unbind(vma);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003880 if (ret)
3881 return ret;
3882 }
3883 }
3884
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003885 if (!i915_gem_obj_bound(obj, vm)) {
Chris Wilson87422672012-11-21 13:04:03 +00003886 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3887
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003888 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3889 map_and_fenceable,
3890 nonblocking);
Chris Wilson97311292009-09-21 00:22:34 +01003891 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003892 return ret;
Chris Wilson87422672012-11-21 13:04:03 +00003893
3894 if (!dev_priv->mm.aliasing_ppgtt)
3895 i915_gem_gtt_bind_object(obj, obj->cache_level);
Chris Wilson22c344e2009-02-11 14:26:45 +00003896 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003897
Daniel Vetter74898d72012-02-15 23:50:22 +01003898 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3899 i915_gem_gtt_bind_object(obj, obj->cache_level);
3900
Chris Wilson1b502472012-04-24 15:47:30 +01003901 obj->pin_count++;
Chris Wilson6299f992010-11-24 12:23:44 +00003902 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003903
3904 return 0;
3905}
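
/*
 * In-kernel usage sketch (editor's addition): most callers go through
 * the GGTT convenience wrapper rather than passing a vm explicitly,
 * and must balance every successful pin with i915_gem_object_unpin():
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
 *	if (ret == 0) {
 *		(use the mappable, fenceable GGTT binding)
 *		i915_gem_object_unpin(obj);
 *	}
 */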
3906
3907void
Chris Wilson05394f32010-11-08 19:18:58 +00003908i915_gem_object_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003909{
Chris Wilson05394f32010-11-08 19:18:58 +00003910 BUG_ON(obj->pin_count == 0);
Ben Widawsky98438772013-07-31 17:00:12 -07003911 BUG_ON(!i915_gem_obj_bound_any(obj));
Eric Anholt673a3942008-07-30 12:06:12 -07003912
Chris Wilson1b502472012-04-24 15:47:30 +01003913 if (--obj->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003914 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003915}
3916
3917int
3918i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003919 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003920{
3921 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003922 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003923 int ret;
3924
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003925 ret = i915_mutex_lock_interruptible(dev);
3926 if (ret)
3927 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003928
Chris Wilson05394f32010-11-08 19:18:58 +00003929 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003930 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003931 ret = -ENOENT;
3932 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003933 }
Eric Anholt673a3942008-07-30 12:06:12 -07003934
Chris Wilson05394f32010-11-08 19:18:58 +00003935 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003936 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003937 ret = -EINVAL;
3938 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003939 }
3940
Chris Wilson05394f32010-11-08 19:18:58 +00003941 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003942 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3943 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003944 ret = -EINVAL;
3945 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003946 }
3947
Daniel Vetteraa5f8022013-10-10 14:46:37 +02003948 if (obj->user_pin_count == ULONG_MAX) {
3949 ret = -EBUSY;
3950 goto out;
3951 }
3952
Chris Wilson93be8782013-01-02 10:31:22 +00003953 if (obj->user_pin_count == 0) {
Ben Widawskyc37e2202013-07-31 16:59:58 -07003954 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003955 if (ret)
3956 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003957 }
3958
Chris Wilson93be8782013-01-02 10:31:22 +00003959 obj->user_pin_count++;
3960 obj->pin_filp = file;
3961
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003962 args->offset = i915_gem_obj_ggtt_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003963out:
Chris Wilson05394f32010-11-08 19:18:58 +00003964 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003965unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003966 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003967 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003968}
3969
3970int
3971i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003972 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003973{
3974 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003975 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003976 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003977
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003978 ret = i915_mutex_lock_interruptible(dev);
3979 if (ret)
3980 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003981
Chris Wilson05394f32010-11-08 19:18:58 +00003982 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003983 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003984 ret = -ENOENT;
3985 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003986 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003987
Chris Wilson05394f32010-11-08 19:18:58 +00003988 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003989		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3990 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003991 ret = -EINVAL;
3992 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003993 }
Chris Wilson05394f32010-11-08 19:18:58 +00003994 obj->user_pin_count--;
3995 if (obj->user_pin_count == 0) {
3996 obj->pin_filp = NULL;
Jesse Barnes79e53942008-11-07 14:24:08 -08003997 i915_gem_object_unpin(obj);
3998 }
Eric Anholt673a3942008-07-30 12:06:12 -07003999
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004000out:
Chris Wilson05394f32010-11-08 19:18:58 +00004001 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004002unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004003 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004004 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004005}
4006
4007int
4008i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00004009 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004010{
4011 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004012 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01004013 int ret;
4014
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004015 ret = i915_mutex_lock_interruptible(dev);
4016 if (ret)
4017 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004018
Chris Wilson05394f32010-11-08 19:18:58 +00004019 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004020 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004021 ret = -ENOENT;
4022 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004023 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08004024
Chris Wilson0be555b2010-08-04 15:36:30 +01004025 /* Count all active objects as busy, even if they are currently not used
4026 * by the gpu. Users of this interface expect objects to eventually
4027 * become non-busy without any further actions, therefore emit any
4028 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08004029 */
Daniel Vetter30dfebf2012-06-01 15:21:23 +02004030 ret = i915_gem_object_flush_active(obj);
4031
Chris Wilson05394f32010-11-08 19:18:58 +00004032 args->busy = obj->active;
Chris Wilsone9808ed2012-07-04 12:25:08 +01004033 if (obj->ring) {
4034 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4035 args->busy |= intel_ring_flag(obj->ring) << 16;
4036 }
Eric Anholt673a3942008-07-30 12:06:12 -07004037
Chris Wilson05394f32010-11-08 19:18:58 +00004038 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004039unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004040 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004041 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004042}
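
/*
 * Illustrative sketch (editor's addition): decoding the result in
 * userspace. Bit 0 reports activity and bits 16 and up carry the ring
 * flag emitted above; "fd" and "handle" are assumed from earlier setup.
 *
 *	struct drm_i915_gem_busy b = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &b);
 *	if (b.busy & 1)
 *		printf("busy on ring flag %x\n", b.busy >> 16);
 */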
4043
4044int
4045i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4046 struct drm_file *file_priv)
4047{
Akshay Joshi0206e352011-08-16 15:34:10 -04004048 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004049}
4050
Chris Wilson3ef94da2009-09-14 16:50:29 +01004051int
4052i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4053 struct drm_file *file_priv)
4054{
4055 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004056 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004057 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004058
4059 switch (args->madv) {
4060 case I915_MADV_DONTNEED:
4061 case I915_MADV_WILLNEED:
4062 break;
4063 default:
4064 return -EINVAL;
4065 }
4066
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004067 ret = i915_mutex_lock_interruptible(dev);
4068 if (ret)
4069 return ret;
4070
Chris Wilson05394f32010-11-08 19:18:58 +00004071 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004072 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004073 ret = -ENOENT;
4074 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004075 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01004076
Chris Wilson05394f32010-11-08 19:18:58 +00004077 if (obj->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004078 ret = -EINVAL;
4079 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004080 }
4081
Chris Wilson05394f32010-11-08 19:18:58 +00004082 if (obj->madv != __I915_MADV_PURGED)
4083 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004084
Chris Wilson6c085a72012-08-20 11:40:46 +02004085 /* if the object is no longer attached, discard its backing storage */
4086 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01004087 i915_gem_object_truncate(obj);
4088
Chris Wilson05394f32010-11-08 19:18:58 +00004089 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004090
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004091out:
Chris Wilson05394f32010-11-08 19:18:58 +00004092 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004093unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004094 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004095 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004096}
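
/*
 * Illustrative sketch (editor's addition): a userspace buffer cache
 * can mark idle entries purgeable and later reclaim them, checking
 * "retained" to learn whether the pages survived the shrinker. The
 * struct and constants are from i915_drm.h.
 *
 *	struct drm_i915_gem_madvise m = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &m);
 *	(later, on reuse)
 *	m.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &m);
 *	if (!m.retained)
 *		(backing store was purged; regenerate the contents)
 */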
4097
Chris Wilson37e680a2012-06-07 15:38:42 +01004098void i915_gem_object_init(struct drm_i915_gem_object *obj,
4099 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004100{
Ben Widawsky35c20a62013-05-31 11:28:48 -07004101 INIT_LIST_HEAD(&obj->global_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004102 INIT_LIST_HEAD(&obj->ring_list);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02004103 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004104 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004105
Chris Wilson37e680a2012-06-07 15:38:42 +01004106 obj->ops = ops;
4107
Chris Wilson0327d6b2012-08-11 15:41:06 +01004108 obj->fence_reg = I915_FENCE_REG_NONE;
4109 obj->madv = I915_MADV_WILLNEED;
4110 /* Avoid an unnecessary call to unbind on the first bind. */
4111 obj->map_and_fenceable = true;
4112
4113 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4114}
4115
Chris Wilson37e680a2012-06-07 15:38:42 +01004116static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4117 .get_pages = i915_gem_object_get_pages_gtt,
4118 .put_pages = i915_gem_object_put_pages_gtt,
4119};
4120
Chris Wilson05394f32010-11-08 19:18:58 +00004121struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4122 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004123{
Daniel Vetterc397b902010-04-09 19:05:07 +00004124 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004125 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004126 gfp_t mask;
Daniel Vetterc397b902010-04-09 19:05:07 +00004127
Chris Wilson42dcedd2012-11-15 11:32:30 +00004128 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004129 if (obj == NULL)
4130 return NULL;
4131
4132 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
Chris Wilson42dcedd2012-11-15 11:32:30 +00004133 i915_gem_object_free(obj);
Daniel Vetterc397b902010-04-09 19:05:07 +00004134 return NULL;
4135 }
4136
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004137 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4138 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4139 /* 965gm cannot relocate objects above 4GiB. */
4140 mask &= ~__GFP_HIGHMEM;
4141 mask |= __GFP_DMA32;
4142 }
4143
Al Viro496ad9a2013-01-23 17:07:38 -05004144 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004145 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004146
Chris Wilson37e680a2012-06-07 15:38:42 +01004147 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004148
Daniel Vetterc397b902010-04-09 19:05:07 +00004149 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4150 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4151
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004152 if (HAS_LLC(dev)) {
4153 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004154 * cache) for about a 10% performance improvement
4155 * compared to uncached. Graphics requests other than
4156 * display scanout are coherent with the CPU in
4157 * accessing this cache. This means in this mode we
4158 * don't need to clflush on the CPU side, and on the
4159 * GPU side we only need to flush internal caches to
4160 * get data visible to the CPU.
4161 *
4162 * However, we maintain the display planes as UC, and so
4163 * need to rebind when first used as such.
4164 */
4165 obj->cache_level = I915_CACHE_LLC;
4166 } else
4167 obj->cache_level = I915_CACHE_NONE;
4168
Daniel Vetterd861e332013-07-24 23:25:03 +02004169 trace_i915_gem_object_create(obj);
4170
Chris Wilson05394f32010-11-08 19:18:58 +00004171 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004172}
4173
Chris Wilson1488fc02012-04-24 15:47:31 +01004174void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004175{
Chris Wilson1488fc02012-04-24 15:47:31 +01004176 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004177 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01004178 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004179 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004180
Paulo Zanonif65c9162013-11-27 18:20:34 -02004181 intel_runtime_pm_get(dev_priv);
4182
Chris Wilson26e12f892011-03-20 11:20:19 +00004183 trace_i915_gem_object_destroy(obj);
4184
Chris Wilson1488fc02012-04-24 15:47:31 +01004185 if (obj->phys_obj)
4186 i915_gem_detach_phys_object(dev, obj);
4187
4188 obj->pin_count = 0;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004189 /* NB: 0 or 1 elements */
4190 WARN_ON(!list_empty(&obj->vma_list) &&
4191 !list_is_singular(&obj->vma_list));
4192 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4193 int ret = i915_vma_unbind(vma);
4194 if (WARN_ON(ret == -ERESTARTSYS)) {
4195 bool was_interruptible;
Chris Wilson1488fc02012-04-24 15:47:31 +01004196
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004197 was_interruptible = dev_priv->mm.interruptible;
4198 dev_priv->mm.interruptible = false;
Chris Wilson1488fc02012-04-24 15:47:31 +01004199
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004200 WARN_ON(i915_vma_unbind(vma));
Chris Wilson1488fc02012-04-24 15:47:31 +01004201
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004202 dev_priv->mm.interruptible = was_interruptible;
4203 }
Chris Wilson1488fc02012-04-24 15:47:31 +01004204 }
4205
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004206 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4207 * before progressing. */
4208 if (obj->stolen)
4209 i915_gem_object_unpin_pages(obj);
4210
Ben Widawsky401c29f2013-05-31 11:28:47 -07004211 if (WARN_ON(obj->pages_pin_count))
4212 obj->pages_pin_count = 0;
Chris Wilson37e680a2012-06-07 15:38:42 +01004213 i915_gem_object_put_pages(obj);
Chris Wilsond8cb5082012-08-11 15:41:03 +01004214 i915_gem_object_free_mmap_offset(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +00004215 i915_gem_object_release_stolen(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004216
Chris Wilson9da3da62012-06-01 15:20:22 +01004217 BUG_ON(obj->pages);
4218
Chris Wilson2f745ad2012-09-04 21:02:58 +01004219 if (obj->base.import_attach)
4220 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004221
Chris Wilson05394f32010-11-08 19:18:58 +00004222 drm_gem_object_release(&obj->base);
4223 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004224
Chris Wilson05394f32010-11-08 19:18:58 +00004225 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004226 i915_gem_object_free(obj);
Paulo Zanonif65c9162013-11-27 18:20:34 -02004227
4228 intel_runtime_pm_put(dev_priv);
Chris Wilsonbe726152010-07-23 23:18:50 +01004229}
4230
Daniel Vettere656a6c2013-08-14 14:14:04 +02004231struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
Ben Widawsky2f633152013-07-17 12:19:03 -07004232 struct i915_address_space *vm)
4233{
Daniel Vettere656a6c2013-08-14 14:14:04 +02004234 struct i915_vma *vma;
4235 list_for_each_entry(vma, &obj->vma_list, vma_link)
4236 if (vma->vm == vm)
4237 return vma;
4238
4239 return NULL;
4240}
4241
4242static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
4243 struct i915_address_space *vm)
4244{
Ben Widawsky2f633152013-07-17 12:19:03 -07004245 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4246 if (vma == NULL)
4247 return ERR_PTR(-ENOMEM);
4248
4249 INIT_LIST_HEAD(&vma->vma_link);
Ben Widawskyca191b12013-07-31 17:00:14 -07004250 INIT_LIST_HEAD(&vma->mm_list);
Ben Widawsky82a55ad2013-08-14 11:38:34 +02004251 INIT_LIST_HEAD(&vma->exec_list);
Ben Widawsky2f633152013-07-17 12:19:03 -07004252 vma->vm = vm;
4253 vma->obj = obj;
4254
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004255	/* Keep GGTT vmas first to make debugging easier */
4256 if (i915_is_ggtt(vm))
4257 list_add(&vma->vma_link, &obj->vma_list);
4258 else
4259 list_add_tail(&vma->vma_link, &obj->vma_list);
4260
Ben Widawsky2f633152013-07-17 12:19:03 -07004261 return vma;
4262}
4263
Daniel Vettere656a6c2013-08-14 14:14:04 +02004264struct i915_vma *
4265i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4266 struct i915_address_space *vm)
4267{
4268 struct i915_vma *vma;
4269
4270 vma = i915_gem_obj_to_vma(obj, vm);
4271 if (!vma)
4272 vma = __i915_gem_vma_create(obj, vm);
4273
4274 return vma;
4275}
4276
Ben Widawsky2f633152013-07-17 12:19:03 -07004277void i915_gem_vma_destroy(struct i915_vma *vma)
4278{
4279 WARN_ON(vma->node.allocated);
Chris Wilsonaaa05662013-08-20 12:56:40 +01004280
4281 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4282 if (!list_empty(&vma->exec_list))
4283 return;
4284
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004285 list_del(&vma->vma_link);
Daniel Vetterb93dab62013-08-26 11:23:47 +02004286
Ben Widawsky2f633152013-07-17 12:19:03 -07004287 kfree(vma);
4288}
4289
Jesse Barnes5669fca2009-02-17 15:13:31 -08004290int
Chris Wilson45c5f202013-10-16 11:50:01 +01004291i915_gem_suspend(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004292{
4293 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson45c5f202013-10-16 11:50:01 +01004294 int ret = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004295
Chris Wilson45c5f202013-10-16 11:50:01 +01004296 mutex_lock(&dev->struct_mutex);
Chris Wilsonf7403342013-09-13 23:57:04 +01004297 if (dev_priv->ums.mm_suspended)
Chris Wilson45c5f202013-10-16 11:50:01 +01004298 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07004299
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004300 ret = i915_gpu_idle(dev);
Chris Wilsonf7403342013-09-13 23:57:04 +01004301 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004302 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004303
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004304 i915_gem_retire_requests(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004305
Chris Wilson29105cc2010-01-07 10:39:13 +00004306 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01004307 if (!drm_core_check_feature(dev, DRIVER_MODESET))
Chris Wilson6c085a72012-08-20 11:40:46 +02004308 i915_gem_evict_everything(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004309
Chris Wilson29105cc2010-01-07 10:39:13 +00004310 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004311 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004312
Chris Wilson45c5f202013-10-16 11:50:01 +01004313 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4314 * We need to replace this with a semaphore, or something.
4315 * And not confound ums.mm_suspended!
4316 */
4317 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4318 DRIVER_MODESET);
4319 mutex_unlock(&dev->struct_mutex);
4320
4321 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004322 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004323 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004324
Eric Anholt673a3942008-07-30 12:06:12 -07004325 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004326
4327err:
4328 mutex_unlock(&dev->struct_mutex);
4329 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004330}
4331
Ben Widawskyc3787e22013-09-17 21:12:44 -07004332int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
Ben Widawskyb9524a12012-05-25 16:56:24 -07004333{
Ben Widawskyc3787e22013-09-17 21:12:44 -07004334 struct drm_device *dev = ring->dev;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004335 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004336 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4337 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
Ben Widawskyc3787e22013-09-17 21:12:44 -07004338 int i, ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004339
Ben Widawsky040d2ba2013-09-19 11:01:40 -07004340 if (!HAS_L3_DPF(dev) || !remap_info)
Ben Widawskyc3787e22013-09-17 21:12:44 -07004341 return 0;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004342
Ben Widawskyc3787e22013-09-17 21:12:44 -07004343 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4344 if (ret)
4345 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004346
Ben Widawskyc3787e22013-09-17 21:12:44 -07004347 /*
4348 * Note: We do not worry about the concurrent register cacheline hang
4349 * here because no other code should access these registers other than
4350 * at initialization time.
4351 */
Ben Widawskyb9524a12012-05-25 16:56:24 -07004352 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
Ben Widawskyc3787e22013-09-17 21:12:44 -07004353 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4354 intel_ring_emit(ring, reg_base + i);
4355 intel_ring_emit(ring, remap_info[i/4]);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004356 }
4357
Ben Widawskyc3787e22013-09-17 21:12:44 -07004358 intel_ring_advance(ring);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004359
Ben Widawskyc3787e22013-09-17 21:12:44 -07004360 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004361}
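
/*
 * Command-stream layout (editor's note): each loop iteration above
 * emits three dwords, MI_LOAD_REGISTER_IMM(1), the register offset and
 * the value, so the GEN7_L3LOG_SIZE/4 registers cost exactly the
 * GEN7_L3LOG_SIZE / 4 * 3 dwords reserved by intel_ring_begin().
 */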
4362
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004363void i915_gem_init_swizzling(struct drm_device *dev)
4364{
4365 drm_i915_private_t *dev_priv = dev->dev_private;
4366
Daniel Vetter11782b02012-01-31 16:47:55 +01004367 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004368 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4369 return;
4370
4371 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4372 DISP_TILE_SURFACE_SWIZZLING);
4373
Daniel Vetter11782b02012-01-31 16:47:55 +01004374 if (IS_GEN5(dev))
4375 return;
4376
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004377 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4378 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004379 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004380 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004381 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky31a53362013-11-02 21:07:04 -07004382 else if (IS_GEN8(dev))
4383 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004384 else
4385 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004386}
Daniel Vettere21af882012-02-09 20:53:27 +01004387
Chris Wilson67b1b572012-07-05 23:49:40 +01004388static bool
4389intel_enable_blt(struct drm_device *dev)
4390{
4391 if (!HAS_BLT(dev))
4392 return false;
4393
4394 /* The blitter was dysfunctional on early prototypes */
4395 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4396 DRM_INFO("BLT not supported on this pre-production hardware;"
4397 " graphics performance will be degraded.\n");
4398 return false;
4399 }
4400
4401 return true;
4402}
4403
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004404static int i915_gem_init_rings(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004405{
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004406 struct drm_i915_private *dev_priv = dev->dev_private;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004407 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004408
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004409 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004410 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00004411 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004412
4413 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004414 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004415 if (ret)
4416 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004417 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004418
Chris Wilson67b1b572012-07-05 23:49:40 +01004419 if (intel_enable_blt(dev)) {
Chris Wilson549f7362010-10-19 11:19:32 +01004420 ret = intel_init_blt_ring_buffer(dev);
4421 if (ret)
4422 goto cleanup_bsd_ring;
4423 }
4424
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004425 if (HAS_VEBOX(dev)) {
4426 ret = intel_init_vebox_ring_buffer(dev);
4427 if (ret)
4428 goto cleanup_blt_ring;
4429 }
 4430
Mika Kuoppala99433932013-01-22 14:12:17 +02004432 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4433 if (ret)
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004434 goto cleanup_vebox_ring;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004435
4436 return 0;
4437
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004438cleanup_vebox_ring:
4439 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004440cleanup_blt_ring:
4441 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4442cleanup_bsd_ring:
4443 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4444cleanup_render_ring:
4445 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4446
4447 return ret;
4448}
4449
4450int
4451i915_gem_init_hw(struct drm_device *dev)
4452{
4453 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004454 int ret, i;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004455
4456 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4457 return -EIO;
4458
Ben Widawsky59124502013-07-04 11:02:05 -07004459 if (dev_priv->ellc_size)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004460 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004461
Rodrigo Vivi94353732013-08-28 16:45:46 -03004462 if (IS_HSW_GT3(dev))
4463 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
4464 else
4465 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
4466
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004467 if (HAS_PCH_NOP(dev)) {
4468 u32 temp = I915_READ(GEN7_MSG_CTL);
4469 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4470 I915_WRITE(GEN7_MSG_CTL, temp);
4471 }
4472
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004473 i915_gem_init_swizzling(dev);
4474
4475 ret = i915_gem_init_rings(dev);
4476 if (ret)
Mika Kuoppala99433932013-01-22 14:12:17 +02004477 return ret;
4478
Ben Widawskyc3787e22013-09-17 21:12:44 -07004479 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4480 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4481
Ben Widawsky254f9652012-06-04 14:42:42 -07004482 /*
4483 * XXX: There was some w/a described somewhere suggesting loading
4484 * contexts before PPGTT.
4485 */
Ben Widawsky8245be32013-11-06 13:56:29 -02004486 ret = i915_gem_context_init(dev);
4487 if (ret) {
4488 i915_gem_cleanup_ringbuffer(dev);
4489 DRM_ERROR("Context initialization failed %d\n", ret);
4490 return ret;
4491 }
4492
Ben Widawskyb7c36d22013-04-08 18:43:56 -07004493 if (dev_priv->mm.aliasing_ppgtt) {
4494 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4495 if (ret) {
4496 i915_gem_cleanup_aliasing_ppgtt(dev);
4497 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4498 }
4499 }
Daniel Vettere21af882012-02-09 20:53:27 +01004500
Chris Wilson68f95ba2010-05-27 13:18:22 +01004501 return 0;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004502}
4503
Chris Wilson1070a422012-04-24 15:47:41 +01004504int i915_gem_init(struct drm_device *dev)
4505{
4506 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson1070a422012-04-24 15:47:41 +01004507 int ret;
4508
Chris Wilson1070a422012-04-24 15:47:41 +01004509 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004510
4511 if (IS_VALLEYVIEW(dev)) {
4512 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4513 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4514 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4515 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4516 }
4517
Ben Widawskyd7e50082012-12-18 10:31:25 -08004518 i915_gem_init_global_gtt(dev);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004519
Chris Wilson1070a422012-04-24 15:47:41 +01004520 ret = i915_gem_init_hw(dev);
4521 mutex_unlock(&dev->struct_mutex);
4522 if (ret) {
4523 i915_gem_cleanup_aliasing_ppgtt(dev);
4524 return ret;
4525 }
4526
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004527 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4528 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4529 dev_priv->dri1.allow_batchbuffer = 1;
Chris Wilson1070a422012-04-24 15:47:41 +01004530 return 0;
4531}
4532
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004533void
4534i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4535{
4536 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004537 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004538 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004539
Chris Wilsonb4519512012-05-11 14:29:30 +01004540 for_each_ring(ring, dev_priv, i)
4541 intel_cleanup_ring_buffer(ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004542}
4543
4544int
Eric Anholt673a3942008-07-30 12:06:12 -07004545i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4546 struct drm_file *file_priv)
4547{
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004548 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004549 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004550
Jesse Barnes79e53942008-11-07 14:24:08 -08004551 if (drm_core_check_feature(dev, DRIVER_MODESET))
4552 return 0;
4553
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004554 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004555 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004556 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004557 }
4558
Eric Anholt673a3942008-07-30 12:06:12 -07004559 mutex_lock(&dev->struct_mutex);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004560 dev_priv->ums.mm_suspended = 0;
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004561
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004562 ret = i915_gem_init_hw(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004563 if (ret != 0) {
4564 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004565 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004566 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004567
Ben Widawsky5cef07e2013-07-16 16:50:08 -07004568 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004569 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004570
Chris Wilson5f353082010-06-07 14:03:03 +01004571 ret = drm_irq_install(dev);
4572 if (ret)
4573 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004574
Eric Anholt673a3942008-07-30 12:06:12 -07004575 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01004576
4577cleanup_ringbuffer:
4578 mutex_lock(&dev->struct_mutex);
4579 i915_gem_cleanup_ringbuffer(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004580 dev_priv->ums.mm_suspended = 1;
Chris Wilson5f353082010-06-07 14:03:03 +01004581 mutex_unlock(&dev->struct_mutex);
4582
4583 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004584}
4585
4586int
4587i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4588 struct drm_file *file_priv)
4589{
Jesse Barnes79e53942008-11-07 14:24:08 -08004590 if (drm_core_check_feature(dev, DRIVER_MODESET))
4591 return 0;
4592
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004593 drm_irq_uninstall(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004594
Chris Wilson45c5f202013-10-16 11:50:01 +01004595 return i915_gem_suspend(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004596}
4597
4598void
4599i915_gem_lastclose(struct drm_device *dev)
4600{
4601 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004602
Eric Anholte806b492009-01-22 09:56:58 -08004603 if (drm_core_check_feature(dev, DRIVER_MODESET))
4604 return;
4605
Chris Wilson45c5f202013-10-16 11:50:01 +01004606 ret = i915_gem_suspend(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004607 if (ret)
4608 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004609}
4610
Chris Wilson64193402010-10-24 12:38:05 +01004611static void
4612init_ring_lists(struct intel_ring_buffer *ring)
4613{
4614 INIT_LIST_HEAD(&ring->active_list);
4615 INIT_LIST_HEAD(&ring->request_list);
Chris Wilson64193402010-10-24 12:38:05 +01004616}
4617
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004618static void i915_init_vm(struct drm_i915_private *dev_priv,
4619 struct i915_address_space *vm)
4620{
4621 vm->dev = dev_priv->dev;
4622 INIT_LIST_HEAD(&vm->active_list);
4623 INIT_LIST_HEAD(&vm->inactive_list);
4624 INIT_LIST_HEAD(&vm->global_link);
4625 list_add(&vm->global_link, &dev_priv->vm_list);
4626}
4627
Eric Anholt673a3942008-07-30 12:06:12 -07004628void
4629i915_gem_load(struct drm_device *dev)
4630{
4631 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson42dcedd2012-11-15 11:32:30 +00004632 int i;
4633
4634 dev_priv->slab =
4635 kmem_cache_create("i915_gem_object",
4636 sizeof(struct drm_i915_gem_object), 0,
4637 SLAB_HWCACHE_ALIGN,
4638 NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07004639
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004640 INIT_LIST_HEAD(&dev_priv->vm_list);
4641 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4642
Ben Widawskya33afea2013-09-17 21:12:45 -07004643 INIT_LIST_HEAD(&dev_priv->context_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02004644 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4645 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004646 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004647 for (i = 0; i < I915_NUM_RINGS; i++)
4648 init_ring_lists(&dev_priv->ring[i]);
Daniel Vetter4b9de732011-10-09 21:52:02 +02004649 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
Daniel Vetter007cc8a2010-04-28 11:02:31 +02004650 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004651 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4652 i915_gem_retire_work_handler);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004653 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4654 i915_gem_idle_work_handler);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004655 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01004656
Dave Airlie94400122010-07-20 13:15:31 +10004657 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4658 if (IS_GEN3(dev)) {
Daniel Vetter50743292012-04-26 22:02:54 +02004659 I915_WRITE(MI_ARB_STATE,
4660 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
Dave Airlie94400122010-07-20 13:15:31 +10004661 }
4662
Chris Wilson72bfa192010-12-19 11:42:05 +00004663 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4664
Jesse Barnesde151cf2008-11-12 10:03:55 -08004665 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08004666 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4667 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004668
Ville Syrjälä42b5aea2013-04-09 13:02:47 +03004669 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4670 dev_priv->num_fence_regs = 32;
4671 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004672 dev_priv->num_fence_regs = 16;
4673 else
4674 dev_priv->num_fence_regs = 8;
4675
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004676 /* Initialize fence registers to zero */
4678 i915_gem_restore_fences(dev);
Eric Anholt10ed13e2011-05-06 13:53:49 -07004679
Eric Anholt673a3942008-07-30 12:06:12 -07004680 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004681 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01004682
Chris Wilsonce453d82011-02-21 14:43:56 +00004683 dev_priv->mm.interruptible = true;
4684
Dave Chinner7dc19d52013-08-28 10:18:11 +10004685 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4686 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
Chris Wilson17250b72010-10-28 12:51:39 +01004687 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4688 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07004689}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004690
4691/*
4692 * Create a physically contiguous memory object for this object
4693 * e.g. for cursor + overlay regs
4694 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr,
		      phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;

kfree_obj:
	kfree(phys_obj);
	return ret;
}
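
/* Tear down a single phys object slot: detach any GEM object still
 * bound to it, restore the CPU mapping to write-back on x86, and
 * return the contiguous allocation to the PCI DMA pool.
 */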
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr,
		      phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}
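
/* Release every phys object slot, from I915_GEM_PHYS_CURSOR_0 up to
 * I915_MAX_PHYS_OBJECT.
 */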
void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
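
/* Copy the contents of the phys object back into the object's shmem
 * backing store, page by page, clflushing and dirtying each page so
 * nothing is lost if it is later swapped out, then unlink the object
 * from its phys slot.
 */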
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i * PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	i915_gem_chipset_flush(dev);

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}
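
/* Bind a GEM object to a phys object slot, allocating the slot on
 * first use, and seed the contiguous buffer with the object's current
 * shmem contents so the hardware sees the right data from the start.
 */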
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}
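
/* pwrite backend for phys objects: copy user data straight into the
 * contiguous buffer. The non-faulting atomic copy is tried first; if
 * it fails, struct_mutex is dropped around a sleeping copy_from_user(),
 * which is safe because the phys backing store is fixed for the
 * object's lifetime.
 */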
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}
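
/* Called when a client's file is closed: cancel its idle work and
 * orphan any of its requests still in flight so that later request
 * retirement never dereferences the freed file_priv.
 */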
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	cancel_delayed_work_sync(&file_priv->mm.idle_work);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
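
/* Delayed per-file work: clear the client's RPS wait-boost flag once
 * the file has been idle long enough for this work to fire.
 */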
static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}
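
/* Set up per-client GEM state on file open: the request list, the
 * file idle work and the context idr.
 */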
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	idr_init(&file_priv->context_idr);

	return 0;
}
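
/* Best-effort test of whether @task currently owns @mutex. This is
 * only trustworthy when the kernel tracks mutex owners (SMP or mutex
 * debugging); on UP without debugging we must conservatively say no.
 */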
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
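
/* Shrinker count callback: report how many pages could plausibly be
 * reclaimed, i.e. unpinned pages on the unbound list plus inactive,
 * unpinned pages on the bound list. Returns 0 when struct_mutex can
 * be neither taken nor stolen, telling the VM nothing is on offer.
 */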
static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	bool unlock = true;
	unsigned long count;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return 0;

		unlock = false;
	}

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->active)
			continue;

		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

/* All the new VM stuff */
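
/* Report the start of @o's vma node within @vm, or -1 if it has none.
 * Lookups against the aliasing PPGTT are redirected to the global GTT,
 * whose address layout it mirrors.
 */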
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));
	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}

	return -1;
}
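
/* True if @o has an allocated vma node in @vm. */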
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
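
/* True if @o is bound into any address space at all. */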
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
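
/* Size of @o's vma node in @vm, or 0 if the object is not bound there. */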
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}
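
/* Shrinker scan callback: purge the cheapest (purgeable) objects
 * first, then shrink other unpinned objects, and finally drop
 * everything if the target is still not met. SHRINK_STOP tells the
 * VM to back off when struct_mutex can be neither taken nor stolen.
 */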
static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock = true;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return SHRINK_STOP;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return SHRINK_STOP;

		unlock = false;
	}

	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
	if (freed < sc->nr_to_scan)
		freed += __i915_gem_shrink(dev_priv,
					   sc->nr_to_scan - freed,
					   false);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink_all(dev_priv);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}
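
/* Return @obj's GGTT vma, relying on the invariant that the GGTT vma,
 * when present, is always first in the object's vma list; WARN and
 * return NULL if that does not hold.
 */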
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
	if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
		return NULL;

	return vma;
}