/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
			   bool map_and_fenceable,
			   bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: the sequence number we are waiting on
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

	return 0;
}

Chris Wilsonb3612372012-08-24 09:35:08 +01001160/**
1161 * Ensures that all rendering to the object has completed and the object is
1162 * safe to unbind from the GTT or access from the CPU.
1163 */
1164static __must_check int
1165i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1166 bool readonly)
1167{
1168 struct intel_ring_buffer *ring = obj->ring;
1169 u32 seqno;
1170 int ret;
1171
1172 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1173 if (seqno == 0)
1174 return 0;
1175
1176 ret = i915_wait_seqno(ring, seqno);
1177 if (ret)
1178 return ret;
1179
Chris Wilsond26e3af2013-06-29 22:05:26 +01001180 return i915_gem_object_wait_rendering__tail(obj, ring);
Chris Wilsonb3612372012-08-24 09:35:08 +01001181}
1182
Chris Wilson3236f572012-08-24 09:35:09 +01001183/* A nonblocking variant of the above wait. This is a highly dangerous routine
1184 * as the object state may change during this call.
1185 */
1186static __must_check int
1187i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
Chris Wilson6e4930f2014-02-07 18:37:06 -02001188 struct drm_i915_file_private *file_priv,
Chris Wilson3236f572012-08-24 09:35:09 +01001189 bool readonly)
1190{
1191 struct drm_device *dev = obj->base.dev;
1192 struct drm_i915_private *dev_priv = dev->dev_private;
1193 struct intel_ring_buffer *ring = obj->ring;
Daniel Vetterf69061b2012-12-06 09:01:42 +01001194 unsigned reset_counter;
Chris Wilson3236f572012-08-24 09:35:09 +01001195 u32 seqno;
1196 int ret;
1197
1198 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1199 BUG_ON(!dev_priv->mm.interruptible);
1200
1201 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1202 if (seqno == 0)
1203 return 0;
1204
Daniel Vetter33196de2012-11-14 17:14:05 +01001205 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
Chris Wilson3236f572012-08-24 09:35:09 +01001206 if (ret)
1207 return ret;
1208
1209 ret = i915_gem_check_olr(ring, seqno);
1210 if (ret)
1211 return ret;
1212
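	/* Note: the reset counter is sampled while struct_mutex is still held;
	 * if a GPU reset occurs after we drop the lock, __wait_seqno() sees
	 * the counter change and returns -EAGAIN (or -EIO if terminally
	 * wedged) rather than waiting on a request the reset has discarded.
	 */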
Daniel Vetterf69061b2012-12-06 09:01:42 +01001213 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson3236f572012-08-24 09:35:09 +01001214 mutex_unlock(&dev->struct_mutex);
Chris Wilson6e4930f2014-02-07 18:37:06 -02001215 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
Chris Wilson3236f572012-08-24 09:35:09 +01001216 mutex_lock(&dev->struct_mutex);
Chris Wilsond26e3af2013-06-29 22:05:26 +01001217 if (ret)
1218 return ret;
Chris Wilson3236f572012-08-24 09:35:09 +01001219
Chris Wilsond26e3af2013-06-29 22:05:26 +01001220 return i915_gem_object_wait_rendering__tail(obj, ring);
Chris Wilson3236f572012-08-24 09:35:09 +01001221}
1222
Eric Anholt673a3942008-07-30 12:06:12 -07001223/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001224 * Called when user space prepares to use an object with the CPU, either
1225 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -07001226 */
1227int
1228i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001229 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001230{
1231 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001232 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001233 uint32_t read_domains = args->read_domains;
1234 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001235 int ret;
1236
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001237 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +01001238 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001239 return -EINVAL;
1240
Chris Wilson21d509e2009-06-06 09:46:02 +01001241 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001242 return -EINVAL;
1243
1244 /* Having something in the write domain implies it's in the read
1245 * domain, and only that read domain. Enforce that in the request.
1246 */
1247 if (write_domain != 0 && read_domains != write_domain)
1248 return -EINVAL;
1249
Chris Wilson76c1dec2010-09-25 11:22:51 +01001250 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001251 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001252 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001253
Chris Wilson05394f32010-11-08 19:18:58 +00001254 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001255 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001256 ret = -ENOENT;
1257 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +01001258 }
Jesse Barnes652c3932009-08-17 13:31:43 -07001259
Chris Wilson3236f572012-08-24 09:35:09 +01001260 /* Try to flush the object off the GPU without holding the lock.
1261 * We will repeat the flush holding the lock in the normal manner
1262 * to catch cases where we are gazumped.
1263 */
Chris Wilson6e4930f2014-02-07 18:37:06 -02001264 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1265 file->driver_priv,
1266 !write_domain);
Chris Wilson3236f572012-08-24 09:35:09 +01001267 if (ret)
1268 goto unref;
1269
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001270 if (read_domains & I915_GEM_DOMAIN_GTT) {
1271 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001272
1273 /* Silently promote "you're not bound, there was nothing to do"
1274 * to success, since the client was just asking us to
1275 * make sure everything was done.
1276 */
1277 if (ret == -EINVAL)
1278 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001279 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001280 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001281 }
1282
Chris Wilson3236f572012-08-24 09:35:09 +01001283unref:
Chris Wilson05394f32010-11-08 19:18:58 +00001284 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001285unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001286 mutex_unlock(&dev->struct_mutex);
1287 return ret;
1288}
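
/*
 * Illustrative userspace sketch (not part of this file; handle and fd are
 * placeholders): a client about to write through a CPU mmap would typically
 * issue, via libdrm's drmIoctl(), something like
 *
 *     struct drm_i915_gem_set_domain sd = {
 *             .handle = handle,
 *             .read_domains = I915_GEM_DOMAIN_CPU,
 *             .write_domain = I915_GEM_DOMAIN_CPU,
 *     };
 *     drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *
 * GPU domains are rejected above, and a non-zero write_domain must match
 * read_domains.
 */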
1289
1290/**
1291 * Called when user space has done writes to this buffer
1292 */
1293int
1294i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001295 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001296{
1297 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001298 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001299 int ret = 0;
1300
Chris Wilson76c1dec2010-09-25 11:22:51 +01001301 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001302 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001303 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001304
Chris Wilson05394f32010-11-08 19:18:58 +00001305 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001306 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001307 ret = -ENOENT;
1308 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001309 }
1310
Eric Anholt673a3942008-07-30 12:06:12 -07001311 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson2c225692013-08-09 12:26:45 +01001312 if (obj->pin_display)
1313 i915_gem_object_flush_cpu_write_domain(obj, true);
Eric Anholte47c68e2008-11-14 13:35:19 -08001314
Chris Wilson05394f32010-11-08 19:18:58 +00001315 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001316unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001317 mutex_unlock(&dev->struct_mutex);
1318 return ret;
1319}
1320
1321/**
1322 * Maps the contents of an object, returning the address it is mapped
1323 * into.
1324 *
1325 * While the mapping holds a reference on the contents of the object, it doesn't
1326 * imply a ref on the object itself.
1327 */
1328int
1329i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001330 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001331{
1332 struct drm_i915_gem_mmap *args = data;
1333 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001334 unsigned long addr;
1335
Chris Wilson05394f32010-11-08 19:18:58 +00001336 obj = drm_gem_object_lookup(dev, file, args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07001337 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001338 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001339
Daniel Vetter1286ff72012-05-10 15:25:09 +02001340 /* prime objects have no backing filp to GEM mmap
1341 * pages from.
1342 */
1343 if (!obj->filp) {
1344 drm_gem_object_unreference_unlocked(obj);
1345 return -EINVAL;
1346 }
1347
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001348 addr = vm_mmap(obj->filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001349 PROT_READ | PROT_WRITE, MAP_SHARED,
1350 args->offset);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001351 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001352 if (IS_ERR((void *)addr))
1353 return addr;
1354
1355 args->addr_ptr = (uint64_t) addr;
1356
1357 return 0;
1358}
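
/*
 * Illustrative userspace sketch (not part of this file; handle, fd and size
 * are placeholders): the CPU mmap path is driven roughly as
 *
 *     struct drm_i915_gem_mmap arg = {
 *             .handle = handle,
 *             .offset = 0,
 *             .size = size,
 *     };
 *     drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
 *     ptr = (void *)(uintptr_t)arg.addr_ptr;
 *
 * usually followed by a set_domain ioctl before the CPU touches the pages.
 */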
1359
Jesse Barnesde151cf2008-11-12 10:03:55 -08001360/**
1361 * i915_gem_fault - fault a page into the GTT
 1362 * @vma: VMA in question
 1363 * @vmf: fault info
1364 *
 1365 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1366 * from userspace. The fault handler takes care of binding the object to
1367 * the GTT (if needed), allocating and programming a fence register (again,
1368 * only if needed based on whether the old reg is still valid or the object
1369 * is tiled) and inserting a new PTE into the faulting process.
1370 *
1371 * Note that the faulting process may involve evicting existing objects
1372 * from the GTT and/or fence registers to make room. So performance may
1373 * suffer if the GTT working set is large or there are few fence registers
1374 * left.
1375 */
1376int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1377{
Chris Wilson05394f32010-11-08 19:18:58 +00001378 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1379 struct drm_device *dev = obj->base.dev;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001380 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001381 pgoff_t page_offset;
1382 unsigned long pfn;
1383 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001384 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001385
Paulo Zanonif65c9162013-11-27 18:20:34 -02001386 intel_runtime_pm_get(dev_priv);
1387
Jesse Barnesde151cf2008-11-12 10:03:55 -08001388 /* We don't use vmf->pgoff since that has the fake offset */
1389 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1390 PAGE_SHIFT;
1391
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001392 ret = i915_mutex_lock_interruptible(dev);
1393 if (ret)
1394 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001395
Chris Wilsondb53a302011-02-03 11:57:46 +00001396 trace_i915_gem_object_fault(obj, page_offset, true, write);
1397
Chris Wilson6e4930f2014-02-07 18:37:06 -02001398 /* Try to flush the object off the GPU first without holding the lock.
1399 * Upon reacquiring the lock, we will perform our sanity checks and then
1400 * repeat the flush holding the lock in the normal manner to catch cases
1401 * where we are gazumped.
1402 */
1403 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1404 if (ret)
1405 goto unlock;
1406
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001407 /* Access to snoopable pages through the GTT is incoherent. */
1408 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1409 ret = -EINVAL;
1410 goto unlock;
1411 }
1412
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001413 /* Now bind it into the GTT if needed */
Ben Widawskyc37e2202013-07-31 16:59:58 -07001414 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001415 if (ret)
1416 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001417
Chris Wilsonc9839302012-11-20 10:45:17 +00001418 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1419 if (ret)
1420 goto unpin;
1421
1422 ret = i915_gem_object_get_fence(obj);
1423 if (ret)
1424 goto unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001425
Chris Wilson6299f992010-11-24 12:23:44 +00001426 obj->fault_mappable = true;
1427
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001428 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1429 pfn >>= PAGE_SHIFT;
1430 pfn += page_offset;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001431
1432 /* Finally, remap it using the new GTT offset */
1433 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc9839302012-11-20 10:45:17 +00001434unpin:
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08001435 i915_gem_object_ggtt_unpin(obj);
Chris Wilsonc7150892009-09-23 00:43:56 +01001436unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001437 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001438out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001439 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001440 case -EIO:
Daniel Vettera9340cc2012-07-04 22:18:42 +02001441 /* If this -EIO is due to a gpu hang, give the reset code a
1442 * chance to clean up the mess. Otherwise return the proper
1443 * SIGBUS. */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001444 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
1445 ret = VM_FAULT_SIGBUS;
1446 break;
1447 }
Chris Wilson045e7692010-11-07 09:18:22 +00001448 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001449 /*
1450 * EAGAIN means the gpu is hung and we'll wait for the error
1451 * handler to reset everything when re-faulting in
1452 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001453 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001454 case 0:
1455 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001456 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001457 case -EBUSY:
1458 /*
1459 * EBUSY is ok: this just means that another thread
1460 * already did the job.
1461 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001462 ret = VM_FAULT_NOPAGE;
1463 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001464 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001465 ret = VM_FAULT_OOM;
1466 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001467 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00001468 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001469 ret = VM_FAULT_SIGBUS;
1470 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001471 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001472 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02001473 ret = VM_FAULT_SIGBUS;
1474 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001475 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02001476
1477 intel_runtime_pm_put(dev_priv);
1478 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001479}
1480
Paulo Zanoni48018a52013-12-13 15:22:31 -02001481void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1482{
1483 struct i915_vma *vma;
1484
1485 /*
1486 * Only the global gtt is relevant for gtt memory mappings, so restrict
1487 * list traversal to objects bound into the global address space. Note
1488 * that the active list should be empty, but better safe than sorry.
1489 */
1490 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1491 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1492 i915_gem_release_mmap(vma->obj);
1493 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1494 i915_gem_release_mmap(vma->obj);
1495}
1496
Jesse Barnesde151cf2008-11-12 10:03:55 -08001497/**
Chris Wilson901782b2009-07-10 08:18:50 +01001498 * i915_gem_release_mmap - remove physical page mappings
1499 * @obj: obj in question
1500 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001501 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001502 * relinquish ownership of the pages back to the system.
1503 *
1504 * It is vital that we remove the page mapping if we have mapped a tiled
1505 * object through the GTT and then lose the fence register due to
1506 * resource pressure. Similarly if the object has been moved out of the
 1507 * aperture, then pages mapped into userspace must be revoked. Removing the
1508 * mapping will then trigger a page fault on the next user access, allowing
1509 * fixup by i915_gem_fault().
1510 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001511void
Chris Wilson05394f32010-11-08 19:18:58 +00001512i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001513{
Chris Wilson6299f992010-11-24 12:23:44 +00001514 if (!obj->fault_mappable)
1515 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001516
David Herrmann51335df2013-07-24 21:10:03 +02001517 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
Chris Wilson6299f992010-11-24 12:23:44 +00001518 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001519}
1520
Imre Deak0fa87792013-01-07 21:47:35 +02001521uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001522i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001523{
Chris Wilsone28f8712011-07-18 13:11:49 -07001524 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001525
1526 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001527 tiling_mode == I915_TILING_NONE)
1528 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001529
1530 /* Previous chips need a power-of-two fence region when tiling */
1531 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001532 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001533 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001534 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001535
Chris Wilsone28f8712011-07-18 13:11:49 -07001536 while (gtt_size < size)
1537 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001538
Chris Wilsone28f8712011-07-18 13:11:49 -07001539 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001540}
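
/*
 * Worked example (illustrative): a 150KB tiled object on gen3 rounds up to
 * the 1MB minimum fence region, a 1.5MB one rounds up to 2MB, while on
 * gen4+ (or for untiled objects) the object size is returned unchanged.
 */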
1541
Jesse Barnesde151cf2008-11-12 10:03:55 -08001542/**
1543 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 1544 * @dev: drm device
 * @size: object size
 * @tiling_mode: tiling mode of the object
 * @fenced: whether a fence register mapping will be used
1545 *
1546 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001547 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001548 */
Imre Deakd8651102013-01-07 21:47:33 +02001549uint32_t
1550i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1551 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001552{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001553 /*
1554 * Minimum alignment is 4k (GTT page size), but might be greater
1555 * if a fence register is needed for the object.
1556 */
Imre Deakd8651102013-01-07 21:47:33 +02001557 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001558 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001559 return 4096;
1560
1561 /*
1562 * Previous chips need to be aligned to the size of the smallest
1563 * fence register that can contain the object.
1564 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001565 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001566}
1567
Chris Wilsond8cb5082012-08-11 15:41:03 +01001568static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1569{
1570 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1571 int ret;
1572
David Herrmann0de23972013-07-24 21:07:52 +02001573 if (drm_vma_node_has_offset(&obj->base.vma_node))
Chris Wilsond8cb5082012-08-11 15:41:03 +01001574 return 0;
1575
Daniel Vetterda494d72012-12-20 15:11:16 +01001576 dev_priv->mm.shrinker_no_lock_stealing = true;
1577
Chris Wilsond8cb5082012-08-11 15:41:03 +01001578 ret = drm_gem_create_mmap_offset(&obj->base);
1579 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001580 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001581
1582 /* Badly fragmented mmap space? The only way we can recover
1583 * space is by destroying unwanted objects. We can't randomly release
1584 * mmap_offsets as userspace expects them to be persistent for the
 1585 * lifetime of the objects. The closest we can do is to release the
1586 * offsets on purgeable objects by truncating it and marking it purged,
1587 * which prevents userspace from ever using that object again.
1588 */
1589 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1590 ret = drm_gem_create_mmap_offset(&obj->base);
1591 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001592 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001593
1594 i915_gem_shrink_all(dev_priv);
Daniel Vetterda494d72012-12-20 15:11:16 +01001595 ret = drm_gem_create_mmap_offset(&obj->base);
1596out:
1597 dev_priv->mm.shrinker_no_lock_stealing = false;
1598
1599 return ret;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001600}
1601
1602static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1603{
Chris Wilsond8cb5082012-08-11 15:41:03 +01001604 drm_gem_free_mmap_offset(&obj->base);
1605}
1606
Jesse Barnesde151cf2008-11-12 10:03:55 -08001607int
Dave Airlieff72145b2011-02-07 12:16:14 +10001608i915_gem_mmap_gtt(struct drm_file *file,
1609 struct drm_device *dev,
1610 uint32_t handle,
1611 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001612{
Chris Wilsonda761a62010-10-27 17:37:08 +01001613 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001614 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001615 int ret;
1616
Chris Wilson76c1dec2010-09-25 11:22:51 +01001617 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001618 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001619 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001620
Dave Airlieff72145b2011-02-07 12:16:14 +10001621 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001622 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001623 ret = -ENOENT;
1624 goto unlock;
1625 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001626
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001627 if (obj->base.size > dev_priv->gtt.mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001628 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001629 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001630 }
1631
Chris Wilson05394f32010-11-08 19:18:58 +00001632 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00001633 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00001634 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001635 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001636 }
1637
Chris Wilsond8cb5082012-08-11 15:41:03 +01001638 ret = i915_gem_object_create_mmap_offset(obj);
1639 if (ret)
1640 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001641
David Herrmann0de23972013-07-24 21:07:52 +02001642 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001643
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001644out:
Chris Wilson05394f32010-11-08 19:18:58 +00001645 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001646unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001647 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001648 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001649}
1650
Dave Airlieff72145b2011-02-07 12:16:14 +10001651/**
1652 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1653 * @dev: DRM device
1654 * @data: GTT mapping ioctl data
1655 * @file: GEM object info
1656 *
1657 * Simply returns the fake offset to userspace so it can mmap it.
1658 * The mmap call will end up in drm_gem_mmap(), which will set things
1659 * up so we can get faults in the handler above.
1660 *
1661 * The fault handler will take care of binding the object into the GTT
1662 * (since it may have been evicted to make room for something), allocating
1663 * a fence register, and mapping the appropriate aperture address into
1664 * userspace.
1665 */
1666int
1667i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1668 struct drm_file *file)
1669{
1670 struct drm_i915_gem_mmap_gtt *args = data;
1671
Dave Airlieff72145b2011-02-07 12:16:14 +10001672 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1673}
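
/*
 * Illustrative userspace sketch (not part of this file; handle, fd and size
 * are placeholders): GTT mmaps are a two-step dance, first fetching the fake
 * offset and then mmapping the DRM fd at that offset so that faults land in
 * i915_gem_fault() above:
 *
 *     struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *     drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *     ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                fd, arg.offset);
 */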
1674
Daniel Vetter225067e2012-08-20 10:23:20 +02001675/* Immediately discard the backing storage */
1676static void
1677i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001678{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001679 struct inode *inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001680
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001681 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001682
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001683 if (obj->base.filp == NULL)
1684 return;
1685
Daniel Vetter225067e2012-08-20 10:23:20 +02001686 /* Our goal here is to return as much of the memory as
1687 * is possible back to the system as we are called from OOM.
1688 * To do this we must instruct the shmfs to drop all of its
1689 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01001690 */
Al Viro496ad9a2013-01-23 17:07:38 -05001691 inode = file_inode(obj->base.filp);
Daniel Vetter225067e2012-08-20 10:23:20 +02001692 shmem_truncate_range(inode, 0, (loff_t)-1);
Hugh Dickins5949eac2011-06-27 16:18:18 -07001693
Daniel Vetter225067e2012-08-20 10:23:20 +02001694 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001695}
Chris Wilsone5281cc2010-10-28 13:45:36 +01001696
Daniel Vetter225067e2012-08-20 10:23:20 +02001697static inline int
1698i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1699{
1700 return obj->madv == I915_MADV_DONTNEED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001701}
1702
Chris Wilson5cdf5882010-09-27 15:51:07 +01001703static void
Chris Wilson05394f32010-11-08 19:18:58 +00001704i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001705{
Imre Deak90797e62013-02-18 19:28:03 +02001706 struct sg_page_iter sg_iter;
1707 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02001708
Chris Wilson05394f32010-11-08 19:18:58 +00001709 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001710
Chris Wilson6c085a72012-08-20 11:40:46 +02001711 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1712 if (ret) {
1713 /* In the event of a disaster, abandon all caches and
1714 * hope for the best.
1715 */
1716 WARN_ON(ret != -EIO);
Chris Wilson2c225692013-08-09 12:26:45 +01001717 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02001718 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1719 }
1720
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001721 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001722 i915_gem_object_save_bit_17_swizzle(obj);
1723
Chris Wilson05394f32010-11-08 19:18:58 +00001724 if (obj->madv == I915_MADV_DONTNEED)
1725 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001726
Imre Deak90797e62013-02-18 19:28:03 +02001727 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
Imre Deak2db76d72013-03-26 15:14:18 +02001728 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +01001729
Chris Wilson05394f32010-11-08 19:18:58 +00001730 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01001731 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001732
Chris Wilson05394f32010-11-08 19:18:58 +00001733 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01001734 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001735
Chris Wilson9da3da62012-06-01 15:20:22 +01001736 page_cache_release(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001737 }
Chris Wilson05394f32010-11-08 19:18:58 +00001738 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001739
Chris Wilson9da3da62012-06-01 15:20:22 +01001740 sg_free_table(obj->pages);
1741 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01001742}
1743
Chris Wilsondd624af2013-01-15 12:39:35 +00001744int
Chris Wilson37e680a2012-06-07 15:38:42 +01001745i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1746{
1747 const struct drm_i915_gem_object_ops *ops = obj->ops;
1748
Chris Wilson2f745ad2012-09-04 21:02:58 +01001749 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01001750 return 0;
1751
Chris Wilsona5570172012-09-04 21:02:54 +01001752 if (obj->pages_pin_count)
1753 return -EBUSY;
1754
Ben Widawsky98438772013-07-31 17:00:12 -07001755 BUG_ON(i915_gem_obj_bound_any(obj));
Ben Widawsky3e123022013-07-31 17:00:04 -07001756
Chris Wilsona2165e32012-12-03 11:49:00 +00001757 /* ->put_pages might need to allocate memory for the bit17 swizzle
1758 * array, hence protect them from being reaped by removing them from gtt
1759 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07001760 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00001761
Chris Wilson37e680a2012-06-07 15:38:42 +01001762 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001763 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02001764
Chris Wilson6c085a72012-08-20 11:40:46 +02001765 if (i915_gem_object_is_purgeable(obj))
1766 i915_gem_object_truncate(obj);
1767
1768 return 0;
1769}
1770
Chris Wilsond9973b42013-10-04 10:33:00 +01001771static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001772__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1773 bool purgeable_only)
Chris Wilson6c085a72012-08-20 11:40:46 +02001774{
Chris Wilson57094f82013-09-04 10:45:50 +01001775 struct list_head still_bound_list;
Chris Wilson6c085a72012-08-20 11:40:46 +02001776 struct drm_i915_gem_object *obj, *next;
Chris Wilsond9973b42013-10-04 10:33:00 +01001777 unsigned long count = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001778
1779 list_for_each_entry_safe(obj, next,
1780 &dev_priv->mm.unbound_list,
Ben Widawsky35c20a62013-05-31 11:28:48 -07001781 global_list) {
Daniel Vetter93927ca2013-01-10 18:03:00 +01001782 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
Chris Wilson37e680a2012-06-07 15:38:42 +01001783 i915_gem_object_put_pages(obj) == 0) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001784 count += obj->base.size >> PAGE_SHIFT;
1785 if (count >= target)
1786 return count;
1787 }
1788 }
1789
Chris Wilson57094f82013-09-04 10:45:50 +01001790 /*
1791 * As we may completely rewrite the bound list whilst unbinding
1792 * (due to retiring requests) we have to strictly process only
 1793 * one element of the list at a time, and recheck the list
1794 * on every iteration.
1795 */
1796 INIT_LIST_HEAD(&still_bound_list);
1797 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001798 struct i915_vma *vma, *v;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001799
Chris Wilson57094f82013-09-04 10:45:50 +01001800 obj = list_first_entry(&dev_priv->mm.bound_list,
1801 typeof(*obj), global_list);
1802 list_move_tail(&obj->global_list, &still_bound_list);
1803
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001804 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1805 continue;
1806
Chris Wilson57094f82013-09-04 10:45:50 +01001807 /*
1808 * Hold a reference whilst we unbind this object, as we may
1809 * end up waiting for and retiring requests. This might
1810 * release the final reference (held by the active list)
 1811 * and result in the object being freed from under us.
1813 *
1814 * Note 1: Shrinking the bound list is special since only active
1815 * (and hence bound objects) can contain such limbo objects, so
1816 * we don't need special tricks for shrinking the unbound list.
1817 * The only other place where we have to be careful with active
1818 * objects suddenly disappearing due to retiring requests is the
1819 * eviction code.
1820 *
1821 * Note 2: Even though the bound list doesn't hold a reference
1822 * to the object we can safely grab one here: The final object
1823 * unreferencing and the bound_list are both protected by the
1824 * dev->struct_mutex and so we won't ever be able to observe an
 1825 * object on the bound_list with a reference count equal to 0.
1826 */
1827 drm_gem_object_reference(&obj->base);
1828
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001829 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1830 if (i915_vma_unbind(vma))
1831 break;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001832
Chris Wilson57094f82013-09-04 10:45:50 +01001833 if (i915_gem_object_put_pages(obj) == 0)
Chris Wilson6c085a72012-08-20 11:40:46 +02001834 count += obj->base.size >> PAGE_SHIFT;
Chris Wilson57094f82013-09-04 10:45:50 +01001835
1836 drm_gem_object_unreference(&obj->base);
Chris Wilson6c085a72012-08-20 11:40:46 +02001837 }
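	/* Put the survivors we parked on still_bound_list back onto the
	 * global bound list now that the walk is finished.
	 */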
Chris Wilson57094f82013-09-04 10:45:50 +01001838 list_splice(&still_bound_list, &dev_priv->mm.bound_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02001839
1840 return count;
1841}
1842
Chris Wilsond9973b42013-10-04 10:33:00 +01001843static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001844i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1845{
1846 return __i915_gem_shrink(dev_priv, target, true);
1847}
1848
Chris Wilsond9973b42013-10-04 10:33:00 +01001849static unsigned long
Chris Wilson6c085a72012-08-20 11:40:46 +02001850i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1851{
1852 struct drm_i915_gem_object *obj, *next;
Dave Chinner7dc19d52013-08-28 10:18:11 +10001853 long freed = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001854
1855 i915_gem_evict_everything(dev_priv->dev);
1856
Ben Widawsky35c20a62013-05-31 11:28:48 -07001857 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
Dave Chinner7dc19d52013-08-28 10:18:11 +10001858 global_list) {
Chris Wilsond9973b42013-10-04 10:33:00 +01001859 if (i915_gem_object_put_pages(obj) == 0)
Dave Chinner7dc19d52013-08-28 10:18:11 +10001860 freed += obj->base.size >> PAGE_SHIFT;
Dave Chinner7dc19d52013-08-28 10:18:11 +10001861 }
1862 return freed;
Daniel Vetter225067e2012-08-20 10:23:20 +02001863}
1864
Chris Wilson37e680a2012-06-07 15:38:42 +01001865static int
Chris Wilson6c085a72012-08-20 11:40:46 +02001866i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001867{
Chris Wilson6c085a72012-08-20 11:40:46 +02001868 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001869 int page_count, i;
1870 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01001871 struct sg_table *st;
1872 struct scatterlist *sg;
Imre Deak90797e62013-02-18 19:28:03 +02001873 struct sg_page_iter sg_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07001874 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02001875 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson6c085a72012-08-20 11:40:46 +02001876 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07001877
Chris Wilson6c085a72012-08-20 11:40:46 +02001878 /* Assert that the object is not currently in any GPU domain. As it
1879 * wasn't in the GTT, there shouldn't be any way it could have been in
1880 * a GPU cache
1881 */
1882 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1883 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1884
Chris Wilson9da3da62012-06-01 15:20:22 +01001885 st = kmalloc(sizeof(*st), GFP_KERNEL);
1886 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07001887 return -ENOMEM;
1888
Chris Wilson9da3da62012-06-01 15:20:22 +01001889 page_count = obj->base.size / PAGE_SIZE;
1890 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01001891 kfree(st);
1892 return -ENOMEM;
1893 }
1894
1895 /* Get the list of pages out of our struct file. They'll be pinned
1896 * at this point until we release them.
1897 *
1898 * Fail silently without starting the shrinker
1899 */
Al Viro496ad9a2013-01-23 17:07:38 -05001900 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilson6c085a72012-08-20 11:40:46 +02001901 gfp = mapping_gfp_mask(mapping);
Linus Torvaldscaf49192012-12-10 10:51:16 -08001902 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001903 gfp &= ~(__GFP_IO | __GFP_WAIT);
Imre Deak90797e62013-02-18 19:28:03 +02001904 sg = st->sgl;
1905 st->nents = 0;
1906 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001907 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1908 if (IS_ERR(page)) {
1909 i915_gem_purge(dev_priv, page_count);
1910 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1911 }
1912 if (IS_ERR(page)) {
1913 /* We've tried hard to allocate the memory by reaping
1914 * our own buffer, now let the real VM do its job and
1915 * go down in flames if truly OOM.
1916 */
Linus Torvaldscaf49192012-12-10 10:51:16 -08001917 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
Chris Wilson6c085a72012-08-20 11:40:46 +02001918 gfp |= __GFP_IO | __GFP_WAIT;
1919
1920 i915_gem_shrink_all(dev_priv);
1921 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1922 if (IS_ERR(page))
1923 goto err_pages;
1924
Linus Torvaldscaf49192012-12-10 10:51:16 -08001925 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001926 gfp &= ~(__GFP_IO | __GFP_WAIT);
1927 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001928#ifdef CONFIG_SWIOTLB
1929 if (swiotlb_nr_tbl()) {
1930 st->nents++;
1931 sg_set_page(sg, page, PAGE_SIZE, 0);
1932 sg = sg_next(sg);
1933 continue;
1934 }
1935#endif
Imre Deak90797e62013-02-18 19:28:03 +02001936 if (!i || page_to_pfn(page) != last_pfn + 1) {
1937 if (i)
1938 sg = sg_next(sg);
1939 st->nents++;
1940 sg_set_page(sg, page, PAGE_SIZE, 0);
1941 } else {
1942 sg->length += PAGE_SIZE;
1943 }
1944 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03001945
1946 /* Check that the i965g/gm workaround works. */
1947 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07001948 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001949#ifdef CONFIG_SWIOTLB
1950 if (!swiotlb_nr_tbl())
1951#endif
1952 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01001953 obj->pages = st;
1954
Eric Anholt673a3942008-07-30 12:06:12 -07001955 if (i915_gem_object_needs_bit17_swizzle(obj))
1956 i915_gem_object_do_bit_17_swizzle(obj);
1957
1958 return 0;
1959
1960err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02001961 sg_mark_end(sg);
1962 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
Imre Deak2db76d72013-03-26 15:14:18 +02001963 page_cache_release(sg_page_iter_page(&sg_iter));
Chris Wilson9da3da62012-06-01 15:20:22 +01001964 sg_free_table(st);
1965 kfree(st);
Eric Anholt673a3942008-07-30 12:06:12 -07001966 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07001967}
1968
Chris Wilson37e680a2012-06-07 15:38:42 +01001969/* Ensure that the associated pages are gathered from the backing storage
1970 * and pinned into our object. i915_gem_object_get_pages() may be called
1971 * multiple times before they are released by a single call to
1972 * i915_gem_object_put_pages() - once the pages are no longer referenced
1973 * either as a result of memory pressure (reaping pages under the shrinker)
1974 * or as the object is itself released.
1975 */
1976int
1977i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1978{
1979 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1980 const struct drm_i915_gem_object_ops *ops = obj->ops;
1981 int ret;
1982
Chris Wilson2f745ad2012-09-04 21:02:58 +01001983 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01001984 return 0;
1985
Chris Wilson43e28f02013-01-08 10:53:09 +00001986 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00001987 DRM_DEBUG("Attempting to obtain a purgeable object\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00001988 return -EFAULT;
Chris Wilson43e28f02013-01-08 10:53:09 +00001989 }
1990
Chris Wilsona5570172012-09-04 21:02:54 +01001991 BUG_ON(obj->pages_pin_count);
1992
Chris Wilson37e680a2012-06-07 15:38:42 +01001993 ret = ops->get_pages(obj);
1994 if (ret)
1995 return ret;
1996
Ben Widawsky35c20a62013-05-31 11:28:48 -07001997 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilson37e680a2012-06-07 15:38:42 +01001998 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001999}
2000
Ben Widawskye2d05a82013-09-24 09:57:58 -07002001static void
Chris Wilson05394f32010-11-08 19:18:58 +00002002i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson9d7730912012-11-27 16:22:52 +00002003 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002004{
Chris Wilson05394f32010-11-08 19:18:58 +00002005 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01002006 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9d7730912012-11-27 16:22:52 +00002007 u32 seqno = intel_ring_get_seqno(ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01002008
Zou Nan hai852835f2010-05-21 09:08:56 +08002009 BUG_ON(ring == NULL);
Chris Wilson02978ff2013-07-09 09:22:39 +01002010 if (obj->ring != ring && obj->last_write_seqno) {
2011 /* Keep the seqno relative to the current ring */
2012 obj->last_write_seqno = seqno;
2013 }
Chris Wilson05394f32010-11-08 19:18:58 +00002014 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07002015
2016 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00002017 if (!obj->active) {
2018 drm_gem_object_reference(&obj->base);
2019 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07002020 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01002021
Chris Wilson05394f32010-11-08 19:18:58 +00002022 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002023
Chris Wilson0201f1e2012-07-20 12:41:01 +01002024 obj->last_read_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00002025
Chris Wilsoncaea7472010-11-12 13:53:37 +00002026 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00002027 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002028
Chris Wilson7dd49062012-03-21 10:48:18 +00002029 /* Bump MRU to take account of the delayed flush */
2030 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2031 struct drm_i915_fence_reg *reg;
2032
2033 reg = &dev_priv->fence_regs[obj->fence_reg];
2034 list_move_tail(&reg->lru_list,
2035 &dev_priv->mm.fence_list);
2036 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002037 }
2038}
2039
Ben Widawskye2d05a82013-09-24 09:57:58 -07002040void i915_vma_move_to_active(struct i915_vma *vma,
2041 struct intel_ring_buffer *ring)
2042{
2043 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2044 return i915_gem_object_move_to_active(vma->obj, ring);
2045}
2046
Chris Wilsoncaea7472010-11-12 13:53:37 +00002047static void
Chris Wilsoncaea7472010-11-12 13:53:37 +00002048i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2049{
Ben Widawskyca191b12013-07-31 17:00:14 -07002050 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002051 struct i915_address_space *vm;
2052 struct i915_vma *vma;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002053
Chris Wilson65ce3022012-07-20 12:41:02 +01002054 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002055 BUG_ON(!obj->active);
Chris Wilson65ce3022012-07-20 12:41:02 +01002056
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002057 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2058 vma = i915_gem_obj_to_vma(obj, vm);
2059 if (vma && !list_empty(&vma->mm_list))
2060 list_move_tail(&vma->mm_list, &vm->inactive_list);
2061 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002062
Chris Wilson65ce3022012-07-20 12:41:02 +01002063 list_del_init(&obj->ring_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002064 obj->ring = NULL;
2065
Chris Wilson65ce3022012-07-20 12:41:02 +01002066 obj->last_read_seqno = 0;
2067 obj->last_write_seqno = 0;
2068 obj->base.write_domain = 0;
2069
2070 obj->last_fenced_seqno = 0;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002071 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002072
2073 obj->active = 0;
2074 drm_gem_object_unreference(&obj->base);
2075
2076 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08002077}
Eric Anholt673a3942008-07-30 12:06:12 -07002078
Chris Wilson9d7730912012-11-27 16:22:52 +00002079static int
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002080i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002081{
Chris Wilson9d7730912012-11-27 16:22:52 +00002082 struct drm_i915_private *dev_priv = dev->dev_private;
2083 struct intel_ring_buffer *ring;
2084 int ret, i, j;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002085
Chris Wilson107f27a52012-12-10 13:56:17 +02002086 /* Carefully retire all requests without writing to the rings */
Chris Wilson9d7730912012-11-27 16:22:52 +00002087 for_each_ring(ring, dev_priv, i) {
Chris Wilson107f27a52012-12-10 13:56:17 +02002088 ret = intel_ring_idle(ring);
2089 if (ret)
2090 return ret;
Chris Wilson9d7730912012-11-27 16:22:52 +00002091 }
Chris Wilson9d7730912012-11-27 16:22:52 +00002092 i915_gem_retire_requests(dev);
Chris Wilson107f27a52012-12-10 13:56:17 +02002093
2094 /* Finally reset hw state */
Chris Wilson9d7730912012-11-27 16:22:52 +00002095 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002096 intel_ring_init_seqno(ring, seqno);
Mika Kuoppala498d2ac2012-12-04 15:12:04 +02002097
Chris Wilson9d7730912012-11-27 16:22:52 +00002098 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2099 ring->sync_seqno[j] = 0;
2100 }
2101
2102 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002103}
2104
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002105int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2106{
2107 struct drm_i915_private *dev_priv = dev->dev_private;
2108 int ret;
2109
2110 if (seqno == 0)
2111 return -EINVAL;
2112
 2113 /* The HWS page seqno needs to be set to a value less than the one we
 2114 * will inject into the ring
2115 */
2116 ret = i915_gem_init_seqno(dev, seqno - 1);
2117 if (ret)
2118 return ret;
2119
2120 /* Carefully set the last_seqno value so that wrap
2121 * detection still works
2122 */
2123 dev_priv->next_seqno = seqno;
2124 dev_priv->last_seqno = seqno - 1;
2125 if (dev_priv->last_seqno == 0)
2126 dev_priv->last_seqno--;
2127
2128 return 0;
2129}
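
/*
 * Worked example (illustrative): i915_gem_set_seqno(dev, 0x1000) idles the
 * rings, programs 0xfff as the seqno the hardware currently reports, and
 * arranges for the next request to be allocated seqno 0x1000, keeping
 * last_seqno one behind next_seqno so that wrap detection still works.
 */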
2130
Chris Wilson9d7730912012-11-27 16:22:52 +00002131int
2132i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002133{
Chris Wilson9d7730912012-11-27 16:22:52 +00002134 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002135
Chris Wilson9d7730912012-11-27 16:22:52 +00002136 /* reserve 0 for non-seqno */
2137 if (dev_priv->next_seqno == 0) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002138 int ret = i915_gem_init_seqno(dev, 0);
Chris Wilson9d7730912012-11-27 16:22:52 +00002139 if (ret)
2140 return ret;
2141
2142 dev_priv->next_seqno = 1;
2143 }
2144
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02002145 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
Chris Wilson9d7730912012-11-27 16:22:52 +00002146 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002147}
2148
Mika Kuoppala0025c072013-06-12 12:35:30 +03002149int __i915_add_request(struct intel_ring_buffer *ring,
2150 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002151 struct drm_i915_gem_object *obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002152 u32 *out_seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002153{
Chris Wilsondb53a302011-02-03 11:57:46 +00002154 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Chris Wilsonacb868d2012-09-26 13:47:30 +01002155 struct drm_i915_gem_request *request;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002156 u32 request_ring_position, request_start;
Eric Anholt673a3942008-07-30 12:06:12 -07002157 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01002158 int ret;
2159
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002160 request_start = intel_ring_get_tail(ring);
Daniel Vettercc889e02012-06-13 20:45:19 +02002161 /*
2162 * Emit any outstanding flushes - execbuf can fail to emit the flush
2163 * after having emitted the batchbuffer command. Hence we need to fix
2164 * things up similar to emitting the lazy request. The difference here
2165 * is that the flush _must_ happen before the next request, no matter
2166 * what.
2167 */
Chris Wilsona7b97612012-07-20 12:41:08 +01002168 ret = intel_ring_flush_all_caches(ring);
2169 if (ret)
2170 return ret;
Daniel Vettercc889e02012-06-13 20:45:19 +02002171
Chris Wilson3c0e2342013-09-04 10:45:52 +01002172 request = ring->preallocated_lazy_request;
2173 if (WARN_ON(request == NULL))
Chris Wilsonacb868d2012-09-26 13:47:30 +01002174 return -ENOMEM;
Daniel Vettercc889e02012-06-13 20:45:19 +02002175
Chris Wilsona71d8d92012-02-15 11:25:36 +00002176 /* Record the position of the start of the request so that
2177 * should we detect the updated seqno part-way through the
2178 * GPU processing the request, we never over-estimate the
2179 * position of the head.
2180 */
2181 request_ring_position = intel_ring_get_tail(ring);
2182
Chris Wilson9d7730912012-11-27 16:22:52 +00002183 ret = ring->add_request(ring);
Chris Wilson3c0e2342013-09-04 10:45:52 +01002184 if (ret)
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002185 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002186
Chris Wilson9d7730912012-11-27 16:22:52 +00002187 request->seqno = intel_ring_get_seqno(ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08002188 request->ring = ring;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002189 request->head = request_start;
Chris Wilsona71d8d92012-02-15 11:25:36 +00002190 request->tail = request_ring_position;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002191
2192 /* Whilst this request exists, batch_obj will be on the
2193 * active_list, and so will hold the active reference. Only when this
 2194 * request is retired will the batch_obj be moved onto the
2195 * inactive_list and lose its active reference. Hence we do not need
2196 * to explicitly hold another reference here.
2197 */
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002198 request->batch_obj = obj;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002199
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002200 /* Hold a reference to the current context so that we can inspect
2201 * it later in case a hangcheck error event fires.
2202 */
2203 request->ctx = ring->last_context;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002204 if (request->ctx)
2205 i915_gem_context_reference(request->ctx);
2206
Eric Anholt673a3942008-07-30 12:06:12 -07002207 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08002208 was_empty = list_empty(&ring->request_list);
2209 list_add_tail(&request->list, &ring->request_list);
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002210 request->file_priv = NULL;
Zou Nan hai852835f2010-05-21 09:08:56 +08002211
Chris Wilsondb53a302011-02-03 11:57:46 +00002212 if (file) {
2213 struct drm_i915_file_private *file_priv = file->driver_priv;
2214
Chris Wilson1c255952010-09-26 11:03:27 +01002215 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002216 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002217 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002218 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01002219 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00002220 }
Eric Anholt673a3942008-07-30 12:06:12 -07002221
Chris Wilson9d7730912012-11-27 16:22:52 +00002222 trace_i915_gem_request_add(ring, request->seqno);
Chris Wilson18235212013-09-04 10:45:51 +01002223 ring->outstanding_lazy_seqno = 0;
Chris Wilson3c0e2342013-09-04 10:45:52 +01002224 ring->preallocated_lazy_request = NULL;
Chris Wilsondb53a302011-02-03 11:57:46 +00002225
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002226 if (!dev_priv->ums.mm_suspended) {
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002227 i915_queue_hangcheck(ring->dev);
2228
Chris Wilsonf047e392012-07-21 12:31:41 +01002229 if (was_empty) {
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002230 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
Chris Wilsonb3b079d2010-09-13 23:44:34 +01002231 queue_delayed_work(dev_priv->wq,
Chris Wilsonbcb45082012-10-05 17:02:57 +01002232 &dev_priv->mm.retire_work,
2233 round_jiffies_up_relative(HZ));
Chris Wilsonf047e392012-07-21 12:31:41 +01002234 intel_mark_busy(dev_priv->dev);
2235 }
Ben Gamarif65d9422009-09-14 17:48:44 -04002236 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002237
Chris Wilsonacb868d2012-09-26 13:47:30 +01002238 if (out_seqno)
Chris Wilson9d7730912012-11-27 16:22:52 +00002239 *out_seqno = request->seqno;
Chris Wilson3cce4692010-10-27 16:11:02 +01002240 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002241}
2242
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002243static inline void
2244i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07002245{
Chris Wilson1c255952010-09-26 11:03:27 +01002246 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002247
Chris Wilson1c255952010-09-26 11:03:27 +01002248 if (!file_priv)
2249 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002250
Chris Wilson1c255952010-09-26 11:03:27 +01002251 spin_lock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002252 list_del(&request->client_list);
2253 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01002254 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002255}
2256
Mika Kuoppala939fd762014-01-30 19:04:44 +02002257static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002258 const struct i915_hw_context *ctx)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002259{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002260 unsigned long elapsed;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002261
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002262 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2263
2264 if (ctx->hang_stats.banned)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002265 return true;
2266
2267 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
Mika Kuoppala3fac8972014-01-30 16:05:48 +02002268 if (dev_priv->gpu_error.stop_rings == 0 &&
2269 i915_gem_context_is_default(ctx)) {
2270 DRM_ERROR("gpu hanging too fast, banning!\n");
2271 } else {
2272 DRM_DEBUG("context hanging too fast, banning!\n");
2273 }
2274
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002275 return true;
2276 }
2277
2278 return false;
2279}
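
/*
 * Illustrative: the first guilty hang only records a timestamp in
 * hang_stats; a second guilty hang within DRM_I915_CTX_BAN_PERIOD seconds
 * of it bans the context from submitting further work.
 */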
2280
Mika Kuoppala939fd762014-01-30 19:04:44 +02002281static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2282 struct i915_hw_context *ctx,
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002283 const bool guilty)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002284{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002285 struct i915_ctx_hang_stats *hs;
2286
2287 if (WARN_ON(!ctx))
2288 return;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002289
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002290 hs = &ctx->hang_stats;
2291
2292 if (guilty) {
Mika Kuoppala939fd762014-01-30 19:04:44 +02002293 hs->banned = i915_context_is_banned(dev_priv, ctx);
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002294 hs->batch_active++;
2295 hs->guilty_ts = get_seconds();
2296 } else {
2297 hs->batch_pending++;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002298 }
2299}
2300
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002301static void i915_gem_free_request(struct drm_i915_gem_request *request)
2302{
2303 list_del(&request->list);
2304 i915_gem_request_remove_from_client(request);
2305
2306 if (request->ctx)
2307 i915_gem_context_unreference(request->ctx);
2308
2309 kfree(request);
2310}
2311
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002312static struct drm_i915_gem_request *
2313i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01002314{
Chris Wilson4db080f2013-12-04 11:37:09 +00002315 struct drm_i915_gem_request *request;
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002316 const u32 completed_seqno = ring->get_seqno(ring, false);
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002317
Chris Wilson4db080f2013-12-04 11:37:09 +00002318 list_for_each_entry(request, &ring->request_list, list) {
2319 if (i915_seqno_passed(completed_seqno, request->seqno))
2320 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002321
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002322 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00002323 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002324
2325 return NULL;
2326}
2327
2328static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2329 struct intel_ring_buffer *ring)
2330{
2331 struct drm_i915_gem_request *request;
2332 bool ring_hung;
2333
2334 request = i915_gem_find_first_non_complete(ring);
2335
2336 if (request == NULL)
2337 return;
2338
2339 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2340
Mika Kuoppala939fd762014-01-30 19:04:44 +02002341 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002342
2343 list_for_each_entry_continue(request, &ring->request_list, list)
Mika Kuoppala939fd762014-01-30 19:04:44 +02002344 i915_set_reset_status(dev_priv, request->ctx, false);
Chris Wilson4db080f2013-12-04 11:37:09 +00002345}
2346
2347static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2348 struct intel_ring_buffer *ring)
2349{
Chris Wilsondfaae392010-09-22 10:31:52 +01002350 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002351 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07002352
Chris Wilson05394f32010-11-08 19:18:58 +00002353 obj = list_first_entry(&ring->active_list,
2354 struct drm_i915_gem_object,
2355 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002356
Chris Wilson05394f32010-11-08 19:18:58 +00002357 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002358 }
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002359
2360 /*
2361 * We must free the requests after all the corresponding objects have
2362	 * been moved off the active lists, which is the same order that the normal
2363	 * retire_requests function follows. This is important if objects hold
2364	 * implicit references on things like e.g. ppgtt address spaces through
2365 * the request.
2366 */
2367 while (!list_empty(&ring->request_list)) {
2368 struct drm_i915_gem_request *request;
2369
2370 request = list_first_entry(&ring->request_list,
2371 struct drm_i915_gem_request,
2372 list);
2373
2374 i915_gem_free_request(request);
2375 }
Eric Anholt673a3942008-07-30 12:06:12 -07002376}
2377
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002378void i915_gem_restore_fences(struct drm_device *dev)
Chris Wilson312817a2010-11-22 11:50:11 +00002379{
2380 struct drm_i915_private *dev_priv = dev->dev_private;
2381 int i;
2382
Daniel Vetter4b9de732011-10-09 21:52:02 +02002383 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00002384 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00002385
Daniel Vetter94a335d2013-07-17 14:51:28 +02002386 /*
2387 * Commit delayed tiling changes if we have an object still
2388 * attached to the fence, otherwise just clear the fence.
2389 */
2390 if (reg->obj) {
2391 i915_gem_object_update_fence(reg->obj, reg,
2392 reg->obj->tiling_mode);
2393 } else {
2394 i915_gem_write_fence(dev, i, NULL);
2395 }
Chris Wilson312817a2010-11-22 11:50:11 +00002396 }
2397}
2398
Chris Wilson069efc12010-09-30 16:53:18 +01002399void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002400{
Chris Wilsondfaae392010-09-22 10:31:52 +01002401 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002402 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002403 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002404
Chris Wilson4db080f2013-12-04 11:37:09 +00002405 /*
2406 * Before we free the objects from the requests, we need to inspect
2407 * them for finding the guilty party. As the requests only borrow
2408 * their reference to the objects, the inspection must be done first.
2409 */
Chris Wilsonb4519512012-05-11 14:29:30 +01002410 for_each_ring(ring, dev_priv, i)
Chris Wilson4db080f2013-12-04 11:37:09 +00002411 i915_gem_reset_ring_status(dev_priv, ring);
2412
2413 for_each_ring(ring, dev_priv, i)
2414 i915_gem_reset_ring_cleanup(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01002415
Ben Widawsky3d57e5b2013-10-14 10:01:36 -07002416 i915_gem_cleanup_ringbuffer(dev);
2417
Ben Widawskyacce9ff2013-12-06 14:11:03 -08002418 i915_gem_context_reset(dev);
2419
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002420 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002421}
2422
2423/**
2424 * This function clears the request list as sequence numbers are passed.
2425 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00002426void
Chris Wilsondb53a302011-02-03 11:57:46 +00002427i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002428{
Eric Anholt673a3942008-07-30 12:06:12 -07002429 uint32_t seqno;
2430
Chris Wilsondb53a302011-02-03 11:57:46 +00002431 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01002432 return;
2433
Chris Wilsondb53a302011-02-03 11:57:46 +00002434 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002435
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01002436 seqno = ring->get_seqno(ring, true);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002437
Chris Wilsone9103032014-01-07 11:45:14 +00002438 /* Move any buffers on the active list that are no longer referenced
2439 * by the ringbuffer to the flushing/inactive lists as appropriate,
2440 * before we free the context associated with the requests.
2441 */
2442 while (!list_empty(&ring->active_list)) {
2443 struct drm_i915_gem_object *obj;
2444
2445 obj = list_first_entry(&ring->active_list,
2446 struct drm_i915_gem_object,
2447 ring_list);
2448
2449 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2450 break;
2451
2452 i915_gem_object_move_to_inactive(obj);
2453 }
2454
2455
Zou Nan hai852835f2010-05-21 09:08:56 +08002456 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002457 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07002458
Zou Nan hai852835f2010-05-21 09:08:56 +08002459 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07002460 struct drm_i915_gem_request,
2461 list);
Eric Anholt673a3942008-07-30 12:06:12 -07002462
Chris Wilsondfaae392010-09-22 10:31:52 +01002463 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07002464 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002465
Chris Wilsondb53a302011-02-03 11:57:46 +00002466 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002467 /* We know the GPU must have read the request to have
2468 * sent us the seqno + interrupt, so use the position
2469 * of tail of the request to update the last known position
2470 * of the GPU head.
2471 */
2472 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002473
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002474 i915_gem_free_request(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002475 }
2476
Chris Wilsondb53a302011-02-03 11:57:46 +00002477 if (unlikely(ring->trace_irq_seqno &&
2478 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002479 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00002480 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002481 }
Chris Wilson23bc5982010-09-29 16:10:57 +01002482
Chris Wilsondb53a302011-02-03 11:57:46 +00002483 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002484}
2485
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002486bool
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002487i915_gem_retire_requests(struct drm_device *dev)
2488{
2489 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002490 struct intel_ring_buffer *ring;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002491 bool idle = true;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002492 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002493
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002494 for_each_ring(ring, dev_priv, i) {
Chris Wilsonb4519512012-05-11 14:29:30 +01002495 i915_gem_retire_requests_ring(ring);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002496 idle &= list_empty(&ring->request_list);
2497 }
2498
2499 if (idle)
2500 mod_delayed_work(dev_priv->wq,
2501 &dev_priv->mm.idle_work,
2502 msecs_to_jiffies(100));
2503
2504 return idle;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002505}
2506
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002507static void
Eric Anholt673a3942008-07-30 12:06:12 -07002508i915_gem_retire_work_handler(struct work_struct *work)
2509{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002510 struct drm_i915_private *dev_priv =
2511 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2512 struct drm_device *dev = dev_priv->dev;
Chris Wilson0a587052011-01-09 21:05:44 +00002513 bool idle;
Eric Anholt673a3942008-07-30 12:06:12 -07002514
Chris Wilson891b48c2010-09-29 12:26:37 +01002515 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002516 idle = false;
2517 if (mutex_trylock(&dev->struct_mutex)) {
2518 idle = i915_gem_retire_requests(dev);
2519 mutex_unlock(&dev->struct_mutex);
2520 }
2521 if (!idle)
Chris Wilsonbcb45082012-10-05 17:02:57 +01002522 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2523 round_jiffies_up_relative(HZ));
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002524}
Chris Wilson891b48c2010-09-29 12:26:37 +01002525
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002526static void
2527i915_gem_idle_work_handler(struct work_struct *work)
2528{
2529 struct drm_i915_private *dev_priv =
2530 container_of(work, typeof(*dev_priv), mm.idle_work.work);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002531
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002532 intel_mark_idle(dev_priv->dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002533}
2534
Ben Widawsky5816d642012-04-11 11:18:19 -07002535/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002536 * Ensures that an object will eventually get non-busy by flushing any required
2537 * write domains, emitting any outstanding lazy request and retiring any
2538 * completed requests.
2539 */
2540static int
2541i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2542{
2543 int ret;
2544
2545 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002546 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002547 if (ret)
2548 return ret;
2549
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002550 i915_gem_retire_requests_ring(obj->ring);
2551 }
2552
2553 return 0;
2554}
2555
2556/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002557 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2558 * @DRM_IOCTL_ARGS: standard ioctl arguments
2559 *
2560 * Returns 0 if successful, else an error is returned with the remaining time in
2561 * the timeout parameter.
2562 * -ETIME: object is still busy after timeout
2563 * -ERESTARTSYS: signal interrupted the wait
2564 * -ENOENT: object doesn't exist
2565 * Also possible, but rare:
2566 * -EAGAIN: GPU wedged
2567 * -ENOMEM: damn
2568 * -ENODEV: Internal IRQ fail
2569 * -E?: The add request failed
2570 *
2571 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2572 * non-zero timeout parameter the wait ioctl will wait for the given number of
2573 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2574 * without holding struct_mutex the object may become re-busied before this
2575 * function completes. A similar but shorter * race condition exists in the busy
2576 * ioctl
2577 */
2578int
2579i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2580{
Daniel Vetterf69061b2012-12-06 09:01:42 +01002581 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002582 struct drm_i915_gem_wait *args = data;
2583 struct drm_i915_gem_object *obj;
2584 struct intel_ring_buffer *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002585 struct timespec timeout_stack, *timeout = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01002586 unsigned reset_counter;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002587 u32 seqno = 0;
2588 int ret = 0;
2589
Ben Widawskyeac1f142012-06-05 15:24:24 -07002590 if (args->timeout_ns >= 0) {
2591 timeout_stack = ns_to_timespec(args->timeout_ns);
2592 timeout = &timeout_stack;
2593 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002594
2595 ret = i915_mutex_lock_interruptible(dev);
2596 if (ret)
2597 return ret;
2598
2599 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2600 if (&obj->base == NULL) {
2601 mutex_unlock(&dev->struct_mutex);
2602 return -ENOENT;
2603 }
2604
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002605 /* Need to make sure the object gets inactive eventually. */
2606 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002607 if (ret)
2608 goto out;
2609
2610 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002611 seqno = obj->last_read_seqno;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002612 ring = obj->ring;
2613 }
2614
2615 if (seqno == 0)
2616 goto out;
2617
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002618	/* Do this after the OLR check to make sure we make forward progress polling
2619	 * on this IOCTL with a 0 timeout (like the busy ioctl)
2620 */
2621 if (!args->timeout_ns) {
2622 ret = -ETIME;
2623 goto out;
2624 }
2625
2626 drm_gem_object_unreference(&obj->base);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002627 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002628 mutex_unlock(&dev->struct_mutex);
2629
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002630 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03002631 if (timeout)
Ben Widawskyeac1f142012-06-05 15:24:24 -07002632 args->timeout_ns = timespec_to_ns(timeout);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002633 return ret;
2634
2635out:
2636 drm_gem_object_unreference(&obj->base);
2637 mutex_unlock(&dev->struct_mutex);
2638 return ret;
2639}
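/*
 * Userspace usage sketch (assumes libdrm's drmIoctl() wrapper and the
 * drm_i915_gem_wait uapi struct; "fd" and "handle" are hypothetical):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000,	// poll for up to one second
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *	// failure with errno == ETIME means the bo is still busy; otherwise
 *	// wait.timeout_ns holds the remaining time on return
 */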
2640
2641/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002642 * i915_gem_object_sync - sync an object to a ring.
2643 *
2644 * @obj: object which may be in use on another ring.
2645 * @to: ring we wish to use the object on. May be NULL.
2646 *
2647 * This code is meant to abstract object synchronization with the GPU.
2648 * Calling with NULL implies synchronizing the object with the CPU
2649 * rather than a particular GPU ring.
2650 *
2651 * Returns 0 if successful, else propagates up the lower layer error.
2652 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002653int
2654i915_gem_object_sync(struct drm_i915_gem_object *obj,
2655 struct intel_ring_buffer *to)
2656{
2657 struct intel_ring_buffer *from = obj->ring;
2658 u32 seqno;
2659 int ret, idx;
2660
2661 if (from == NULL || to == from)
2662 return 0;
2663
Ben Widawsky5816d642012-04-11 11:18:19 -07002664 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Chris Wilson0201f1e2012-07-20 12:41:01 +01002665 return i915_gem_object_wait_rendering(obj, false);
Ben Widawsky2911a352012-04-05 14:47:36 -07002666
2667 idx = intel_ring_sync_index(from, to);
2668
Chris Wilson0201f1e2012-07-20 12:41:01 +01002669 seqno = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002670 if (seqno <= from->sync_seqno[idx])
2671 return 0;
2672
Ben Widawskyb4aca012012-04-25 20:50:12 -07002673 ret = i915_gem_check_olr(obj->ring, seqno);
2674 if (ret)
2675 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002676
Chris Wilsonb52b89d2013-09-25 11:43:28 +01002677 trace_i915_gem_ring_sync_to(from, to, seqno);
Ben Widawsky1500f7e2012-04-11 11:18:21 -07002678 ret = to->sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002679 if (!ret)
Mika Kuoppala7b01e262012-11-28 17:18:45 +02002680 /* We use last_read_seqno because sync_to()
2681 * might have just caused seqno wrap under
2682 * the radar.
2683 */
2684 from->sync_seqno[idx] = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002685
Ben Widawskye3a5a222012-04-11 11:18:20 -07002686 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002687}
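/*
 * Usage sketch (assumption: modelled on the execbuffer-style pattern of
 * handing an object last used on one ring over to another):
 *
 *	ret = i915_gem_object_sync(obj, target_ring);
 *	if (ret)
 *		return ret;
 *	// target_ring now waits on a semaphore (or we already waited on the
 *	// CPU) before it samples obj
 */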
2688
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002689static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2690{
2691 u32 old_write_domain, old_read_domains;
2692
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002693 /* Force a pagefault for domain tracking on next user access */
2694 i915_gem_release_mmap(obj);
2695
Keith Packardb97c3d92011-06-24 21:02:59 -07002696 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2697 return;
2698
Chris Wilson97c809fd2012-10-09 19:24:38 +01002699 /* Wait for any direct GTT access to complete */
2700 mb();
2701
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002702 old_read_domains = obj->base.read_domains;
2703 old_write_domain = obj->base.write_domain;
2704
2705 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2706 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2707
2708 trace_i915_gem_object_change_domain(obj,
2709 old_read_domains,
2710 old_write_domain);
2711}
2712
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002713int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002714{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002715 struct drm_i915_gem_object *obj = vma->obj;
Daniel Vetter7bddb012012-02-09 17:15:47 +01002716 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson43e28f02013-01-08 10:53:09 +00002717 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002718
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002719 if (list_empty(&vma->vma_link))
Eric Anholt673a3942008-07-30 12:06:12 -07002720 return 0;
2721
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002722 if (!drm_mm_node_allocated(&vma->node)) {
2723 i915_gem_vma_destroy(vma);
2724
2725 return 0;
2726 }
Ben Widawsky433544b2013-08-13 18:09:06 -07002727
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002728 if (vma->pin_count)
Chris Wilson31d8d652012-05-24 19:11:20 +01002729 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002730
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002731 BUG_ON(obj->pages == NULL);
2732
Chris Wilsona8198ee2011-04-13 22:04:09 +01002733 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002734 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002735 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002736	/* Continue on if we fail due to EIO; the GPU is hung, so we
2737	 * should be safe, and we need to clean up or else we might
2738	 * cause memory corruption through use-after-free.
2739 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002740
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002741 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002742
Daniel Vetter96b47b62009-12-15 17:50:00 +01002743 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002744 ret = i915_gem_object_put_fence(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002745 if (ret)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002746 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002747
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002748 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00002749
Ben Widawsky6f65e292013-12-06 14:10:56 -08002750 vma->unbind_vma(vma);
2751
Daniel Vetter74163902012-02-15 23:50:21 +01002752 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002753
Ben Widawskyca191b12013-07-31 17:00:14 -07002754 list_del(&vma->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002755 /* Avoid an unnecessary call to unbind on rebind. */
Ben Widawsky5cacaac2013-07-31 17:00:13 -07002756 if (i915_is_ggtt(vma->vm))
2757 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002758
Ben Widawsky2f633152013-07-17 12:19:03 -07002759 drm_mm_remove_node(&vma->node);
2760 i915_gem_vma_destroy(vma);
2761
2762 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002763 * no more VMAs exist. */
Ben Widawsky2f633152013-07-17 12:19:03 -07002764 if (list_empty(&obj->vma_list))
2765 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002766
Chris Wilson70903c32013-12-04 09:59:09 +00002767 /* And finally now the object is completely decoupled from this vma,
2768 * we can drop its hold on the backing storage and allow it to be
2769 * reaped by the shrinker.
2770 */
2771 i915_gem_object_unpin_pages(obj);
2772
Chris Wilson88241782011-01-07 17:09:48 +00002773 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002774}
2775
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002776/**
2777 * Unbinds an object from the global GTT aperture.
2778 */
2779int
2780i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2781{
2782 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2783 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2784
Dan Carpenter58e73e12013-08-09 12:44:11 +03002785 if (!i915_gem_obj_ggtt_bound(obj))
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002786 return 0;
2787
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002788 if (i915_gem_obj_to_ggtt(obj)->pin_count)
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002789 return -EBUSY;
2790
2791 BUG_ON(obj->pages == NULL);
2792
2793 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2794}
2795
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002796int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002797{
2798 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002799 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002800 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002801
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002802 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002803 for_each_ring(ring, dev_priv, i) {
Ben Widawsky41bde552013-12-06 14:11:21 -08002804 ret = i915_switch_context(ring, NULL, ring->default_context);
Ben Widawskyb6c74882012-08-14 14:35:14 -07002805 if (ret)
2806 return ret;
2807
Chris Wilson3e960502012-11-27 16:22:54 +00002808 ret = intel_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002809 if (ret)
2810 return ret;
2811 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002812
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002813 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002814}
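/*
 * Usage sketch (assumptions: struct_mutex held, e.g. on the suspend path)
 * to drain every ring before touching global state:
 *
 *	ret = i915_gpu_idle(dev);
 *	if (ret)
 *		return ret;
 *	i915_gem_retire_requests(dev);
 */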
2815
Chris Wilson9ce079e2012-04-17 15:31:30 +01002816static void i965_write_fence_reg(struct drm_device *dev, int reg,
2817 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002818{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002819 drm_i915_private_t *dev_priv = dev->dev_private;
Imre Deak56c844e2013-01-07 21:47:34 +02002820 int fence_reg;
2821 int fence_pitch_shift;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002822
Imre Deak56c844e2013-01-07 21:47:34 +02002823 if (INTEL_INFO(dev)->gen >= 6) {
2824 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2825 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2826 } else {
2827 fence_reg = FENCE_REG_965_0;
2828 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2829 }
2830
Chris Wilsond18b9612013-07-10 13:36:23 +01002831 fence_reg += reg * 8;
2832
2833 /* To w/a incoherency with non-atomic 64-bit register updates,
2834 * we split the 64-bit update into two 32-bit writes. In order
2835 * for a partial fence not to be evaluated between writes, we
2836	 * precede the update with a write to turn off the fence register,
2837 * and only enable the fence as the last step.
2838 *
2839 * For extra levels of paranoia, we make sure each step lands
2840 * before applying the next step.
2841 */
2842 I915_WRITE(fence_reg, 0);
2843 POSTING_READ(fence_reg);
2844
Chris Wilson9ce079e2012-04-17 15:31:30 +01002845 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002846 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilsond18b9612013-07-10 13:36:23 +01002847 uint64_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002848
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002849 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
Chris Wilson9ce079e2012-04-17 15:31:30 +01002850 0xfffff000) << 32;
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002851 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
Imre Deak56c844e2013-01-07 21:47:34 +02002852 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002853 if (obj->tiling_mode == I915_TILING_Y)
2854 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2855 val |= I965_FENCE_REG_VALID;
Daniel Vetterc6642782010-11-12 13:46:18 +00002856
Chris Wilsond18b9612013-07-10 13:36:23 +01002857 I915_WRITE(fence_reg + 4, val >> 32);
2858 POSTING_READ(fence_reg + 4);
2859
2860 I915_WRITE(fence_reg + 0, val);
2861 POSTING_READ(fence_reg);
2862 } else {
2863 I915_WRITE(fence_reg + 4, 0);
2864 POSTING_READ(fence_reg + 4);
2865 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002866}
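/*
 * Worked example (hypothetical values): a 1MB X-tiled object at GGTT offset
 * 0x00100000 with a 4096 byte stride would be programmed as
 *
 *	val  = ((0x00100000 + 0x00100000 - 4096) & 0xfffff000) << 32;  // end
 *	val |= 0x00100000 & 0xfffff000;                                 // start
 *	val |= (uint64_t)(4096 / 128 - 1) << fence_pitch_shift;         // pitch
 *	val |= I965_FENCE_REG_VALID;
 *
 * and committed with the fence disabled in between the two 32-bit halves,
 * exactly as the function above does.
 */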
2867
Chris Wilson9ce079e2012-04-17 15:31:30 +01002868static void i915_write_fence_reg(struct drm_device *dev, int reg,
2869 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002870{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002871 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002872 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002873
Chris Wilson9ce079e2012-04-17 15:31:30 +01002874 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002875 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002876 int pitch_val;
2877 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002878
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002879 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002880 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002881 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2882 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2883 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002884
2885 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2886 tile_width = 128;
2887 else
2888 tile_width = 512;
2889
2890		/* Note: the pitch had better be a power-of-two number of tile widths */
2891 pitch_val = obj->stride / tile_width;
2892 pitch_val = ffs(pitch_val) - 1;
2893
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002894 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002895 if (obj->tiling_mode == I915_TILING_Y)
2896 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2897 val |= I915_FENCE_SIZE_BITS(size);
2898 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2899 val |= I830_FENCE_REG_VALID;
2900 } else
2901 val = 0;
2902
2903 if (reg < 8)
2904 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002905 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002906 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002907
Chris Wilson9ce079e2012-04-17 15:31:30 +01002908 I915_WRITE(reg, val);
2909 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002910}
2911
Chris Wilson9ce079e2012-04-17 15:31:30 +01002912static void i830_write_fence_reg(struct drm_device *dev, int reg,
2913 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002914{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002915 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002916 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002917
Chris Wilson9ce079e2012-04-17 15:31:30 +01002918 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002919 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002920 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002921
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002922 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002923 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002924 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2925 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2926 i915_gem_obj_ggtt_offset(obj), size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002927
Chris Wilson9ce079e2012-04-17 15:31:30 +01002928 pitch_val = obj->stride / 128;
2929 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002930
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002931 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002932 if (obj->tiling_mode == I915_TILING_Y)
2933 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2934 val |= I830_FENCE_SIZE_BITS(size);
2935 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2936 val |= I830_FENCE_REG_VALID;
2937 } else
2938 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002939
Chris Wilson9ce079e2012-04-17 15:31:30 +01002940 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2941 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2942}
2943
Chris Wilsond0a57782012-10-09 19:24:37 +01002944inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2945{
2946 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2947}
2948
Chris Wilson9ce079e2012-04-17 15:31:30 +01002949static void i915_gem_write_fence(struct drm_device *dev, int reg,
2950 struct drm_i915_gem_object *obj)
2951{
Chris Wilsond0a57782012-10-09 19:24:37 +01002952 struct drm_i915_private *dev_priv = dev->dev_private;
2953
2954 /* Ensure that all CPU reads are completed before installing a fence
2955 * and all writes before removing the fence.
2956 */
2957 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2958 mb();
2959
Daniel Vetter94a335d2013-07-17 14:51:28 +02002960 WARN(obj && (!obj->stride || !obj->tiling_mode),
2961 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2962 obj->stride, obj->tiling_mode);
2963
Chris Wilson9ce079e2012-04-17 15:31:30 +01002964 switch (INTEL_INFO(dev)->gen) {
Ben Widawsky5ab31332013-11-02 21:07:03 -07002965 case 8:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002966 case 7:
Imre Deak56c844e2013-01-07 21:47:34 +02002967 case 6:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002968 case 5:
2969 case 4: i965_write_fence_reg(dev, reg, obj); break;
2970 case 3: i915_write_fence_reg(dev, reg, obj); break;
2971 case 2: i830_write_fence_reg(dev, reg, obj); break;
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08002972 default: BUG();
Chris Wilson9ce079e2012-04-17 15:31:30 +01002973 }
Chris Wilsond0a57782012-10-09 19:24:37 +01002974
2975 /* And similarly be paranoid that no direct access to this region
2976 * is reordered to before the fence is installed.
2977 */
2978 if (i915_gem_object_needs_mb(obj))
2979 mb();
Jesse Barnesde151cf2008-11-12 10:03:55 -08002980}
2981
Chris Wilson61050802012-04-17 15:31:31 +01002982static inline int fence_number(struct drm_i915_private *dev_priv,
2983 struct drm_i915_fence_reg *fence)
2984{
2985 return fence - dev_priv->fence_regs;
2986}
2987
2988static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2989 struct drm_i915_fence_reg *fence,
2990 bool enable)
2991{
Chris Wilson2dc8aae2013-05-22 17:08:06 +01002992 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson46a0b632013-07-10 13:36:24 +01002993 int reg = fence_number(dev_priv, fence);
Chris Wilson61050802012-04-17 15:31:31 +01002994
Chris Wilson46a0b632013-07-10 13:36:24 +01002995 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
Chris Wilson61050802012-04-17 15:31:31 +01002996
2997 if (enable) {
Chris Wilson46a0b632013-07-10 13:36:24 +01002998 obj->fence_reg = reg;
Chris Wilson61050802012-04-17 15:31:31 +01002999 fence->obj = obj;
3000 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3001 } else {
3002 obj->fence_reg = I915_FENCE_REG_NONE;
3003 fence->obj = NULL;
3004 list_del_init(&fence->lru_list);
3005 }
Daniel Vetter94a335d2013-07-17 14:51:28 +02003006 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +01003007}
3008
Chris Wilsond9e86c02010-11-10 16:40:20 +00003009static int
Chris Wilsond0a57782012-10-09 19:24:37 +01003010i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003011{
Chris Wilson1c293ea2012-04-17 15:31:27 +01003012 if (obj->last_fenced_seqno) {
Chris Wilson86d5bc32012-07-20 12:41:04 +01003013 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01003014 if (ret)
3015 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003016
3017 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003018 }
3019
Chris Wilson86d5bc32012-07-20 12:41:04 +01003020 obj->fenced_gpu_access = false;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003021 return 0;
3022}
3023
3024int
3025i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3026{
Chris Wilson61050802012-04-17 15:31:31 +01003027 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003028 struct drm_i915_fence_reg *fence;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003029 int ret;
3030
Chris Wilsond0a57782012-10-09 19:24:37 +01003031 ret = i915_gem_object_wait_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003032 if (ret)
3033 return ret;
3034
Chris Wilson61050802012-04-17 15:31:31 +01003035 if (obj->fence_reg == I915_FENCE_REG_NONE)
3036 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01003037
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003038 fence = &dev_priv->fence_regs[obj->fence_reg];
3039
Chris Wilson61050802012-04-17 15:31:31 +01003040 i915_gem_object_fence_lost(obj);
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003041 i915_gem_object_update_fence(obj, fence, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003042
3043 return 0;
3044}
3045
3046static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01003047i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01003048{
Daniel Vetterae3db242010-02-19 11:51:58 +01003049 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01003050 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003051 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01003052
3053 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003054 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003055 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3056 reg = &dev_priv->fence_regs[i];
3057 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003058 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003059
Chris Wilson1690e1e2011-12-14 13:57:08 +01003060 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003061 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003062 }
3063
Chris Wilsond9e86c02010-11-10 16:40:20 +00003064 if (avail == NULL)
Chris Wilson5dce5b932014-01-20 10:17:36 +00003065 goto deadlock;
Daniel Vetterae3db242010-02-19 11:51:58 +01003066
3067 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003068 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01003069 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01003070 continue;
3071
Chris Wilson8fe301a2012-04-17 15:31:28 +01003072 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003073 }
3074
Chris Wilson5dce5b932014-01-20 10:17:36 +00003075deadlock:
3076 /* Wait for completion of pending flips which consume fences */
3077 if (intel_has_pending_fb_unpin(dev))
3078 return ERR_PTR(-EAGAIN);
3079
3080 return ERR_PTR(-EDEADLK);
Daniel Vetterae3db242010-02-19 11:51:58 +01003081}
3082
Jesse Barnesde151cf2008-11-12 10:03:55 -08003083/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003084 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08003085 * @obj: object to map through a fence reg
3086 *
3087 * When mapping objects through the GTT, userspace wants to be able to write
3088 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003089 * This function walks the fence regs looking for a free one for @obj,
3090 * stealing one if it can't find any.
3091 *
3092 * It then sets up the reg based on the object's properties: address, pitch
3093 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003094 *
3095 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003096 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003097int
Chris Wilson06d98132012-04-17 15:31:24 +01003098i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08003099{
Chris Wilson05394f32010-11-08 19:18:58 +00003100 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08003101 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01003102 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003103 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003104 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003105
Chris Wilson14415742012-04-17 15:31:33 +01003106	/* Have we updated the tiling parameters on the object and so
3107	 * will need to serialise the write to the associated fence register?
3108 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003109 if (obj->fence_dirty) {
Chris Wilsond0a57782012-10-09 19:24:37 +01003110 ret = i915_gem_object_wait_fence(obj);
Chris Wilson14415742012-04-17 15:31:33 +01003111 if (ret)
3112 return ret;
3113 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003114
Chris Wilsond9e86c02010-11-10 16:40:20 +00003115 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00003116 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3117 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003118 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01003119 list_move_tail(&reg->lru_list,
3120 &dev_priv->mm.fence_list);
3121 return 0;
3122 }
3123 } else if (enable) {
3124 reg = i915_find_fence_reg(dev);
Chris Wilson5dce5b932014-01-20 10:17:36 +00003125 if (IS_ERR(reg))
3126 return PTR_ERR(reg);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003127
Chris Wilson14415742012-04-17 15:31:33 +01003128 if (reg->obj) {
3129 struct drm_i915_gem_object *old = reg->obj;
3130
Chris Wilsond0a57782012-10-09 19:24:37 +01003131 ret = i915_gem_object_wait_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003132 if (ret)
3133 return ret;
3134
Chris Wilson14415742012-04-17 15:31:33 +01003135 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003136 }
Chris Wilson14415742012-04-17 15:31:33 +01003137 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07003138 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07003139
Chris Wilson14415742012-04-17 15:31:33 +01003140 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson14415742012-04-17 15:31:33 +01003141
Chris Wilson9ce079e2012-04-17 15:31:30 +01003142 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003143}
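/*
 * Usage sketch (assumptions: struct_mutex held and the object already bound
 * into the mappable GGTT, e.g. before tiled CPU access through the aperture
 * or before scanout):
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret)
 *		return ret;
 *	// GTT accesses to obj now go through a fence that handles the tiling
 */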
3144
Chris Wilson42d6ab42012-07-26 11:49:32 +01003145static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3146 struct drm_mm_node *gtt_space,
3147 unsigned long cache_level)
3148{
3149 struct drm_mm_node *other;
3150
3151 /* On non-LLC machines we have to be careful when putting differing
3152 * types of snoopable memory together to avoid the prefetcher
Damien Lespiau4239ca72012-12-03 16:26:16 +00003153 * crossing memory domains and dying.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003154 */
3155 if (HAS_LLC(dev))
3156 return true;
3157
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003158 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003159 return true;
3160
3161 if (list_empty(&gtt_space->node_list))
3162 return true;
3163
3164 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3165 if (other->allocated && !other->hole_follows && other->color != cache_level)
3166 return false;
3167
3168 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3169 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3170 return false;
3171
3172 return true;
3173}
3174
3175static void i915_gem_verify_gtt(struct drm_device *dev)
3176{
3177#if WATCH_GTT
3178 struct drm_i915_private *dev_priv = dev->dev_private;
3179 struct drm_i915_gem_object *obj;
3180 int err = 0;
3181
Ben Widawsky35c20a62013-05-31 11:28:48 -07003182 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
Chris Wilson42d6ab42012-07-26 11:49:32 +01003183 if (obj->gtt_space == NULL) {
3184 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3185 err++;
3186 continue;
3187 }
3188
3189 if (obj->cache_level != obj->gtt_space->color) {
3190 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003191 i915_gem_obj_ggtt_offset(obj),
3192 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003193 obj->cache_level,
3194 obj->gtt_space->color);
3195 err++;
3196 continue;
3197 }
3198
3199 if (!i915_gem_valid_gtt_space(dev,
3200 obj->gtt_space,
3201 obj->cache_level)) {
3202 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003203 i915_gem_obj_ggtt_offset(obj),
3204 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003205 obj->cache_level);
3206 err++;
3207 continue;
3208 }
3209 }
3210
3211 WARN_ON(err);
3212#endif
3213}
3214
Jesse Barnesde151cf2008-11-12 10:03:55 -08003215/**
Eric Anholt673a3942008-07-30 12:06:12 -07003216 * Finds free space in the GTT aperture and binds the object there.
3217 */
3218static int
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003219i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3220 struct i915_address_space *vm,
3221 unsigned alignment,
3222 bool map_and_fenceable,
3223 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003224{
Chris Wilson05394f32010-11-08 19:18:58 +00003225 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003226 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter5e783302010-11-14 22:32:36 +01003227 u32 size, fence_size, fence_alignment, unfenced_alignment;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003228 size_t gtt_max =
3229 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
Ben Widawsky2f633152013-07-17 12:19:03 -07003230 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003231 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003232
Chris Wilsone28f8712011-07-18 13:11:49 -07003233 fence_size = i915_gem_get_gtt_size(dev,
3234 obj->base.size,
3235 obj->tiling_mode);
3236 fence_alignment = i915_gem_get_gtt_alignment(dev,
3237 obj->base.size,
Imre Deakd8651102013-01-07 21:47:33 +02003238 obj->tiling_mode, true);
Chris Wilsone28f8712011-07-18 13:11:49 -07003239 unfenced_alignment =
Imre Deakd8651102013-01-07 21:47:33 +02003240 i915_gem_get_gtt_alignment(dev,
Chris Wilsone28f8712011-07-18 13:11:49 -07003241 obj->base.size,
Imre Deakd8651102013-01-07 21:47:33 +02003242 obj->tiling_mode, false);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003243
Eric Anholt673a3942008-07-30 12:06:12 -07003244 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01003245 alignment = map_and_fenceable ? fence_alignment :
3246 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003247 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003248 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
Eric Anholt673a3942008-07-30 12:06:12 -07003249 return -EINVAL;
3250 }
3251
Chris Wilson05394f32010-11-08 19:18:58 +00003252 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003253
Chris Wilson654fc602010-05-27 13:18:21 +01003254 /* If the object is bigger than the entire aperture, reject it early
3255 * before evicting everything in a vain attempt to find space.
3256 */
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003257 if (obj->base.size > gtt_max) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003258 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
Chris Wilsona36689c2013-05-21 16:58:49 +01003259 obj->base.size,
3260 map_and_fenceable ? "mappable" : "total",
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003261 gtt_max);
Chris Wilson654fc602010-05-27 13:18:21 +01003262 return -E2BIG;
3263 }
3264
Chris Wilson37e680a2012-06-07 15:38:42 +01003265 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003266 if (ret)
3267 return ret;
3268
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003269 i915_gem_object_pin_pages(obj);
3270
Ben Widawskyaccfef22013-08-14 11:38:35 +02003271 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
Dan Carpenterdb473b32013-07-19 08:45:46 +03003272 if (IS_ERR(vma)) {
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003273 ret = PTR_ERR(vma);
3274 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003275 }
3276
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003277search_free:
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003278 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003279 size, alignment,
David Herrmann31e5d7c2013-07-27 13:36:27 +02003280 obj->cache_level, 0, gtt_max,
3281 DRM_MM_SEARCH_DEFAULT);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003282 if (ret) {
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07003283 ret = i915_gem_evict_something(dev, vm, size, alignment,
Chris Wilson42d6ab42012-07-26 11:49:32 +01003284 obj->cache_level,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003285 map_and_fenceable,
3286 nonblocking);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003287 if (ret == 0)
3288 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003289
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003290 goto err_free_vma;
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003291 }
Ben Widawsky2f633152013-07-17 12:19:03 -07003292 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003293 obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003294 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003295 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003296 }
3297
Daniel Vetter74163902012-02-15 23:50:21 +01003298 ret = i915_gem_gtt_prepare_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003299 if (ret)
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003300 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003301
Ben Widawsky35c20a62013-05-31 11:28:48 -07003302 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -07003303 list_add_tail(&vma->mm_list, &vm->inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003304
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003305 if (i915_is_ggtt(vm)) {
3306 bool mappable, fenceable;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003307
Daniel Vetter49987092013-08-14 10:21:23 +02003308 fenceable = (vma->node.size == fence_size &&
3309 (vma->node.start & (fence_alignment - 1)) == 0);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003310
Daniel Vetter49987092013-08-14 10:21:23 +02003311 mappable = (vma->node.start + obj->base.size <=
3312 dev_priv->gtt.mappable_end);
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003313
Ben Widawsky5cacaac2013-07-31 17:00:13 -07003314 obj->map_and_fenceable = mappable && fenceable;
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003315 }
Daniel Vetter75e9e912010-11-04 17:11:09 +01003316
Ben Widawsky7ace7ef2013-08-09 22:12:12 -07003317 WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003318
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003319 trace_i915_vma_bind(vma, map_and_fenceable);
Chris Wilson42d6ab42012-07-26 11:49:32 +01003320 i915_gem_verify_gtt(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003321 return 0;
Ben Widawsky2f633152013-07-17 12:19:03 -07003322
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003323err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003324 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003325err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003326 i915_gem_vma_destroy(vma);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003327err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003328 i915_gem_object_unpin_pages(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003329 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003330}
3331
Chris Wilson000433b2013-08-08 14:41:09 +01003332bool
Chris Wilson2c225692013-08-09 12:26:45 +01003333i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3334 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003335{
Eric Anholt673a3942008-07-30 12:06:12 -07003336 /* If we don't have a page list set up, then we're not pinned
3337 * to GPU, and we can ignore the cache flush because it'll happen
3338 * again at bind time.
3339 */
Chris Wilson05394f32010-11-08 19:18:58 +00003340 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003341 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003342
Imre Deak769ce462013-02-13 21:56:05 +02003343 /*
3344 * Stolen memory is always coherent with the GPU as it is explicitly
3345 * marked as wc by the system, or the system is cache-coherent.
3346 */
3347 if (obj->stolen)
Chris Wilson000433b2013-08-08 14:41:09 +01003348 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003349
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003350 /* If the GPU is snooping the contents of the CPU cache,
3351 * we do not need to manually clear the CPU cache lines. However,
3352 * the caches are only snooped when the render cache is
3353 * flushed/invalidated. As we always have to emit invalidations
3354 * and flushes when moving into and out of the RENDER domain, correct
3355 * snooping behaviour occurs naturally as the result of our domain
3356 * tracking.
3357 */
Chris Wilson2c225692013-08-09 12:26:45 +01003358 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
Chris Wilson000433b2013-08-08 14:41:09 +01003359 return false;
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003360
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003361 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003362 drm_clflush_sg(obj->pages);
Chris Wilson000433b2013-08-08 14:41:09 +01003363
3364 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003365}
3366
3367/** Flushes the GTT write domain for the object if it's dirty. */
3368static void
Chris Wilson05394f32010-11-08 19:18:58 +00003369i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003370{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003371 uint32_t old_write_domain;
3372
Chris Wilson05394f32010-11-08 19:18:58 +00003373 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003374 return;
3375
Chris Wilson63256ec2011-01-04 18:42:07 +00003376 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003377 * to it immediately go to main memory as far as we know, so there's
3378 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003379 *
3380 * However, we do have to enforce the order so that all writes through
3381 * the GTT land before any writes to the device, such as updates to
3382 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08003383 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003384 wmb();
3385
Chris Wilson05394f32010-11-08 19:18:58 +00003386 old_write_domain = obj->base.write_domain;
3387 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003388
3389 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003390 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003391 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003392}
3393
3394/** Flushes the CPU write domain for the object if it's dirty. */
3395static void
Chris Wilson2c225692013-08-09 12:26:45 +01003396i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3397 bool force)
Eric Anholte47c68e2008-11-14 13:35:19 -08003398{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003399 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08003400
Chris Wilson05394f32010-11-08 19:18:58 +00003401 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003402 return;
3403
Chris Wilson000433b2013-08-08 14:41:09 +01003404 if (i915_gem_clflush_object(obj, force))
3405 i915_gem_chipset_flush(obj->base.dev);
3406
Chris Wilson05394f32010-11-08 19:18:58 +00003407 old_write_domain = obj->base.write_domain;
3408 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003409
3410 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003411 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003412 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003413}
3414
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003415/**
3416 * Moves a single object to the GTT read, and possibly write domain.
3417 *
3418 * This function returns when the move is complete, including waiting on
3419 * flushes to occur.
3420 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003421int
Chris Wilson20217462010-11-23 15:26:33 +00003422i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003423{
Chris Wilson8325a092012-04-24 15:52:35 +01003424 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003425 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003426 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003427
Eric Anholt02354392008-11-26 13:58:13 -08003428 /* Not valid to be called on unbound objects. */
Ben Widawsky98438772013-07-31 17:00:12 -07003429 if (!i915_gem_obj_bound_any(obj))
Eric Anholt02354392008-11-26 13:58:13 -08003430 return -EINVAL;
3431
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003432 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3433 return 0;
3434
Chris Wilson0201f1e2012-07-20 12:41:01 +01003435 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003436 if (ret)
3437 return ret;
3438
Chris Wilson2c225692013-08-09 12:26:45 +01003439 i915_gem_object_flush_cpu_write_domain(obj, false);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003440
Chris Wilsond0a57782012-10-09 19:24:37 +01003441 /* Serialise direct access to this object with the barriers for
3442 * coherent writes from the GPU, by effectively invalidating the
3443 * GTT domain upon first access.
3444 */
3445 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3446 mb();
3447
Chris Wilson05394f32010-11-08 19:18:58 +00003448 old_write_domain = obj->base.write_domain;
3449 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003450
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003451 /* It should now be out of any other write domains, and we can update
3452 * the domain values for our changes.
3453 */
Chris Wilson05394f32010-11-08 19:18:58 +00003454 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3455 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003456 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003457 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3458 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3459 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003460 }
3461
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003462 trace_i915_gem_object_change_domain(obj,
3463 old_read_domains,
3464 old_write_domain);
3465
Chris Wilson8325a092012-04-24 15:52:35 +01003466 /* And bump the LRU for this access */
Ben Widawskyca191b12013-07-31 17:00:14 -07003467 if (i915_gem_object_is_inactive(obj)) {
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07003468 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Ben Widawskyca191b12013-07-31 17:00:14 -07003469 if (vma)
3470 list_move_tail(&vma->mm_list,
3471 &dev_priv->gtt.base.inactive_list);
3472
3473 }
Chris Wilson8325a092012-04-24 15:52:35 +01003474
Eric Anholte47c68e2008-11-14 13:35:19 -08003475 return 0;
3476}
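/*
 * Usage sketch (assumption: struct_mutex held), e.g. before writing to the
 * object through a GTT mapping:
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	// CPU caches have been flushed and the object is now tracked as
 *	// dirty in the GTT domain
 */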
3477
Chris Wilsone4ffd172011-04-04 09:44:39 +01003478int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3479 enum i915_cache_level cache_level)
3480{
Daniel Vetter7bddb012012-02-09 17:15:47 +01003481 struct drm_device *dev = obj->base.dev;
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003482 struct i915_vma *vma;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003483 int ret;
3484
3485 if (obj->cache_level == cache_level)
3486 return 0;
3487
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003488 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003489 DRM_DEBUG("can not change the cache level of pinned objects\n");
3490 return -EBUSY;
3491 }
3492
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003493 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3494 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003495 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003496 if (ret)
3497 return ret;
3498
3499 break;
3500 }
Chris Wilson42d6ab42012-07-26 11:49:32 +01003501 }
3502
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003503 if (i915_gem_obj_bound_any(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003504 ret = i915_gem_object_finish_gpu(obj);
3505 if (ret)
3506 return ret;
3507
3508 i915_gem_object_finish_gtt(obj);
3509
3510 /* Before SandyBridge, you could not use tiling or fence
3511 * registers with snooped memory, so relinquish any fences
3512 * currently pointing to our region in the aperture.
3513 */
Chris Wilson42d6ab42012-07-26 11:49:32 +01003514 if (INTEL_INFO(dev)->gen < 6) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003515 ret = i915_gem_object_put_fence(obj);
3516 if (ret)
3517 return ret;
3518 }
3519
Ben Widawsky6f65e292013-12-06 14:10:56 -08003520 list_for_each_entry(vma, &obj->vma_list, vma_link)
3521 vma->bind_vma(vma, cache_level, 0);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003522 }
3523
Chris Wilson2c225692013-08-09 12:26:45 +01003524 list_for_each_entry(vma, &obj->vma_list, vma_link)
3525 vma->node.color = cache_level;
3526 obj->cache_level = cache_level;
3527
3528 if (cpu_write_needs_clflush(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003529 u32 old_read_domains, old_write_domain;
3530
3531 /* If we're coming from LLC cached, then we haven't
3532 * actually been tracking whether the data is in the
3533 * CPU cache or not, since we only allow one bit set
3534 * in obj->write_domain and have been skipping the clflushes.
3535 * Just set it to the CPU cache for now.
3536 */
3537 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003538
3539 old_read_domains = obj->base.read_domains;
3540 old_write_domain = obj->base.write_domain;
3541
3542 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3543 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3544
3545 trace_i915_gem_object_change_domain(obj,
3546 old_read_domains,
3547 old_write_domain);
3548 }
3549
Chris Wilson42d6ab42012-07-26 11:49:32 +01003550 i915_gem_verify_gtt(dev);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003551 return 0;
3552}
3553
Ben Widawsky199adf42012-09-21 17:01:20 -07003554int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3555 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003556{
Ben Widawsky199adf42012-09-21 17:01:20 -07003557 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003558 struct drm_i915_gem_object *obj;
3559 int ret;
3560
3561 ret = i915_mutex_lock_interruptible(dev);
3562 if (ret)
3563 return ret;
3564
3565 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3566 if (&obj->base == NULL) {
3567 ret = -ENOENT;
3568 goto unlock;
3569 }
3570
Chris Wilson651d7942013-08-08 14:41:10 +01003571 switch (obj->cache_level) {
3572 case I915_CACHE_LLC:
3573 case I915_CACHE_L3_LLC:
3574 args->caching = I915_CACHING_CACHED;
3575 break;
3576
Chris Wilson4257d3b2013-08-08 14:41:11 +01003577 case I915_CACHE_WT:
3578 args->caching = I915_CACHING_DISPLAY;
3579 break;
3580
Chris Wilson651d7942013-08-08 14:41:10 +01003581 default:
3582 args->caching = I915_CACHING_NONE;
3583 break;
3584 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003585
3586 drm_gem_object_unreference(&obj->base);
3587unlock:
3588 mutex_unlock(&dev->struct_mutex);
3589 return ret;
3590}
3591
Ben Widawsky199adf42012-09-21 17:01:20 -07003592int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3593 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003594{
Ben Widawsky199adf42012-09-21 17:01:20 -07003595 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003596 struct drm_i915_gem_object *obj;
3597 enum i915_cache_level level;
3598 int ret;
3599
Ben Widawsky199adf42012-09-21 17:01:20 -07003600 switch (args->caching) {
3601 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003602 level = I915_CACHE_NONE;
3603 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003604 case I915_CACHING_CACHED:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003605 level = I915_CACHE_LLC;
3606 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003607 case I915_CACHING_DISPLAY:
3608 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3609 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003610 default:
3611 return -EINVAL;
3612 }
3613
Ben Widawsky3bc29132012-09-26 16:15:20 -07003614 ret = i915_mutex_lock_interruptible(dev);
3615 if (ret)
3616 return ret;
3617
Chris Wilsone6994ae2012-07-10 10:27:08 +01003618 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3619 if (&obj->base == NULL) {
3620 ret = -ENOENT;
3621 goto unlock;
3622 }
3623
3624 ret = i915_gem_object_set_cache_level(obj, level);
3625
3626 drm_gem_object_unreference(&obj->base);
3627unlock:
3628 mutex_unlock(&dev->struct_mutex);
3629 return ret;
3630}
3631
Chris Wilsoncc98b412013-08-09 12:25:09 +01003632static bool is_pin_display(struct drm_i915_gem_object *obj)
3633{
3634 /* There are 3 sources that pin objects:
3635 * 1. The display engine (scanouts, sprites, cursors);
3636 * 2. Reservations for execbuffer;
3637 * 3. The user.
3638 *
3639 * We can ignore reservations as we hold the struct_mutex and
3640 * are only called outside of the reservation path. The user
3641 * can only increment pin_count once, and so if after
3642 * subtracting the potential reference by the user, any pin_count
3643 * remains, it must be due to another use by the display engine.
3644 */
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003645 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003646}
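
/*
 * Worked example of the accounting above (illustrative numbers): if the user
 * has pinned the object once (user_pin_count == 1) and a scanout has pinned
 * it as well, the GGTT vma has pin_count == 2; subtracting the user reference
 * leaves 1, so the object still counts as pinned for display.  With only the
 * user pin, 1 - !!1 == 0 and pin_display may be cleared.
 */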
3647
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003648/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003649 * Prepare buffer for display plane (scanout, cursors, etc.).
3650 * Can be called from an uninterruptible phase (modesetting) and allows
3651 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003652 */
3653int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003654i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3655 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00003656 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003657{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003658 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003659 int ret;
3660
Chris Wilson0be73282010-12-06 14:36:27 +00003661 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07003662 ret = i915_gem_object_sync(obj, pipelined);
3663 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003664 return ret;
3665 }
3666
Chris Wilsoncc98b412013-08-09 12:25:09 +01003667 /* Mark the pin_display early so that we account for the
3668 * display coherency whilst setting up the cache domains.
3669 */
3670 obj->pin_display = true;
3671
Eric Anholta7ef0642011-03-29 16:59:54 -07003672 /* The display engine is not coherent with the LLC cache on gen6. As
3673 * a result, we make sure that the pinning that is about to occur is
3674 * done with uncached PTEs. This is the lowest common denominator for all
3675 * chipsets.
3676 *
3677 * However for gen6+, we could do better by using the GFDT bit instead
3678 * of uncaching, which would allow us to flush all the LLC-cached data
3679 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3680 */
Chris Wilson651d7942013-08-08 14:41:10 +01003681 ret = i915_gem_object_set_cache_level(obj,
3682 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Eric Anholta7ef0642011-03-29 16:59:54 -07003683 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003684 goto err_unpin_display;
Eric Anholta7ef0642011-03-29 16:59:54 -07003685
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003686 /* As the user may map the buffer once pinned in the display plane
3687 * (e.g. libkms for the bootup splash), we have to ensure that we
3688 * always use map_and_fenceable for all scanout buffers.
3689 */
Ben Widawskyc37e2202013-07-31 16:59:58 -07003690 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003691 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003692 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003693
Chris Wilson2c225692013-08-09 12:26:45 +01003694 i915_gem_object_flush_cpu_write_domain(obj, true);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003695
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003696 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003697 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003698
3699 /* It should now be out of any other write domains, and we can update
3700 * the domain values for our changes.
3701 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003702 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003703 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003704
3705 trace_i915_gem_object_change_domain(obj,
3706 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003707 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003708
3709 return 0;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003710
3711err_unpin_display:
3712 obj->pin_display = is_pin_display(obj);
3713 return ret;
3714}
3715
3716void
3717i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3718{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003719 i915_gem_object_ggtt_unpin(obj);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003720 obj->pin_display = is_pin_display(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003721}
3722
Chris Wilson85345512010-11-13 09:49:11 +00003723int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003724i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003725{
Chris Wilson88241782011-01-07 17:09:48 +00003726 int ret;
3727
Chris Wilsona8198ee2011-04-13 22:04:09 +01003728 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003729 return 0;
3730
Chris Wilson0201f1e2012-07-20 12:41:01 +01003731 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsonc501ae72011-12-14 13:57:23 +01003732 if (ret)
3733 return ret;
3734
Chris Wilsona8198ee2011-04-13 22:04:09 +01003735 /* Ensure that we invalidate the GPU's caches and TLBs. */
3736 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003737 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003738}
3739
Eric Anholte47c68e2008-11-14 13:35:19 -08003740/**
3741 * Moves a single object to the CPU read, and possibly write, domain.
3742 *
3743 * This function returns when the move is complete, including waiting on
3744 * flushes to occur.
3745 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003746int
Chris Wilson919926a2010-11-12 13:42:53 +00003747i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003748{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003749 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003750 int ret;
3751
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003752 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3753 return 0;
3754
Chris Wilson0201f1e2012-07-20 12:41:01 +01003755 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003756 if (ret)
3757 return ret;
3758
Eric Anholte47c68e2008-11-14 13:35:19 -08003759 i915_gem_object_flush_gtt_write_domain(obj);
3760
Chris Wilson05394f32010-11-08 19:18:58 +00003761 old_write_domain = obj->base.write_domain;
3762 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003763
Eric Anholte47c68e2008-11-14 13:35:19 -08003764 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003765 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003766 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003767
Chris Wilson05394f32010-11-08 19:18:58 +00003768 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003769 }
3770
3771 /* It should now be out of any other write domains, and we can update
3772 * the domain values for our changes.
3773 */
Chris Wilson05394f32010-11-08 19:18:58 +00003774 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003775
3776 /* If we're writing through the CPU, then the GPU read domains will
3777 * need to be invalidated at next use.
3778 */
3779 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003780 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3781 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003782 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003783
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003784 trace_i915_gem_object_change_domain(obj,
3785 old_read_domains,
3786 old_write_domain);
3787
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003788 return 0;
3789}
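
/*
 * Illustrative read-back sketch (not driver code; error handling elided):
 * before dereferencing the object's backing pages directly, move it to the
 * CPU read domain so that pending GTT writes are flushed and stale
 * cachelines are invalidated:
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 *	if (ret == 0) {
 *		... read the shmem pages, e.g. via kmap ...
 *	}
 */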
3790
Eric Anholt673a3942008-07-30 12:06:12 -07003791/* Throttle our rendering by waiting until the ring has completed our requests
3792 * emitted over 20 msec ago.
3793 *
Eric Anholtb9624422009-06-03 07:27:35 +00003794 * Note that if we were to use the current jiffies each time around the loop,
3795 * we wouldn't escape the function with any frames outstanding if the time to
3796 * render a frame was over 20ms.
3797 *
Eric Anholt673a3942008-07-30 12:06:12 -07003798 * This should get us reasonable parallelism between CPU and GPU but also
3799 * relatively low latency when blocking on a particular request to finish.
3800 */
3801static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003802i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003803{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003804 struct drm_i915_private *dev_priv = dev->dev_private;
3805 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003806 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003807 struct drm_i915_gem_request *request;
3808 struct intel_ring_buffer *ring = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01003809 unsigned reset_counter;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003810 u32 seqno = 0;
3811 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003812
Daniel Vetter308887a2012-11-14 17:14:06 +01003813 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3814 if (ret)
3815 return ret;
3816
3817 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3818 if (ret)
3819 return ret;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003820
Chris Wilson1c255952010-09-26 11:03:27 +01003821 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003822 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003823 if (time_after_eq(request->emitted_jiffies, recent_enough))
3824 break;
3825
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003826 ring = request->ring;
3827 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003828 }
Daniel Vetterf69061b2012-12-06 09:01:42 +01003829 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson1c255952010-09-26 11:03:27 +01003830 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003831
3832 if (seqno == 0)
3833 return 0;
3834
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003835 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003836 if (ret == 0)
3837 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003838
Eric Anholt673a3942008-07-30 12:06:12 -07003839 return ret;
3840}
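
/*
 * Worked example of the window above (illustrative): at a 60Hz flip rate a
 * frame takes roughly 16ms, so waiting only on requests "emitted over 20
 * msec ago" allows a little more than one frame's worth of work to remain
 * outstanding before the caller is made to wait, keeping the CPU from
 * running arbitrarily far ahead of the GPU while still overlapping their
 * work.
 */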
3841
Eric Anholt673a3942008-07-30 12:06:12 -07003842int
Chris Wilson05394f32010-11-08 19:18:58 +00003843i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07003844 struct i915_address_space *vm,
Chris Wilson05394f32010-11-08 19:18:58 +00003845 uint32_t alignment,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003846 bool map_and_fenceable,
3847 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003848{
Ben Widawsky6f65e292013-12-06 14:10:56 -08003849 const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003850 struct i915_vma *vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003851 int ret;
3852
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003853 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3854
3855 vma = i915_gem_obj_to_vma(obj, vm);
3856
3857 if (vma) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003858 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3859 return -EBUSY;
3860
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003861 if ((alignment &&
3862 vma->node.start & (alignment - 1)) ||
Chris Wilson05394f32010-11-08 19:18:58 +00003863 (map_and_fenceable && !obj->map_and_fenceable)) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003864 WARN(vma->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003865 "bo is already pinned with incorrect alignment:"
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003866 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003867 " obj->map_and_fenceable=%d\n",
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003868 i915_gem_obj_offset(obj, vm), alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003869 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003870 obj->map_and_fenceable);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003871 ret = i915_vma_unbind(vma);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003872 if (ret)
3873 return ret;
3874 }
3875 }
3876
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003877 if (!i915_gem_obj_bound(obj, vm)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003878 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3879 map_and_fenceable,
3880 nonblocking);
Chris Wilson97311292009-09-21 00:22:34 +01003881 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003882 return ret;
Chris Wilson87422672012-11-21 13:04:03 +00003883
Chris Wilson22c344e2009-02-11 14:26:45 +00003884 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003885
Ben Widawsky6f65e292013-12-06 14:10:56 -08003886 vma = i915_gem_obj_to_vma(obj, vm);
Daniel Vetter74898d72012-02-15 23:50:22 +01003887
Ben Widawsky6f65e292013-12-06 14:10:56 -08003888 vma->bind_vma(vma, obj->cache_level, flags);
Jesse Barnes79e53942008-11-07 14:24:08 -08003889
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003890 i915_gem_obj_to_vma(obj, vm)->pin_count++;
Chris Wilson6299f992010-11-24 12:23:44 +00003891 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003892
3893 return 0;
3894}
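
/*
 * Relationship to the GGTT helpers used elsewhere in this file (assumed to
 * be thin wrappers, see i915_drv.h): pinning into the global GTT is simply
 * this call with the GGTT address space, e.g.
 *
 *	i915_gem_obj_ggtt_pin(obj, alignment, true, false);
 *
 * behaves like
 *
 *	i915_gem_object_pin(obj, &dev_priv->gtt.base, alignment, true, false);
 *
 * and is undone with i915_gem_object_ggtt_unpin(obj) once the caller is
 * finished with the mapping.
 */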
3895
3896void
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003897i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003898{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003899 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003900
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003901 BUG_ON(!vma);
3902 BUG_ON(vma->pin_count == 0);
3903 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3904
3905 if (--vma->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003906 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003907}
3908
3909int
3910i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003911 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003912{
3913 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003914 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003915 int ret;
3916
Daniel Vetter02f6bcc2013-12-18 16:30:22 +01003917 if (INTEL_INFO(dev)->gen >= 6)
3918 return -ENODEV;
3919
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003920 ret = i915_mutex_lock_interruptible(dev);
3921 if (ret)
3922 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003923
Chris Wilson05394f32010-11-08 19:18:58 +00003924 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003925 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003926 ret = -ENOENT;
3927 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003928 }
Eric Anholt673a3942008-07-30 12:06:12 -07003929
Chris Wilson05394f32010-11-08 19:18:58 +00003930 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003931 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00003932 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003933 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003934 }
3935
Chris Wilson05394f32010-11-08 19:18:58 +00003936 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003937 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
Eric Anholt673a3942008-07-30 12:06:12 -07003938 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003939 ret = -EINVAL;
3940 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003941 }
3942
Daniel Vetteraa5f8022013-10-10 14:46:37 +02003943 if (obj->user_pin_count == ULONG_MAX) {
3944 ret = -EBUSY;
3945 goto out;
3946 }
3947
Chris Wilson93be8782013-01-02 10:31:22 +00003948 if (obj->user_pin_count == 0) {
Ben Widawskyc37e2202013-07-31 16:59:58 -07003949 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003950 if (ret)
3951 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003952 }
3953
Chris Wilson93be8782013-01-02 10:31:22 +00003954 obj->user_pin_count++;
3955 obj->pin_filp = file;
3956
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003957 args->offset = i915_gem_obj_ggtt_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003958out:
Chris Wilson05394f32010-11-08 19:18:58 +00003959 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003960unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003961 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003962 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003963}
3964
3965int
3966i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003967 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003968{
3969 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003970 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003971 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003972
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003973 ret = i915_mutex_lock_interruptible(dev);
3974 if (ret)
3975 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003976
Chris Wilson05394f32010-11-08 19:18:58 +00003977 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003978 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003979 ret = -ENOENT;
3980 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003981 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003982
Chris Wilson05394f32010-11-08 19:18:58 +00003983 if (obj->pin_filp != file) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003984 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
Eric Anholt673a3942008-07-30 12:06:12 -07003985 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003986 ret = -EINVAL;
3987 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003988 }
Chris Wilson05394f32010-11-08 19:18:58 +00003989 obj->user_pin_count--;
3990 if (obj->user_pin_count == 0) {
3991 obj->pin_filp = NULL;
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003992 i915_gem_object_ggtt_unpin(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003993 }
3994
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003995out:
Chris Wilson05394f32010-11-08 19:18:58 +00003996 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003997unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003998 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003999 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004000}
4001
4002int
4003i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00004004 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07004005{
4006 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004007 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01004008 int ret;
4009
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004010 ret = i915_mutex_lock_interruptible(dev);
4011 if (ret)
4012 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004013
Chris Wilson05394f32010-11-08 19:18:58 +00004014 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004015 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004016 ret = -ENOENT;
4017 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004018 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08004019
Chris Wilson0be555b2010-08-04 15:36:30 +01004020 /* Count all active objects as busy, even if they are currently not used
4021 * by the gpu. Users of this interface expect objects to eventually
4022 * become non-busy without any further actions, therefore emit any
4023 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08004024 */
Daniel Vetter30dfebf2012-06-01 15:21:23 +02004025 ret = i915_gem_object_flush_active(obj);
4026
Chris Wilson05394f32010-11-08 19:18:58 +00004027 args->busy = obj->active;
Chris Wilsone9808ed2012-07-04 12:25:08 +01004028 if (obj->ring) {
4029 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4030 args->busy |= intel_ring_flag(obj->ring) << 16;
4031 }
Eric Anholt673a3942008-07-30 12:06:12 -07004032
Chris Wilson05394f32010-11-08 19:18:58 +00004033 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004034unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004035 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004036 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004037}
4038
4039int
4040i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4041 struct drm_file *file_priv)
4042{
Akshay Joshi0206e352011-08-16 15:34:10 -04004043 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004044}
4045
Chris Wilson3ef94da2009-09-14 16:50:29 +01004046int
4047i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4048 struct drm_file *file_priv)
4049{
4050 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004051 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004052 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004053
4054 switch (args->madv) {
4055 case I915_MADV_DONTNEED:
4056 case I915_MADV_WILLNEED:
4057 break;
4058 default:
4059 return -EINVAL;
4060 }
4061
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004062 ret = i915_mutex_lock_interruptible(dev);
4063 if (ret)
4064 return ret;
4065
Chris Wilson05394f32010-11-08 19:18:58 +00004066 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004067 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004068 ret = -ENOENT;
4069 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004070 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01004071
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004072 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004073 ret = -EINVAL;
4074 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004075 }
4076
Chris Wilson05394f32010-11-08 19:18:58 +00004077 if (obj->madv != __I915_MADV_PURGED)
4078 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004079
Chris Wilson6c085a72012-08-20 11:40:46 +02004080 /* if the object is no longer attached, discard its backing storage */
4081 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01004082 i915_gem_object_truncate(obj);
4083
Chris Wilson05394f32010-11-08 19:18:58 +00004084 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004085
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004086out:
Chris Wilson05394f32010-11-08 19:18:58 +00004087 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004088unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004089 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004090 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004091}
4092
Chris Wilson37e680a2012-06-07 15:38:42 +01004093void i915_gem_object_init(struct drm_i915_gem_object *obj,
4094 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004095{
Ben Widawsky35c20a62013-05-31 11:28:48 -07004096 INIT_LIST_HEAD(&obj->global_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004097 INIT_LIST_HEAD(&obj->ring_list);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02004098 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004099 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004100
Chris Wilson37e680a2012-06-07 15:38:42 +01004101 obj->ops = ops;
4102
Chris Wilson0327d6b2012-08-11 15:41:06 +01004103 obj->fence_reg = I915_FENCE_REG_NONE;
4104 obj->madv = I915_MADV_WILLNEED;
4105 /* Avoid an unnecessary call to unbind on the first bind. */
4106 obj->map_and_fenceable = true;
4107
4108 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4109}
4110
Chris Wilson37e680a2012-06-07 15:38:42 +01004111static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4112 .get_pages = i915_gem_object_get_pages_gtt,
4113 .put_pages = i915_gem_object_put_pages_gtt,
4114};
4115
Chris Wilson05394f32010-11-08 19:18:58 +00004116struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4117 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004118{
Daniel Vetterc397b902010-04-09 19:05:07 +00004119 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004120 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004121 gfp_t mask;
Daniel Vetterc397b902010-04-09 19:05:07 +00004122
Chris Wilson42dcedd2012-11-15 11:32:30 +00004123 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004124 if (obj == NULL)
4125 return NULL;
4126
4127 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
Chris Wilson42dcedd2012-11-15 11:32:30 +00004128 i915_gem_object_free(obj);
Daniel Vetterc397b902010-04-09 19:05:07 +00004129 return NULL;
4130 }
4131
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004132 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4133 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4134 /* 965gm cannot relocate objects above 4GiB. */
4135 mask &= ~__GFP_HIGHMEM;
4136 mask |= __GFP_DMA32;
4137 }
4138
Al Viro496ad9a2013-01-23 17:07:38 -05004139 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004140 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004141
Chris Wilson37e680a2012-06-07 15:38:42 +01004142 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004143
Daniel Vetterc397b902010-04-09 19:05:07 +00004144 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4145 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4146
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004147 if (HAS_LLC(dev)) {
4148 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004149 * cache) for about a 10% performance improvement
4150 * compared to uncached. Graphics requests other than
4151 * display scanout are coherent with the CPU in
4152 * accessing this cache. This means in this mode we
4153 * don't need to clflush on the CPU side, and on the
4154 * GPU side we only need to flush internal caches to
4155 * get data visible to the CPU.
4156 *
4157 * However, we maintain the display planes as UC, and so
4158 * need to rebind when first used as such.
4159 */
4160 obj->cache_level = I915_CACHE_LLC;
4161 } else
4162 obj->cache_level = I915_CACHE_NONE;
4163
Daniel Vetterd861e332013-07-24 23:25:03 +02004164 trace_i915_gem_object_create(obj);
4165
Chris Wilson05394f32010-11-08 19:18:58 +00004166 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004167}
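
/*
 * Illustrative object lifecycle (sketch only; error handling elided and
 * dev->struct_mutex is assumed to be held around the GEM calls):
 *
 *	obj = i915_gem_alloc_object(dev, 4096);
 *	if (obj) {
 *		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
 *		... use the mapping ...
 *		i915_gem_object_ggtt_unpin(obj);
 *		drm_gem_object_unreference(&obj->base);
 *	}
 *
 * Dropping the last reference ends up in i915_gem_free_object() below.
 */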
4168
Chris Wilson1488fc02012-04-24 15:47:31 +01004169void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004170{
Chris Wilson1488fc02012-04-24 15:47:31 +01004171 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004172 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01004173 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004174 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004175
Paulo Zanonif65c9162013-11-27 18:20:34 -02004176 intel_runtime_pm_get(dev_priv);
4177
Chris Wilson26e12f892011-03-20 11:20:19 +00004178 trace_i915_gem_object_destroy(obj);
4179
Chris Wilson1488fc02012-04-24 15:47:31 +01004180 if (obj->phys_obj)
4181 i915_gem_detach_phys_object(dev, obj);
4182
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004183 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004184 int ret;
4185
4186 vma->pin_count = 0;
4187 ret = i915_vma_unbind(vma);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004188 if (WARN_ON(ret == -ERESTARTSYS)) {
4189 bool was_interruptible;
Chris Wilson1488fc02012-04-24 15:47:31 +01004190
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004191 was_interruptible = dev_priv->mm.interruptible;
4192 dev_priv->mm.interruptible = false;
Chris Wilson1488fc02012-04-24 15:47:31 +01004193
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004194 WARN_ON(i915_vma_unbind(vma));
Chris Wilson1488fc02012-04-24 15:47:31 +01004195
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004196 dev_priv->mm.interruptible = was_interruptible;
4197 }
Chris Wilson1488fc02012-04-24 15:47:31 +01004198 }
4199
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004200 /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4201 * before progressing. */
4202 if (obj->stolen)
4203 i915_gem_object_unpin_pages(obj);
4204
Ben Widawsky401c29f2013-05-31 11:28:47 -07004205 if (WARN_ON(obj->pages_pin_count))
4206 obj->pages_pin_count = 0;
Chris Wilson37e680a2012-06-07 15:38:42 +01004207 i915_gem_object_put_pages(obj);
Chris Wilsond8cb5082012-08-11 15:41:03 +01004208 i915_gem_object_free_mmap_offset(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +00004209 i915_gem_object_release_stolen(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004210
Chris Wilson9da3da62012-06-01 15:20:22 +01004211 BUG_ON(obj->pages);
4212
Chris Wilson2f745ad2012-09-04 21:02:58 +01004213 if (obj->base.import_attach)
4214 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004215
Chris Wilson05394f32010-11-08 19:18:58 +00004216 drm_gem_object_release(&obj->base);
4217 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004218
Chris Wilson05394f32010-11-08 19:18:58 +00004219 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004220 i915_gem_object_free(obj);
Paulo Zanonif65c9162013-11-27 18:20:34 -02004221
4222 intel_runtime_pm_put(dev_priv);
Chris Wilsonbe726152010-07-23 23:18:50 +01004223}
4224
Daniel Vettere656a6c2013-08-14 14:14:04 +02004225struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
Ben Widawsky2f633152013-07-17 12:19:03 -07004226 struct i915_address_space *vm)
4227{
Daniel Vettere656a6c2013-08-14 14:14:04 +02004228 struct i915_vma *vma;
4229 list_for_each_entry(vma, &obj->vma_list, vma_link)
4230 if (vma->vm == vm)
4231 return vma;
4232
4233 return NULL;
4234}
4235
Ben Widawsky2f633152013-07-17 12:19:03 -07004236void i915_gem_vma_destroy(struct i915_vma *vma)
4237{
4238 WARN_ON(vma->node.allocated);
Chris Wilsonaaa05662013-08-20 12:56:40 +01004239
4240 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4241 if (!list_empty(&vma->exec_list))
4242 return;
4243
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004244 list_del(&vma->vma_link);
Daniel Vetterb93dab62013-08-26 11:23:47 +02004245
Ben Widawsky2f633152013-07-17 12:19:03 -07004246 kfree(vma);
4247}
4248
Jesse Barnes5669fca2009-02-17 15:13:31 -08004249int
Chris Wilson45c5f202013-10-16 11:50:01 +01004250i915_gem_suspend(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004251{
4252 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson45c5f202013-10-16 11:50:01 +01004253 int ret = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004254
Chris Wilson45c5f202013-10-16 11:50:01 +01004255 mutex_lock(&dev->struct_mutex);
Chris Wilsonf7403342013-09-13 23:57:04 +01004256 if (dev_priv->ums.mm_suspended)
Chris Wilson45c5f202013-10-16 11:50:01 +01004257 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07004258
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004259 ret = i915_gpu_idle(dev);
Chris Wilsonf7403342013-09-13 23:57:04 +01004260 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004261 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004262
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004263 i915_gem_retire_requests(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004264
Chris Wilson29105cc2010-01-07 10:39:13 +00004265 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01004266 if (!drm_core_check_feature(dev, DRIVER_MODESET))
Chris Wilson6c085a72012-08-20 11:40:46 +02004267 i915_gem_evict_everything(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004268
Chris Wilson29105cc2010-01-07 10:39:13 +00004269 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004270 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004271
Chris Wilson45c5f202013-10-16 11:50:01 +01004272 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4273 * We need to replace this with a semaphore, or something.
4274 * And not confound ums.mm_suspended!
4275 */
4276 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4277 DRIVER_MODESET);
4278 mutex_unlock(&dev->struct_mutex);
4279
4280 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004281 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004282 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004283
Eric Anholt673a3942008-07-30 12:06:12 -07004284 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004285
4286err:
4287 mutex_unlock(&dev->struct_mutex);
4288 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004289}
4290
Ben Widawskyc3787e22013-09-17 21:12:44 -07004291int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
Ben Widawskyb9524a12012-05-25 16:56:24 -07004292{
Ben Widawskyc3787e22013-09-17 21:12:44 -07004293 struct drm_device *dev = ring->dev;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004294 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004295 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4296 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
Ben Widawskyc3787e22013-09-17 21:12:44 -07004297 int i, ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004298
Ben Widawsky040d2ba2013-09-19 11:01:40 -07004299 if (!HAS_L3_DPF(dev) || !remap_info)
Ben Widawskyc3787e22013-09-17 21:12:44 -07004300 return 0;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004301
Ben Widawskyc3787e22013-09-17 21:12:44 -07004302 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4303 if (ret)
4304 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004305
Ben Widawskyc3787e22013-09-17 21:12:44 -07004306 /*
4307 * Note: We do not worry about the concurrent register cacheline hang
4308 * here because no other code should access these registers other than
4309 * at initialization time.
4310 */
Ben Widawskyb9524a12012-05-25 16:56:24 -07004311 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
Ben Widawskyc3787e22013-09-17 21:12:44 -07004312 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4313 intel_ring_emit(ring, reg_base + i);
4314 intel_ring_emit(ring, remap_info[i/4]);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004315 }
4316
Ben Widawskyc3787e22013-09-17 21:12:44 -07004317 intel_ring_advance(ring);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004318
Ben Widawskyc3787e22013-09-17 21:12:44 -07004319 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004320}
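
/*
 * Sizing note for the intel_ring_begin() call above: the loop walks
 * GEN7_L3LOG_SIZE bytes in 4-byte steps, i.e. GEN7_L3LOG_SIZE / 4 register
 * writes, and each write is emitted as three dwords (MI_LOAD_REGISTER_IMM
 * header, register offset, value); hence the GEN7_L3LOG_SIZE / 4 * 3 dwords
 * reserved before the loop.
 */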
4321
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004322void i915_gem_init_swizzling(struct drm_device *dev)
4323{
4324 drm_i915_private_t *dev_priv = dev->dev_private;
4325
Daniel Vetter11782b02012-01-31 16:47:55 +01004326 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004327 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4328 return;
4329
4330 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4331 DISP_TILE_SURFACE_SWIZZLING);
4332
Daniel Vetter11782b02012-01-31 16:47:55 +01004333 if (IS_GEN5(dev))
4334 return;
4335
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004336 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4337 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004338 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004339 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004340 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky31a53362013-11-02 21:07:04 -07004341 else if (IS_GEN8(dev))
4342 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004343 else
4344 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004345}
Daniel Vettere21af882012-02-09 20:53:27 +01004346
Chris Wilson67b1b572012-07-05 23:49:40 +01004347static bool
4348intel_enable_blt(struct drm_device *dev)
4349{
4350 if (!HAS_BLT(dev))
4351 return false;
4352
4353 /* The blitter was dysfunctional on early prototypes */
4354 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4355 DRM_INFO("BLT not supported on this pre-production hardware;"
4356 " graphics performance will be degraded.\n");
4357 return false;
4358 }
4359
4360 return true;
4361}
4362
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004363static int i915_gem_init_rings(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004364{
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004365 struct drm_i915_private *dev_priv = dev->dev_private;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004366 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004367
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004368 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004369 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00004370 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004371
4372 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004373 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004374 if (ret)
4375 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004376 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004377
Chris Wilson67b1b572012-07-05 23:49:40 +01004378 if (intel_enable_blt(dev)) {
Chris Wilson549f7362010-10-19 11:19:32 +01004379 ret = intel_init_blt_ring_buffer(dev);
4380 if (ret)
4381 goto cleanup_bsd_ring;
4382 }
4383
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004384 if (HAS_VEBOX(dev)) {
4385 ret = intel_init_vebox_ring_buffer(dev);
4386 if (ret)
4387 goto cleanup_blt_ring;
4388 }
4389
4390
Mika Kuoppala99433932013-01-22 14:12:17 +02004391 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4392 if (ret)
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004393 goto cleanup_vebox_ring;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004394
4395 return 0;
4396
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004397cleanup_vebox_ring:
4398 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004399cleanup_blt_ring:
4400 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4401cleanup_bsd_ring:
4402 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4403cleanup_render_ring:
4404 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4405
4406 return ret;
4407}
4408
4409int
4410i915_gem_init_hw(struct drm_device *dev)
4411{
4412 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004413 int ret, i;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004414
4415 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4416 return -EIO;
4417
Ben Widawsky59124502013-07-04 11:02:05 -07004418 if (dev_priv->ellc_size)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004419 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004420
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004421 if (IS_HASWELL(dev))
4422 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4423 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004424
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004425 if (HAS_PCH_NOP(dev)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004426 if (IS_IVYBRIDGE(dev)) {
4427 u32 temp = I915_READ(GEN7_MSG_CTL);
4428 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4429 I915_WRITE(GEN7_MSG_CTL, temp);
4430 } else if (INTEL_INFO(dev)->gen >= 7) {
4431 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4432 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4433 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4434 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004435 }
4436
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004437 i915_gem_init_swizzling(dev);
4438
4439 ret = i915_gem_init_rings(dev);
4440 if (ret)
Mika Kuoppala99433932013-01-22 14:12:17 +02004441 return ret;
4442
Ben Widawskyc3787e22013-09-17 21:12:44 -07004443 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4444 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4445
Ben Widawsky254f9652012-06-04 14:42:42 -07004446 /*
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004447 * XXX: Contexts should only be initialized once. Doing a switch to the
4448 * default context, however, is something we'd like to do after
4449 * reset or thaw (the latter may not actually be necessary for HW, but
4450 * goes with our code better). Context switching requires rings (for
4451 * the do_switch), but before enabling PPGTT. So don't move this.
Ben Widawsky254f9652012-06-04 14:42:42 -07004452 */
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004453 ret = i915_gem_context_enable(dev_priv);
Ben Widawsky8245be32013-11-06 13:56:29 -02004454 if (ret) {
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004455 DRM_ERROR("Context enable failed %d\n", ret);
4456 goto err_out;
Ben Widawskyb7c36d22013-04-08 18:43:56 -07004457 }
Daniel Vettere21af882012-02-09 20:53:27 +01004458
Chris Wilson68f95ba2010-05-27 13:18:22 +01004459 return 0;
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004460
4461err_out:
4462 i915_gem_cleanup_ringbuffer(dev);
4463 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004464}
4465
Chris Wilson1070a422012-04-24 15:47:41 +01004466int i915_gem_init(struct drm_device *dev)
4467{
4468 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson1070a422012-04-24 15:47:41 +01004469 int ret;
4470
Chris Wilson1070a422012-04-24 15:47:41 +01004471 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004472
4473 if (IS_VALLEYVIEW(dev)) {
4474 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4475 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4476 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4477 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4478 }
4479
Ben Widawskyd7e50082012-12-18 10:31:25 -08004480 i915_gem_init_global_gtt(dev);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004481
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004482 ret = i915_gem_context_init(dev);
Mika Kuoppalae3848692014-01-31 17:14:02 +02004483 if (ret) {
4484 mutex_unlock(&dev->struct_mutex);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004485 return ret;
Mika Kuoppalae3848692014-01-31 17:14:02 +02004486 }
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004487
Chris Wilson1070a422012-04-24 15:47:41 +01004488 ret = i915_gem_init_hw(dev);
4489 mutex_unlock(&dev->struct_mutex);
4490 if (ret) {
Ben Widawskybdf4fd72013-12-06 14:11:18 -08004491 WARN_ON(dev_priv->mm.aliasing_ppgtt);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004492 i915_gem_context_fini(dev);
Ben Widawskyc39538a2013-12-06 14:10:50 -08004493 drm_mm_takedown(&dev_priv->gtt.base.mm);
Chris Wilson1070a422012-04-24 15:47:41 +01004494 return ret;
4495 }
4496
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004497 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4498 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4499 dev_priv->dri1.allow_batchbuffer = 1;
Chris Wilson1070a422012-04-24 15:47:41 +01004500 return 0;
4501}
4502
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004503void
4504i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4505{
4506 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004507 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004508 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004509
Chris Wilsonb4519512012-05-11 14:29:30 +01004510 for_each_ring(ring, dev_priv, i)
4511 intel_cleanup_ring_buffer(ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004512}
4513
4514int
Eric Anholt673a3942008-07-30 12:06:12 -07004515i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4516 struct drm_file *file_priv)
4517{
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004518 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004519 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004520
Jesse Barnes79e53942008-11-07 14:24:08 -08004521 if (drm_core_check_feature(dev, DRIVER_MODESET))
4522 return 0;
4523
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004524 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004525 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004526 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004527 }
4528
Eric Anholt673a3942008-07-30 12:06:12 -07004529 mutex_lock(&dev->struct_mutex);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004530 dev_priv->ums.mm_suspended = 0;
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004531
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004532 ret = i915_gem_init_hw(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004533 if (ret != 0) {
4534 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004535 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004536 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004537
Ben Widawsky5cef07e2013-07-16 16:50:08 -07004538 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004539 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004540
Chris Wilson5f353082010-06-07 14:03:03 +01004541 ret = drm_irq_install(dev);
4542 if (ret)
4543 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004544
Eric Anholt673a3942008-07-30 12:06:12 -07004545 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01004546
4547cleanup_ringbuffer:
4548 mutex_lock(&dev->struct_mutex);
4549 i915_gem_cleanup_ringbuffer(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004550 dev_priv->ums.mm_suspended = 1;
Chris Wilson5f353082010-06-07 14:03:03 +01004551 mutex_unlock(&dev->struct_mutex);
4552
4553 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004554}
4555
4556int
4557i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4558 struct drm_file *file_priv)
4559{
Jesse Barnes79e53942008-11-07 14:24:08 -08004560 if (drm_core_check_feature(dev, DRIVER_MODESET))
4561 return 0;
4562
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004563 drm_irq_uninstall(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004564
Chris Wilson45c5f202013-10-16 11:50:01 +01004565 return i915_gem_suspend(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004566}
4567
4568void
4569i915_gem_lastclose(struct drm_device *dev)
4570{
4571 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004572
Eric Anholte806b492009-01-22 09:56:58 -08004573 if (drm_core_check_feature(dev, DRIVER_MODESET))
4574 return;
4575
Chris Wilson45c5f202013-10-16 11:50:01 +01004576 ret = i915_gem_suspend(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004577 if (ret)
4578 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004579}
4580
Chris Wilson64193402010-10-24 12:38:05 +01004581static void
4582init_ring_lists(struct intel_ring_buffer *ring)
4583{
4584 INIT_LIST_HEAD(&ring->active_list);
4585 INIT_LIST_HEAD(&ring->request_list);
Chris Wilson64193402010-10-24 12:38:05 +01004586}
4587
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08004588void i915_init_vm(struct drm_i915_private *dev_priv,
4589 struct i915_address_space *vm)
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004590{
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08004591 if (!i915_is_ggtt(vm))
4592 drm_mm_init(&vm->mm, vm->start, vm->total);
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004593 vm->dev = dev_priv->dev;
4594 INIT_LIST_HEAD(&vm->active_list);
4595 INIT_LIST_HEAD(&vm->inactive_list);
4596 INIT_LIST_HEAD(&vm->global_link);
Chris Wilsonf72d21e2014-01-09 22:57:22 +00004597 list_add_tail(&vm->global_link, &dev_priv->vm_list);
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004598}
4599
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	dev_priv->slab =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	i915_init_vm(dev_priv, &dev_priv->gtt.base);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
	dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

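/*
 * Release one phys object slot: detach whichever GEM object currently backs
 * it, return the pages to write-back caching on x86, and free the
 * DMA-coherent allocation.
 */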
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

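/* Walk every possible phys object slot (cursor and overlay regs) and free it. */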
void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

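/*
 * Copy the contents of the phys object back into the object's shmem pages
 * and break the association, so the object can again be used as an ordinary
 * GEM buffer.
 */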
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	i915_gem_chipset_flush(dev);

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}

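/*
 * Bind a GEM object to a physically contiguous backing store, creating the
 * phys object on first use and copying the current shmem contents into it.
 * Any previous phys binding with a different id is detached first.
 */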
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

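/*
 * pwrite fast path for phys objects: copy straight into the contiguous
 * backing store. If the atomic, non-caching copy faults, drop struct_mutex
 * and retry with a plain copy_from_user(), which may sleep to fault the
 * user pages in.
 */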
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}

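/*
 * File-close hook: stop the per-client idle worker and orphan any requests
 * still tracked for this client, so request retirement never touches the
 * file_priv after it is freed.
 */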
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	cancel_delayed_work_sync(&file_priv->mm.idle_work);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

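/*
 * Runs once the client has gone idle: clear the per-file RPS-boost flag so
 * that a later wait by this client may boost the GPU frequency again.
 */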
static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}

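/*
 * File-open hook: allocate the per-client state (request list, idle work)
 * and open a hardware context for the client. On context failure the
 * file_priv is freed and the error propagated.
 */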
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

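/*
 * Best-effort check whether @task currently owns @mutex. Only reliable when
 * the mutex owner is tracked (SMP or mutex debugging); on UP we must assume
 * we do not own it.
 */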
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

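/*
 * Shrinker ->count_objects: report how many pages we could plausibly free,
 * i.e. the pages of unbound objects plus bound-but-idle, unpinned objects.
 * Returns 0 (nothing to do) if struct_mutex cannot be taken safely.
 */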
static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	bool unlock = true;
	unsigned long count;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return 0;

		unlock = false;
	}

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->active)
			continue;

		if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

/*
 * All the new VM stuff: helpers that answer, per address space (GGTT or
 * PPGTT), where and whether an object is bound and how much space it takes.
 */
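/*
 * Return the offset of @o within @vm, in bytes from the start of that
 * address space; lookups against the aliasing PPGTT are redirected to the
 * global GTT, where such objects keep their VMA. Illustrative use only:
 *
 *	unsigned long ggtt_offset =
 *		i915_gem_obj_offset(obj, &dev_priv->gtt.base);
 */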
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));
	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}

	return -1;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}

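/*
 * Shrinker ->scan_objects: actually release pages, in escalating stages --
 * purge already-purgeable objects first, then unbind and shrink inactive
 * objects, and finally fall back to shrinking everything. Returns
 * SHRINK_STOP when struct_mutex cannot be taken safely.
 */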
static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock = true;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return SHRINK_STOP;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return SHRINK_STOP;

		unlock = false;
	}

	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
	if (freed < sc->nr_to_scan)
		freed += __i915_gem_shrink(dev_priv,
					   sc->nr_to_scan - freed,
					   false);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink_all(dev_priv);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}

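/*
 * Return the GGTT VMA for @obj, or NULL if the object is not bound in the
 * global GTT. Relies on the GGTT VMA, when present, always being first in
 * the object's vma_list.
 */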
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
	if (vma->vm != obj_to_ggtt(obj))
		return NULL;

	return vma;
}