/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
			   bool map_and_fenceable,
			   bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

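/* An object's CPU view is only coherent with the GPU's when the platform
 * shares its last-level cache with the GPU (HAS_LLC) or the object uses a
 * snooped cache level; anything else must be clflushed by hand before the
 * GPU is guaranteed to see the data.
 */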
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

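/* Block until any pending GPU reset has completed, bailing out with -EIO
 * after ten seconds so that a stuck reset cannot hang userspace forever.
 */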
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

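/* Grab struct_mutex interruptibly, first waiting for any GPU reset in
 * progress. Either step may be interrupted by a signal, in which case the
 * error is propagated back to the caller.
 */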
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

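/* Illustrative sketch (not part of this file): userspace typically reads
 * the aperture through libdrm, e.g.
 *
 *	struct drm_i915_gem_get_aperture aperture;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
 *		printf("%llu of %llu bytes available\n",
 *		       (unsigned long long)aperture.aper_available_size,
 *		       (unsigned long long)aperture.aper_size);
 */
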
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

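/* For example, a 1366x768 dumb buffer at 32 bpp gets a pitch of
 * ALIGN(1366 * 4, 64) = 5504 bytes and a size of 5504 * 768 bytes; the
 * 64-byte pitch alignment keeps every scanline cacheline aligned.
 */
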
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

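/* Swizzled copy helpers: with bit-17 swizzling, data in pages whose
 * physical address has bit 17 set is stored with bit 6 of its offset
 * flipped. Both helpers therefore walk the buffer one 64-byte cacheline
 * at a time and access gpu_vaddr at gpu_offset ^ 64, so that the CPU sees
 * the bytes in the order the GPU laid them out.
 */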
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

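/* Copy object pages out to userspace. Each page is first attempted with
 * the atomic fastpath above while struct_mutex is held; if that faults,
 * we drop the mutex, prefault the user buffer once, and retry with the
 * sleeping slowpath before retaking the lock.
 */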
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

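/* Copy user data into object pages, mirroring the pread path: try the
 * atomic fastpath per page and fall back to the sleeping slowpath after
 * dropping struct_mutex. Cachelines are flushed before a partial
 * overwrite (to avoid mixing stale data into the write) and after the
 * write whenever the GPU will read the data without snooping.
 */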
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

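/* Only the first waiter for a given file may boost the GPU frequency:
 * atomic_xchg() latches rps_wait_boost and reports whether we won the
 * race to set it.
 */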
static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: the sequence number to wait for
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_file *file,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

Eric Anholt673a3942008-07-30 12:06:12 -07001223/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001224 * Called when user space prepares to use an object with the CPU, either
1225 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -07001226 */
1227int
1228i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001229 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001230{
1231 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001232 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001233 uint32_t read_domains = args->read_domains;
1234 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001235 int ret;
1236
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001237 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +01001238 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001239 return -EINVAL;
1240
Chris Wilson21d509e2009-06-06 09:46:02 +01001241 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001242 return -EINVAL;
1243
1244 /* Having something in the write domain implies it's in the read
1245 * domain, and only that read domain. Enforce that in the request.
1246 */
1247 if (write_domain != 0 && read_domains != write_domain)
1248 return -EINVAL;
1249
Chris Wilson76c1dec2010-09-25 11:22:51 +01001250 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001251 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001252 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001253
Chris Wilson05394f32010-11-08 19:18:58 +00001254 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001255 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001256 ret = -ENOENT;
1257 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +01001258 }
Jesse Barnes652c3932009-08-17 13:31:43 -07001259
Chris Wilson3236f572012-08-24 09:35:09 +01001260 /* Try to flush the object off the GPU without holding the lock.
1261 * We will repeat the flush holding the lock in the normal manner
1262 * to catch cases where we are gazumped.
1263 */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01001264 ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
Chris Wilson3236f572012-08-24 09:35:09 +01001265 if (ret)
1266 goto unref;
1267
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001268 if (read_domains & I915_GEM_DOMAIN_GTT) {
1269 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001270
1271 /* Silently promote "you're not bound, there was nothing to do"
1272 * to success, since the client was just asking us to
1273 * make sure everything was done.
1274 */
1275 if (ret == -EINVAL)
1276 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001277 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001278 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001279 }
1280
Chris Wilson3236f572012-08-24 09:35:09 +01001281unref:
Chris Wilson05394f32010-11-08 19:18:58 +00001282 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001283unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001284 mutex_unlock(&dev->struct_mutex);
1285 return ret;
1286}
1287
1288/**
1289 * Called when user space has done writes to this buffer
1290 */
1291int
1292i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001293 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001294{
1295 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001296 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001297 int ret = 0;
1298
Chris Wilson76c1dec2010-09-25 11:22:51 +01001299 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001300 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001301 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001302
Chris Wilson05394f32010-11-08 19:18:58 +00001303 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001304 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001305 ret = -ENOENT;
1306 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001307 }
1308
Eric Anholt673a3942008-07-30 12:06:12 -07001309 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson2c225692013-08-09 12:26:45 +01001310 if (obj->pin_display)
1311 i915_gem_object_flush_cpu_write_domain(obj, true);
Eric Anholte47c68e2008-11-14 13:35:19 -08001312
Chris Wilson05394f32010-11-08 19:18:58 +00001313 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001314unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001315 mutex_unlock(&dev->struct_mutex);
1316 return ret;
1317}
1318
1319/**
1320 * Maps the contents of an object, returning the address it is mapped
1321 * into.
1322 *
1323 * While the mapping holds a reference on the contents of the object, it doesn't
1324 * imply a ref on the object itself.
1325 */
1326int
1327i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001328 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001329{
1330 struct drm_i915_gem_mmap *args = data;
1331 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001332 unsigned long addr;
1333
Chris Wilson05394f32010-11-08 19:18:58 +00001334 obj = drm_gem_object_lookup(dev, file, args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07001335 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001336 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001337
Daniel Vetter1286ff72012-05-10 15:25:09 +02001338 /* prime objects have no backing filp to GEM mmap
1339 * pages from.
1340 */
1341 if (!obj->filp) {
1342 drm_gem_object_unreference_unlocked(obj);
1343 return -EINVAL;
1344 }
1345
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001346 addr = vm_mmap(obj->filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001347 PROT_READ | PROT_WRITE, MAP_SHARED,
1348 args->offset);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001349 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001350 if (IS_ERR((void *)addr))
1351 return addr;
1352
1353 args->addr_ptr = (uint64_t) addr;
1354
1355 return 0;
1356}
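
/*
 * Editor's aside, not driver code: a minimal sketch of how userspace
 * typically drives the ioctl above, assuming an open DRM fd and a valid
 * GEM handle. The helper name and the omitted error handling are
 * hypothetical; the ioctl number and struct come from drm/i915_drm.h.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *sketch_gem_mmap_cpu(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap arg = {
		.handle = handle,
		.offset = 0,	/* map from the start of the object */
		.size = size,	/* length of the mapping in bytes */
	};

	/* On success the kernel path above has called vm_mmap() and
	 * stored the resulting CPU address in arg.addr_ptr. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) != 0)
		return NULL;

	return (void *)(uintptr_t)arg.addr_ptr;
}
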
1357
Jesse Barnesde151cf2008-11-12 10:03:55 -08001358/**
1359 * i915_gem_fault - fault a page into the GTT
	1360	 * @vma: VMA in question
	1361	 * @vmf: fault info
1362 *
	1363	 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1364 * from userspace. The fault handler takes care of binding the object to
1365 * the GTT (if needed), allocating and programming a fence register (again,
1366 * only if needed based on whether the old reg is still valid or the object
1367 * is tiled) and inserting a new PTE into the faulting process.
1368 *
1369 * Note that the faulting process may involve evicting existing objects
1370 * from the GTT and/or fence registers to make room. So performance may
1371 * suffer if the GTT working set is large or there are few fence registers
1372 * left.
1373 */
1374int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1375{
Chris Wilson05394f32010-11-08 19:18:58 +00001376 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1377 struct drm_device *dev = obj->base.dev;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001378 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001379 pgoff_t page_offset;
1380 unsigned long pfn;
1381 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001382 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001383
Paulo Zanonif65c9162013-11-27 18:20:34 -02001384 intel_runtime_pm_get(dev_priv);
1385
Jesse Barnesde151cf2008-11-12 10:03:55 -08001386 /* We don't use vmf->pgoff since that has the fake offset */
1387 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1388 PAGE_SHIFT;
1389
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001390 ret = i915_mutex_lock_interruptible(dev);
1391 if (ret)
1392 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001393
Chris Wilsondb53a302011-02-03 11:57:46 +00001394 trace_i915_gem_object_fault(obj, page_offset, true, write);
1395
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001396 /* Access to snoopable pages through the GTT is incoherent. */
1397 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1398 ret = -EINVAL;
1399 goto unlock;
1400 }
1401
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001402 /* Now bind it into the GTT if needed */
Ben Widawskyc37e2202013-07-31 16:59:58 -07001403 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001404 if (ret)
1405 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001406
Chris Wilsonc9839302012-11-20 10:45:17 +00001407 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1408 if (ret)
1409 goto unpin;
1410
1411 ret = i915_gem_object_get_fence(obj);
1412 if (ret)
1413 goto unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001414
Chris Wilson6299f992010-11-24 12:23:44 +00001415 obj->fault_mappable = true;
1416
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001417 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1418 pfn >>= PAGE_SHIFT;
1419 pfn += page_offset;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001420
1421 /* Finally, remap it using the new GTT offset */
1422 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc9839302012-11-20 10:45:17 +00001423unpin:
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08001424 i915_gem_object_ggtt_unpin(obj);
Chris Wilsonc7150892009-09-23 00:43:56 +01001425unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001426 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001427out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001428 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001429 case -EIO:
Daniel Vettera9340cc2012-07-04 22:18:42 +02001430 /* If this -EIO is due to a gpu hang, give the reset code a
1431 * chance to clean up the mess. Otherwise return the proper
1432 * SIGBUS. */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001433 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
1434 ret = VM_FAULT_SIGBUS;
1435 break;
1436 }
Chris Wilson045e7692010-11-07 09:18:22 +00001437 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001438 /*
1439 * EAGAIN means the gpu is hung and we'll wait for the error
1440 * handler to reset everything when re-faulting in
1441 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001442 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001443 case 0:
1444 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001445 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001446 case -EBUSY:
1447 /*
1448 * EBUSY is ok: this just means that another thread
1449 * already did the job.
1450 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001451 ret = VM_FAULT_NOPAGE;
1452 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001453 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001454 ret = VM_FAULT_OOM;
1455 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001456 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00001457 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001458 ret = VM_FAULT_SIGBUS;
1459 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001460 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001461 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02001462 ret = VM_FAULT_SIGBUS;
1463 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001464 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02001465
1466 intel_runtime_pm_put(dev_priv);
1467 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001468}
1469
Paulo Zanoni48018a52013-12-13 15:22:31 -02001470void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1471{
1472 struct i915_vma *vma;
1473
1474 /*
1475 * Only the global gtt is relevant for gtt memory mappings, so restrict
1476 * list traversal to objects bound into the global address space. Note
1477 * that the active list should be empty, but better safe than sorry.
1478 */
1479 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1480 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1481 i915_gem_release_mmap(vma->obj);
1482 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1483 i915_gem_release_mmap(vma->obj);
1484}
1485
Jesse Barnesde151cf2008-11-12 10:03:55 -08001486/**
Chris Wilson901782b2009-07-10 08:18:50 +01001487 * i915_gem_release_mmap - remove physical page mappings
1488 * @obj: obj in question
1489 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001490 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001491 * relinquish ownership of the pages back to the system.
1492 *
1493 * It is vital that we remove the page mapping if we have mapped a tiled
1494 * object through the GTT and then lose the fence register due to
1495 * resource pressure. Similarly if the object has been moved out of the
	1496	 * aperture, then pages mapped into userspace must be revoked. Removing the
1497 * mapping will then trigger a page fault on the next user access, allowing
1498 * fixup by i915_gem_fault().
1499 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001500void
Chris Wilson05394f32010-11-08 19:18:58 +00001501i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001502{
Chris Wilson6299f992010-11-24 12:23:44 +00001503 if (!obj->fault_mappable)
1504 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001505
David Herrmann51335df2013-07-24 21:10:03 +02001506 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
Chris Wilson6299f992010-11-24 12:23:44 +00001507 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001508}
1509
Imre Deak0fa87792013-01-07 21:47:35 +02001510uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001511i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001512{
Chris Wilsone28f8712011-07-18 13:11:49 -07001513 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001514
1515 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001516 tiling_mode == I915_TILING_NONE)
1517 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001518
1519 /* Previous chips need a power-of-two fence region when tiling */
1520 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001521 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001522 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001523 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001524
Chris Wilsone28f8712011-07-18 13:11:49 -07001525 while (gtt_size < size)
1526 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001527
Chris Wilsone28f8712011-07-18 13:11:49 -07001528 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001529}
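
/*
 * Editor's sketch: the same round-up in standalone form with a worked
 * case, assuming the gen3 (1MB) / gen2 (512KB) minimums quoted above.
 */
#include <stdint.h>

static uint32_t sketch_fence_size(uint32_t size, uint32_t minimum)
{
	uint32_t fence = minimum;

	while (fence < size)	/* round up to the next power of two */
		fence <<= 1;

	/* e.g. sketch_fence_size(1300 << 10, 1 << 20) == 2MB: a 1300KB
	 * tiled object on gen3 needs a 2MB fence region, while on gen4+
	 * (or untiled) the object's own size would be used unchanged. */
	return fence;
}
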
1530
Jesse Barnesde151cf2008-11-12 10:03:55 -08001531/**
1532 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1533 * @obj: object to check
1534 *
1535 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001536 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001537 */
Imre Deakd865110c2013-01-07 21:47:33 +02001538uint32_t
1539i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1540 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001541{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001542 /*
1543 * Minimum alignment is 4k (GTT page size), but might be greater
1544 * if a fence register is needed for the object.
1545 */
Imre Deakd865110c2013-01-07 21:47:33 +02001546 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001547 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001548 return 4096;
1549
1550 /*
1551 * Previous chips need to be aligned to the size of the smallest
1552 * fence register that can contain the object.
1553 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001554 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001555}
1556
Chris Wilsond8cb5082012-08-11 15:41:03 +01001557static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1558{
1559 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1560 int ret;
1561
David Herrmann0de23972013-07-24 21:07:52 +02001562 if (drm_vma_node_has_offset(&obj->base.vma_node))
Chris Wilsond8cb5082012-08-11 15:41:03 +01001563 return 0;
1564
Daniel Vetterda494d72012-12-20 15:11:16 +01001565 dev_priv->mm.shrinker_no_lock_stealing = true;
1566
Chris Wilsond8cb5082012-08-11 15:41:03 +01001567 ret = drm_gem_create_mmap_offset(&obj->base);
1568 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001569 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001570
1571 /* Badly fragmented mmap space? The only way we can recover
1572 * space is by destroying unwanted objects. We can't randomly release
1573 * mmap_offsets as userspace expects them to be persistent for the
	1574	 * lifetime of the objects. The closest we can do is to release the
	1575	 * offsets on purgeable objects by truncating the object and marking it purged,
1576 * which prevents userspace from ever using that object again.
1577 */
1578 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1579 ret = drm_gem_create_mmap_offset(&obj->base);
1580 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001581 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001582
1583 i915_gem_shrink_all(dev_priv);
Daniel Vetterda494d72012-12-20 15:11:16 +01001584 ret = drm_gem_create_mmap_offset(&obj->base);
1585out:
1586 dev_priv->mm.shrinker_no_lock_stealing = false;
1587
1588 return ret;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001589}
1590
1591static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1592{
Chris Wilsond8cb5082012-08-11 15:41:03 +01001593 drm_gem_free_mmap_offset(&obj->base);
1594}
1595
Jesse Barnesde151cf2008-11-12 10:03:55 -08001596int
Dave Airlieff72145b2011-02-07 12:16:14 +10001597i915_gem_mmap_gtt(struct drm_file *file,
1598 struct drm_device *dev,
1599 uint32_t handle,
1600 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001601{
Chris Wilsonda761a62010-10-27 17:37:08 +01001602 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001603 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001604 int ret;
1605
Chris Wilson76c1dec2010-09-25 11:22:51 +01001606 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001607 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001608 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001609
Dave Airlieff72145b2011-02-07 12:16:14 +10001610 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001611 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001612 ret = -ENOENT;
1613 goto unlock;
1614 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001615
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001616 if (obj->base.size > dev_priv->gtt.mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001617 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001618 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001619 }
1620
Chris Wilson05394f32010-11-08 19:18:58 +00001621 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001622 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00001623 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001624 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001625 }
1626
Chris Wilsond8cb5082012-08-11 15:41:03 +01001627 ret = i915_gem_object_create_mmap_offset(obj);
1628 if (ret)
1629 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001630
David Herrmann0de23972013-07-24 21:07:52 +02001631 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001632
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001633out:
Chris Wilson05394f32010-11-08 19:18:58 +00001634 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001635unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001636 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001637 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001638}
1639
Dave Airlieff72145b2011-02-07 12:16:14 +10001640/**
1641 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1642 * @dev: DRM device
1643 * @data: GTT mapping ioctl data
1644 * @file: GEM object info
1645 *
1646 * Simply returns the fake offset to userspace so it can mmap it.
1647 * The mmap call will end up in drm_gem_mmap(), which will set things
1648 * up so we can get faults in the handler above.
1649 *
1650 * The fault handler will take care of binding the object into the GTT
1651 * (since it may have been evicted to make room for something), allocating
1652 * a fence register, and mapping the appropriate aperture address into
1653 * userspace.
1654 */
1655int
1656i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1657 struct drm_file *file)
1658{
1659 struct drm_i915_gem_mmap_gtt *args = data;
1660
Dave Airlieff72145b2011-02-07 12:16:14 +10001661 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1662}
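
/*
 * Editor's aside, not driver code: the userspace half of the flow the
 * comment above describes. The fake offset is only meaningful as the
 * offset argument to mmap(2) on the same DRM fd; the first access then
 * faults pages in via i915_gem_fault() above. Helper name and error
 * handling are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *sketch_gem_mmap_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) != 0)
		return NULL;

	/* Nothing is bound yet; binding, fencing and the PTE setup all
	 * happen lazily in the fault handler. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, arg.offset);
}
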
1663
Daniel Vetter225067e2012-08-20 10:23:20 +02001664/* Immediately discard the backing storage */
1665static void
1666i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001667{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001668 struct inode *inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001669
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001670 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001671
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001672 if (obj->base.filp == NULL)
1673 return;
1674
Daniel Vetter225067e2012-08-20 10:23:20 +02001675 /* Our goal here is to return as much of the memory as
1676 * is possible back to the system as we are called from OOM.
1677 * To do this we must instruct the shmfs to drop all of its
1678 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01001679 */
Al Viro496ad9a2013-01-23 17:07:38 -05001680 inode = file_inode(obj->base.filp);
Daniel Vetter225067e2012-08-20 10:23:20 +02001681 shmem_truncate_range(inode, 0, (loff_t)-1);
Hugh Dickins5949eac2011-06-27 16:18:18 -07001682
Daniel Vetter225067e2012-08-20 10:23:20 +02001683 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001684}
Chris Wilsone5281cc2010-10-28 13:45:36 +01001685
Daniel Vetter225067e2012-08-20 10:23:20 +02001686static inline int
1687i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1688{
1689 return obj->madv == I915_MADV_DONTNEED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001690}
1691
Chris Wilson5cdf5882010-09-27 15:51:07 +01001692static void
Chris Wilson05394f32010-11-08 19:18:58 +00001693i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001694{
Imre Deak90797e62013-02-18 19:28:03 +02001695 struct sg_page_iter sg_iter;
1696 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02001697
Chris Wilson05394f32010-11-08 19:18:58 +00001698 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001699
Chris Wilson6c085a72012-08-20 11:40:46 +02001700 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1701 if (ret) {
1702 /* In the event of a disaster, abandon all caches and
1703 * hope for the best.
1704 */
1705 WARN_ON(ret != -EIO);
Chris Wilson2c225692013-08-09 12:26:45 +01001706 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02001707 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1708 }
1709
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001710 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001711 i915_gem_object_save_bit_17_swizzle(obj);
1712
Chris Wilson05394f32010-11-08 19:18:58 +00001713 if (obj->madv == I915_MADV_DONTNEED)
1714 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001715
Imre Deak90797e62013-02-18 19:28:03 +02001716 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
Imre Deak2db76d72013-03-26 15:14:18 +02001717 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +01001718
Chris Wilson05394f32010-11-08 19:18:58 +00001719 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01001720 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001721
Chris Wilson05394f32010-11-08 19:18:58 +00001722 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01001723 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001724
Chris Wilson9da3da62012-06-01 15:20:22 +01001725 page_cache_release(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001726 }
Chris Wilson05394f32010-11-08 19:18:58 +00001727 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001728
Chris Wilson9da3da62012-06-01 15:20:22 +01001729 sg_free_table(obj->pages);
1730 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01001731}
1732
Chris Wilsondd624af2013-01-15 12:39:35 +00001733int
Chris Wilson37e680a2012-06-07 15:38:42 +01001734i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1735{
1736 const struct drm_i915_gem_object_ops *ops = obj->ops;
1737
Chris Wilson2f745ad2012-09-04 21:02:58 +01001738 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01001739 return 0;
1740
Chris Wilsona5570172012-09-04 21:02:54 +01001741 if (obj->pages_pin_count)
1742 return -EBUSY;
1743
Ben Widawsky98438772013-07-31 17:00:12 -07001744 BUG_ON(i915_gem_obj_bound_any(obj));
Ben Widawsky3e123022013-07-31 17:00:04 -07001745
Chris Wilsona2165e32012-12-03 11:49:00 +00001746 /* ->put_pages might need to allocate memory for the bit17 swizzle
1747 * array, hence protect them from being reaped by removing them from gtt
1748 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07001749 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00001750
Chris Wilson37e680a2012-06-07 15:38:42 +01001751 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001752 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02001753
Chris Wilson6c085a72012-08-20 11:40:46 +02001754 if (i915_gem_object_is_purgeable(obj))
1755 i915_gem_object_truncate(obj);
1756
1757 return 0;
1758}
1759
Chris Wilsond9973b42013-10-04 10:33:00 +01001760static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001761__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1762 bool purgeable_only)
Chris Wilson6c085a72012-08-20 11:40:46 +02001763{
Chris Wilson57094f82013-09-04 10:45:50 +01001764 struct list_head still_bound_list;
Chris Wilson6c085a72012-08-20 11:40:46 +02001765 struct drm_i915_gem_object *obj, *next;
Chris Wilsond9973b42013-10-04 10:33:00 +01001766 unsigned long count = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001767
1768 list_for_each_entry_safe(obj, next,
1769 &dev_priv->mm.unbound_list,
Ben Widawsky35c20a62013-05-31 11:28:48 -07001770 global_list) {
Daniel Vetter93927ca2013-01-10 18:03:00 +01001771 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
Chris Wilson37e680a2012-06-07 15:38:42 +01001772 i915_gem_object_put_pages(obj) == 0) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001773 count += obj->base.size >> PAGE_SHIFT;
1774 if (count >= target)
1775 return count;
1776 }
1777 }
1778
Chris Wilson57094f82013-09-04 10:45:50 +01001779 /*
1780 * As we may completely rewrite the bound list whilst unbinding
1781 * (due to retiring requests) we have to strictly process only
	1782	 * one element of the list at a time, and recheck the list
1783 * on every iteration.
1784 */
1785 INIT_LIST_HEAD(&still_bound_list);
1786 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001787 struct i915_vma *vma, *v;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001788
Chris Wilson57094f82013-09-04 10:45:50 +01001789 obj = list_first_entry(&dev_priv->mm.bound_list,
1790 typeof(*obj), global_list);
1791 list_move_tail(&obj->global_list, &still_bound_list);
1792
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001793 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1794 continue;
1795
Chris Wilson57094f82013-09-04 10:45:50 +01001796 /*
1797 * Hold a reference whilst we unbind this object, as we may
1798 * end up waiting for and retiring requests. This might
1799 * release the final reference (held by the active list)
1800 * and result in the object being freed from under us.
1802 *
1803 * Note 1: Shrinking the bound list is special since only active
	1804	 * (and hence bound) objects can contain such limbo objects, so
1805 * we don't need special tricks for shrinking the unbound list.
1806 * The only other place where we have to be careful with active
1807 * objects suddenly disappearing due to retiring requests is the
1808 * eviction code.
1809 *
1810 * Note 2: Even though the bound list doesn't hold a reference
1811 * to the object we can safely grab one here: The final object
1812 * unreferencing and the bound_list are both protected by the
1813 * dev->struct_mutex and so we won't ever be able to observe an
1814 * object on the bound_list with a reference count equals 0.
1815 */
1816 drm_gem_object_reference(&obj->base);
1817
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001818 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1819 if (i915_vma_unbind(vma))
1820 break;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001821
Chris Wilson57094f82013-09-04 10:45:50 +01001822 if (i915_gem_object_put_pages(obj) == 0)
Chris Wilson6c085a72012-08-20 11:40:46 +02001823 count += obj->base.size >> PAGE_SHIFT;
Chris Wilson57094f82013-09-04 10:45:50 +01001824
1825 drm_gem_object_unreference(&obj->base);
Chris Wilson6c085a72012-08-20 11:40:46 +02001826 }
Chris Wilson57094f82013-09-04 10:45:50 +01001827 list_splice(&still_bound_list, &dev_priv->mm.bound_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02001828
1829 return count;
1830}
1831
Chris Wilsond9973b42013-10-04 10:33:00 +01001832static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001833i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1834{
1835 return __i915_gem_shrink(dev_priv, target, true);
1836}
1837
Chris Wilsond9973b42013-10-04 10:33:00 +01001838static unsigned long
Chris Wilson6c085a72012-08-20 11:40:46 +02001839i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1840{
1841 struct drm_i915_gem_object *obj, *next;
Dave Chinner7dc19d52013-08-28 10:18:11 +10001842 long freed = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001843
1844 i915_gem_evict_everything(dev_priv->dev);
1845
Ben Widawsky35c20a62013-05-31 11:28:48 -07001846 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
Dave Chinner7dc19d52013-08-28 10:18:11 +10001847 global_list) {
Chris Wilsond9973b42013-10-04 10:33:00 +01001848 if (i915_gem_object_put_pages(obj) == 0)
Dave Chinner7dc19d52013-08-28 10:18:11 +10001849 freed += obj->base.size >> PAGE_SHIFT;
Dave Chinner7dc19d52013-08-28 10:18:11 +10001850 }
1851 return freed;
Daniel Vetter225067e2012-08-20 10:23:20 +02001852}
1853
Chris Wilson37e680a2012-06-07 15:38:42 +01001854static int
Chris Wilson6c085a72012-08-20 11:40:46 +02001855i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001856{
Chris Wilson6c085a72012-08-20 11:40:46 +02001857 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001858 int page_count, i;
1859 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01001860 struct sg_table *st;
1861 struct scatterlist *sg;
Imre Deak90797e62013-02-18 19:28:03 +02001862 struct sg_page_iter sg_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07001863 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02001864 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson6c085a72012-08-20 11:40:46 +02001865 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07001866
Chris Wilson6c085a72012-08-20 11:40:46 +02001867 /* Assert that the object is not currently in any GPU domain. As it
1868 * wasn't in the GTT, there shouldn't be any way it could have been in
1869 * a GPU cache
1870 */
1871 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1872 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1873
Chris Wilson9da3da62012-06-01 15:20:22 +01001874 st = kmalloc(sizeof(*st), GFP_KERNEL);
1875 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07001876 return -ENOMEM;
1877
Chris Wilson9da3da62012-06-01 15:20:22 +01001878 page_count = obj->base.size / PAGE_SIZE;
1879 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01001880 kfree(st);
1881 return -ENOMEM;
1882 }
1883
1884 /* Get the list of pages out of our struct file. They'll be pinned
1885 * at this point until we release them.
1886 *
1887 * Fail silently without starting the shrinker
1888 */
Al Viro496ad9a2013-01-23 17:07:38 -05001889 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilson6c085a72012-08-20 11:40:46 +02001890 gfp = mapping_gfp_mask(mapping);
Linus Torvaldscaf49192012-12-10 10:51:16 -08001891 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001892 gfp &= ~(__GFP_IO | __GFP_WAIT);
Imre Deak90797e62013-02-18 19:28:03 +02001893 sg = st->sgl;
1894 st->nents = 0;
1895 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001896 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1897 if (IS_ERR(page)) {
1898 i915_gem_purge(dev_priv, page_count);
1899 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1900 }
1901 if (IS_ERR(page)) {
1902 /* We've tried hard to allocate the memory by reaping
1903 * our own buffer, now let the real VM do its job and
1904 * go down in flames if truly OOM.
1905 */
Linus Torvaldscaf49192012-12-10 10:51:16 -08001906 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
Chris Wilson6c085a72012-08-20 11:40:46 +02001907 gfp |= __GFP_IO | __GFP_WAIT;
1908
1909 i915_gem_shrink_all(dev_priv);
1910 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1911 if (IS_ERR(page))
1912 goto err_pages;
1913
Linus Torvaldscaf49192012-12-10 10:51:16 -08001914 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001915 gfp &= ~(__GFP_IO | __GFP_WAIT);
1916 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001917#ifdef CONFIG_SWIOTLB
1918 if (swiotlb_nr_tbl()) {
1919 st->nents++;
1920 sg_set_page(sg, page, PAGE_SIZE, 0);
1921 sg = sg_next(sg);
1922 continue;
1923 }
1924#endif
Imre Deak90797e62013-02-18 19:28:03 +02001925 if (!i || page_to_pfn(page) != last_pfn + 1) {
1926 if (i)
1927 sg = sg_next(sg);
1928 st->nents++;
1929 sg_set_page(sg, page, PAGE_SIZE, 0);
1930 } else {
1931 sg->length += PAGE_SIZE;
1932 }
1933 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03001934
1935 /* Check that the i965g/gm workaround works. */
1936 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07001937 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001938#ifdef CONFIG_SWIOTLB
1939 if (!swiotlb_nr_tbl())
1940#endif
1941 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01001942 obj->pages = st;
1943
Eric Anholt673a3942008-07-30 12:06:12 -07001944 if (i915_gem_object_needs_bit17_swizzle(obj))
1945 i915_gem_object_do_bit_17_swizzle(obj);
1946
1947 return 0;
1948
1949err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02001950 sg_mark_end(sg);
1951 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
Imre Deak2db76d72013-03-26 15:14:18 +02001952 page_cache_release(sg_page_iter_page(&sg_iter));
Chris Wilson9da3da62012-06-01 15:20:22 +01001953 sg_free_table(st);
1954 kfree(st);
Eric Anholt673a3942008-07-30 12:06:12 -07001955 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07001956}
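
/*
 * Editor's sketch of the coalescing rule in the loop above, reduced to
 * plain arrays: physically consecutive pages are merged into a single
 * scatterlist segment (the sg->length += PAGE_SIZE branch), and a new
 * segment is started whenever the pfn run breaks. Standalone,
 * hypothetical types; not driver API.
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u

struct sketch_seg {
	uint64_t first_pfn;	/* first page frame of the run */
	size_t   length;	/* run length in bytes */
};

static size_t sketch_coalesce(const uint64_t *pfns, size_t n,
			      struct sketch_seg *out)
{
	size_t nseg = 0;

	for (size_t i = 0; i < n; i++) {
		if (i == 0 || pfns[i] != pfns[i - 1] + 1) {
			out[nseg].first_pfn = pfns[i];	/* start a new run */
			out[nseg].length = SKETCH_PAGE_SIZE;
			nseg++;
		} else {
			out[nseg - 1].length += SKETCH_PAGE_SIZE;
		}
	}
	return nseg;	/* e.g. pfns {7, 8, 9, 20} -> two segments */
}
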
1957
Chris Wilson37e680a2012-06-07 15:38:42 +01001958/* Ensure that the associated pages are gathered from the backing storage
1959 * and pinned into our object. i915_gem_object_get_pages() may be called
1960 * multiple times before they are released by a single call to
1961 * i915_gem_object_put_pages() - once the pages are no longer referenced
1962 * either as a result of memory pressure (reaping pages under the shrinker)
1963 * or as the object is itself released.
1964 */
1965int
1966i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1967{
1968 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1969 const struct drm_i915_gem_object_ops *ops = obj->ops;
1970 int ret;
1971
Chris Wilson2f745ad2012-09-04 21:02:58 +01001972 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01001973 return 0;
1974
Chris Wilson43e28f02013-01-08 10:53:09 +00001975 if (obj->madv != I915_MADV_WILLNEED) {
1976 DRM_ERROR("Attempting to obtain a purgeable object\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00001977 return -EFAULT;
Chris Wilson43e28f02013-01-08 10:53:09 +00001978 }
1979
Chris Wilsona5570172012-09-04 21:02:54 +01001980 BUG_ON(obj->pages_pin_count);
1981
Chris Wilson37e680a2012-06-07 15:38:42 +01001982 ret = ops->get_pages(obj);
1983 if (ret)
1984 return ret;
1985
Ben Widawsky35c20a62013-05-31 11:28:48 -07001986 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilson37e680a2012-06-07 15:38:42 +01001987 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001988}
1989
Ben Widawskye2d05a82013-09-24 09:57:58 -07001990static void
Chris Wilson05394f32010-11-08 19:18:58 +00001991i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson9d7730912012-11-27 16:22:52 +00001992 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001993{
Chris Wilson05394f32010-11-08 19:18:58 +00001994 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001995 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9d7730912012-11-27 16:22:52 +00001996 u32 seqno = intel_ring_get_seqno(ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001997
Zou Nan hai852835f2010-05-21 09:08:56 +08001998 BUG_ON(ring == NULL);
Chris Wilson02978ff2013-07-09 09:22:39 +01001999 if (obj->ring != ring && obj->last_write_seqno) {
2000 /* Keep the seqno relative to the current ring */
2001 obj->last_write_seqno = seqno;
2002 }
Chris Wilson05394f32010-11-08 19:18:58 +00002003 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07002004
2005 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00002006 if (!obj->active) {
2007 drm_gem_object_reference(&obj->base);
2008 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07002009 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01002010
Chris Wilson05394f32010-11-08 19:18:58 +00002011 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002012
Chris Wilson0201f1e2012-07-20 12:41:01 +01002013 obj->last_read_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00002014
Chris Wilsoncaea7472010-11-12 13:53:37 +00002015 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00002016 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002017
Chris Wilson7dd49062012-03-21 10:48:18 +00002018 /* Bump MRU to take account of the delayed flush */
2019 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2020 struct drm_i915_fence_reg *reg;
2021
2022 reg = &dev_priv->fence_regs[obj->fence_reg];
2023 list_move_tail(&reg->lru_list,
2024 &dev_priv->mm.fence_list);
2025 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002026 }
2027}
2028
Ben Widawskye2d05a82013-09-24 09:57:58 -07002029void i915_vma_move_to_active(struct i915_vma *vma,
2030 struct intel_ring_buffer *ring)
2031{
2032 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2033 return i915_gem_object_move_to_active(vma->obj, ring);
2034}
2035
Chris Wilsoncaea7472010-11-12 13:53:37 +00002036static void
Chris Wilsoncaea7472010-11-12 13:53:37 +00002037i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2038{
Ben Widawskyca191b12013-07-31 17:00:14 -07002039 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002040 struct i915_address_space *vm;
2041 struct i915_vma *vma;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002042
Chris Wilson65ce3022012-07-20 12:41:02 +01002043 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002044 BUG_ON(!obj->active);
Chris Wilson65ce3022012-07-20 12:41:02 +01002045
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002046 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2047 vma = i915_gem_obj_to_vma(obj, vm);
2048 if (vma && !list_empty(&vma->mm_list))
2049 list_move_tail(&vma->mm_list, &vm->inactive_list);
2050 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002051
Chris Wilson65ce3022012-07-20 12:41:02 +01002052 list_del_init(&obj->ring_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002053 obj->ring = NULL;
2054
Chris Wilson65ce3022012-07-20 12:41:02 +01002055 obj->last_read_seqno = 0;
2056 obj->last_write_seqno = 0;
2057 obj->base.write_domain = 0;
2058
2059 obj->last_fenced_seqno = 0;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002060 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002061
2062 obj->active = 0;
2063 drm_gem_object_unreference(&obj->base);
2064
2065 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08002066}
Eric Anholt673a3942008-07-30 12:06:12 -07002067
Chris Wilson9d7730912012-11-27 16:22:52 +00002068static int
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002069i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002070{
Chris Wilson9d7730912012-11-27 16:22:52 +00002071 struct drm_i915_private *dev_priv = dev->dev_private;
2072 struct intel_ring_buffer *ring;
2073 int ret, i, j;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002074
Chris Wilson107f27a52012-12-10 13:56:17 +02002075 /* Carefully retire all requests without writing to the rings */
Chris Wilson9d7730912012-11-27 16:22:52 +00002076 for_each_ring(ring, dev_priv, i) {
Chris Wilson107f27a52012-12-10 13:56:17 +02002077 ret = intel_ring_idle(ring);
2078 if (ret)
2079 return ret;
Chris Wilson9d7730912012-11-27 16:22:52 +00002080 }
Chris Wilson9d7730912012-11-27 16:22:52 +00002081 i915_gem_retire_requests(dev);
Chris Wilson107f27a52012-12-10 13:56:17 +02002082
2083 /* Finally reset hw state */
Chris Wilson9d7730912012-11-27 16:22:52 +00002084 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002085 intel_ring_init_seqno(ring, seqno);
Mika Kuoppala498d2ac2012-12-04 15:12:04 +02002086
Chris Wilson9d7730912012-11-27 16:22:52 +00002087 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2088 ring->sync_seqno[j] = 0;
2089 }
2090
2091 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002092}
2093
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002094int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2095{
2096 struct drm_i915_private *dev_priv = dev->dev_private;
2097 int ret;
2098
2099 if (seqno == 0)
2100 return -EINVAL;
2101
	2102	 /* The HWS page seqno needs to be set to less than what we
	2103	 * will inject into the ring
2104 */
2105 ret = i915_gem_init_seqno(dev, seqno - 1);
2106 if (ret)
2107 return ret;
2108
2109 /* Carefully set the last_seqno value so that wrap
2110 * detection still works
2111 */
2112 dev_priv->next_seqno = seqno;
2113 dev_priv->last_seqno = seqno - 1;
2114 if (dev_priv->last_seqno == 0)
2115 dev_priv->last_seqno--;
2116
2117 return 0;
2118}
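
/*
 * Editor's worked example of the guard above: a request to set
 * seqno == 1 gives next_seqno = 1 and last_seqno = 0, and the final
 * decrement wraps last_seqno to 0xffffffff. Assuming the usual signed
 * difference test in i915_seqno_passed(), 0xffffffff is still ordered
 * "before" 1, so wrap detection keeps working across the boundary.
 */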
2119
Chris Wilson9d7730912012-11-27 16:22:52 +00002120int
2121i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002122{
Chris Wilson9d7730912012-11-27 16:22:52 +00002123 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002124
Chris Wilson9d7730912012-11-27 16:22:52 +00002125 /* reserve 0 for non-seqno */
2126 if (dev_priv->next_seqno == 0) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002127 int ret = i915_gem_init_seqno(dev, 0);
Chris Wilson9d7730912012-11-27 16:22:52 +00002128 if (ret)
2129 return ret;
2130
2131 dev_priv->next_seqno = 1;
2132 }
2133
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02002134 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
Chris Wilson9d7730912012-11-27 16:22:52 +00002135 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002136}
2137
Mika Kuoppala0025c072013-06-12 12:35:30 +03002138int __i915_add_request(struct intel_ring_buffer *ring,
2139 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002140 struct drm_i915_gem_object *obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002141 u32 *out_seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002142{
Chris Wilsondb53a302011-02-03 11:57:46 +00002143 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Chris Wilsonacb868d2012-09-26 13:47:30 +01002144 struct drm_i915_gem_request *request;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002145 u32 request_ring_position, request_start;
Eric Anholt673a3942008-07-30 12:06:12 -07002146 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01002147 int ret;
2148
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002149 request_start = intel_ring_get_tail(ring);
Daniel Vettercc889e02012-06-13 20:45:19 +02002150 /*
2151 * Emit any outstanding flushes - execbuf can fail to emit the flush
2152 * after having emitted the batchbuffer command. Hence we need to fix
2153 * things up similar to emitting the lazy request. The difference here
2154 * is that the flush _must_ happen before the next request, no matter
2155 * what.
2156 */
Chris Wilsona7b97612012-07-20 12:41:08 +01002157 ret = intel_ring_flush_all_caches(ring);
2158 if (ret)
2159 return ret;
Daniel Vettercc889e02012-06-13 20:45:19 +02002160
Chris Wilson3c0e2342013-09-04 10:45:52 +01002161 request = ring->preallocated_lazy_request;
2162 if (WARN_ON(request == NULL))
Chris Wilsonacb868d2012-09-26 13:47:30 +01002163 return -ENOMEM;
Daniel Vettercc889e02012-06-13 20:45:19 +02002164
Chris Wilsona71d8d92012-02-15 11:25:36 +00002165 /* Record the position of the start of the request so that
2166 * should we detect the updated seqno part-way through the
2167 * GPU processing the request, we never over-estimate the
2168 * position of the head.
2169 */
2170 request_ring_position = intel_ring_get_tail(ring);
2171
Chris Wilson9d7730912012-11-27 16:22:52 +00002172 ret = ring->add_request(ring);
Chris Wilson3c0e2342013-09-04 10:45:52 +01002173 if (ret)
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002174 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002175
Chris Wilson9d7730912012-11-27 16:22:52 +00002176 request->seqno = intel_ring_get_seqno(ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08002177 request->ring = ring;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002178 request->head = request_start;
Chris Wilsona71d8d92012-02-15 11:25:36 +00002179 request->tail = request_ring_position;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002180
2181 /* Whilst this request exists, batch_obj will be on the
2182 * active_list, and so will hold the active reference. Only when this
	2183	 * request is retired will the batch_obj be moved onto the
2184 * inactive_list and lose its active reference. Hence we do not need
2185 * to explicitly hold another reference here.
2186 */
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002187 request->batch_obj = obj;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002188
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002189 /* Hold a reference to the current context so that we can inspect
2190 * it later in case a hangcheck error event fires.
2191 */
2192 request->ctx = ring->last_context;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002193 if (request->ctx)
2194 i915_gem_context_reference(request->ctx);
2195
Eric Anholt673a3942008-07-30 12:06:12 -07002196 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08002197 was_empty = list_empty(&ring->request_list);
2198 list_add_tail(&request->list, &ring->request_list);
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002199 request->file_priv = NULL;
Zou Nan hai852835f2010-05-21 09:08:56 +08002200
Chris Wilsondb53a302011-02-03 11:57:46 +00002201 if (file) {
2202 struct drm_i915_file_private *file_priv = file->driver_priv;
2203
Chris Wilson1c255952010-09-26 11:03:27 +01002204 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002205 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002206 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002207 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01002208 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00002209 }
Eric Anholt673a3942008-07-30 12:06:12 -07002210
Chris Wilson9d7730912012-11-27 16:22:52 +00002211 trace_i915_gem_request_add(ring, request->seqno);
Chris Wilson18235212013-09-04 10:45:51 +01002212 ring->outstanding_lazy_seqno = 0;
Chris Wilson3c0e2342013-09-04 10:45:52 +01002213 ring->preallocated_lazy_request = NULL;
Chris Wilsondb53a302011-02-03 11:57:46 +00002214
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002215 if (!dev_priv->ums.mm_suspended) {
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002216 i915_queue_hangcheck(ring->dev);
2217
Chris Wilsonf047e392012-07-21 12:31:41 +01002218 if (was_empty) {
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002219 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
Chris Wilsonb3b079d2010-09-13 23:44:34 +01002220 queue_delayed_work(dev_priv->wq,
Chris Wilsonbcb45082012-10-05 17:02:57 +01002221 &dev_priv->mm.retire_work,
2222 round_jiffies_up_relative(HZ));
Chris Wilsonf047e392012-07-21 12:31:41 +01002223 intel_mark_busy(dev_priv->dev);
2224 }
Ben Gamarif65d9422009-09-14 17:48:44 -04002225 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002226
Chris Wilsonacb868d2012-09-26 13:47:30 +01002227 if (out_seqno)
Chris Wilson9d7730912012-11-27 16:22:52 +00002228 *out_seqno = request->seqno;
Chris Wilson3cce4692010-10-27 16:11:02 +01002229 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002230}
2231
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002232static inline void
2233i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07002234{
Chris Wilson1c255952010-09-26 11:03:27 +01002235 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002236
Chris Wilson1c255952010-09-26 11:03:27 +01002237 if (!file_priv)
2238 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002239
Chris Wilson1c255952010-09-26 11:03:27 +01002240 spin_lock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002241 list_del(&request->client_list);
2242 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01002243 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002244}
2245
Mika Kuoppala939fd762014-01-30 19:04:44 +02002246static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002247 const struct i915_hw_context *ctx)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002248{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002249 unsigned long elapsed;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002250
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002251 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2252
2253 if (ctx->hang_stats.banned)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002254 return true;
2255
2256 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
Mika Kuoppala3fac8972014-01-30 16:05:48 +02002257 if (dev_priv->gpu_error.stop_rings == 0 &&
2258 i915_gem_context_is_default(ctx)) {
2259 DRM_ERROR("gpu hanging too fast, banning!\n");
2260 } else {
2261 DRM_DEBUG("context hanging too fast, banning!\n");
2262 }
2263
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002264 return true;
2265 }
2266
2267 return false;
2268}
2269
Mika Kuoppala939fd762014-01-30 19:04:44 +02002270static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2271 struct i915_hw_context *ctx,
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002272 const bool guilty)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002273{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002274 struct i915_ctx_hang_stats *hs;
2275
2276 if (WARN_ON(!ctx))
2277 return;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002278
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002279 hs = &ctx->hang_stats;
2280
2281 if (guilty) {
Mika Kuoppala939fd762014-01-30 19:04:44 +02002282 hs->banned = i915_context_is_banned(dev_priv, ctx);
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002283 hs->batch_active++;
2284 hs->guilty_ts = get_seconds();
2285 } else {
2286 hs->batch_pending++;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002287 }
2288}
2289
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002290static void i915_gem_free_request(struct drm_i915_gem_request *request)
2291{
2292 list_del(&request->list);
2293 i915_gem_request_remove_from_client(request);
2294
2295 if (request->ctx)
2296 i915_gem_context_unreference(request->ctx);
2297
2298 kfree(request);
2299}
2300
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002301static struct drm_i915_gem_request *
2302i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01002303{
Chris Wilson4db080f2013-12-04 11:37:09 +00002304 struct drm_i915_gem_request *request;
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002305 const u32 completed_seqno = ring->get_seqno(ring, false);
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002306
Chris Wilson4db080f2013-12-04 11:37:09 +00002307 list_for_each_entry(request, &ring->request_list, list) {
2308 if (i915_seqno_passed(completed_seqno, request->seqno))
2309 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002310
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002311 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00002312 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002313
2314 return NULL;
2315}
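
/*
 * Editor's sketch of the wrap-safe ordering used by the loop above.
 * i915_seqno_passed() itself lives in i915_drv.h; the standalone form
 * below assumes the conventional signed-difference definition.
 */
#include <stdint.h>
#include <stdbool.h>

static bool sketch_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* True when seq1 is at or after seq2; correct even across a
	 * u32 wrap while the two stay within 2^31 of each other,
	 * e.g. sketch_seqno_passed(0x00000002, 0xfffffffe) is true. */
	return (int32_t)(seq1 - seq2) >= 0;
}
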
2316
2317static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2318 struct intel_ring_buffer *ring)
2319{
2320 struct drm_i915_gem_request *request;
2321 bool ring_hung;
2322
2323 request = i915_gem_find_first_non_complete(ring);
2324
2325 if (request == NULL)
2326 return;
2327
2328 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2329
Mika Kuoppala939fd762014-01-30 19:04:44 +02002330 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002331
2332 list_for_each_entry_continue(request, &ring->request_list, list)
Mika Kuoppala939fd762014-01-30 19:04:44 +02002333 i915_set_reset_status(dev_priv, request->ctx, false);
Chris Wilson4db080f2013-12-04 11:37:09 +00002334}
2335
2336static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2337 struct intel_ring_buffer *ring)
2338{
Chris Wilsondfaae392010-09-22 10:31:52 +01002339 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002340 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07002341
Chris Wilson05394f32010-11-08 19:18:58 +00002342 obj = list_first_entry(&ring->active_list,
2343 struct drm_i915_gem_object,
2344 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002345
Chris Wilson05394f32010-11-08 19:18:58 +00002346 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002347 }
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002348
2349 /*
2350 * We must free the requests after all the corresponding objects have
2350	 * been moved off active lists, which is the same order the normal
2351	 * retire_requests function uses. This is important because objects may
2352	 * hold implicit references on things like ppgtt address spaces through
2354 * the request.
2355 */
2356 while (!list_empty(&ring->request_list)) {
2357 struct drm_i915_gem_request *request;
2358
2359 request = list_first_entry(&ring->request_list,
2360 struct drm_i915_gem_request,
2361 list);
2362
2363 i915_gem_free_request(request);
2364 }
Eric Anholt673a3942008-07-30 12:06:12 -07002365}
2366
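/*
 * Rewrite every fence register, e.g. after a GPU reset, since the
 * hardware state can no longer be trusted.
 */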
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002367void i915_gem_restore_fences(struct drm_device *dev)
Chris Wilson312817a2010-11-22 11:50:11 +00002368{
2369 struct drm_i915_private *dev_priv = dev->dev_private;
2370 int i;
2371
Daniel Vetter4b9de732011-10-09 21:52:02 +02002372 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00002373 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00002374
Daniel Vetter94a335d2013-07-17 14:51:28 +02002375 /*
2376 * Commit delayed tiling changes if we have an object still
2377 * attached to the fence, otherwise just clear the fence.
2378 */
2379 if (reg->obj) {
2380 i915_gem_object_update_fence(reg->obj, reg,
2381 reg->obj->tiling_mode);
2382 } else {
2383 i915_gem_write_fence(dev, i, NULL);
2384 }
Chris Wilson312817a2010-11-22 11:50:11 +00002385 }
2386}
2387
Chris Wilson069efc12010-09-30 16:53:18 +01002388void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002389{
Chris Wilsondfaae392010-09-22 10:31:52 +01002390 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002391 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002392 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002393
Chris Wilson4db080f2013-12-04 11:37:09 +00002394 /*
2395 * Before we free the objects from the requests, we need to inspect
2396 * them for finding the guilty party. As the requests only borrow
2397 * their reference to the objects, the inspection must be done first.
2398 */
Chris Wilsonb4519512012-05-11 14:29:30 +01002399 for_each_ring(ring, dev_priv, i)
Chris Wilson4db080f2013-12-04 11:37:09 +00002400 i915_gem_reset_ring_status(dev_priv, ring);
2401
2402 for_each_ring(ring, dev_priv, i)
2403 i915_gem_reset_ring_cleanup(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01002404
Ben Widawsky3d57e5b2013-10-14 10:01:36 -07002405 i915_gem_cleanup_ringbuffer(dev);
2406
Ben Widawskyacce9ff2013-12-06 14:11:03 -08002407 i915_gem_context_reset(dev);
2408
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002409 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002410}
2411
2412/**
2413 * This function clears the request list as sequence numbers are passed.
2414 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00002415void
Chris Wilsondb53a302011-02-03 11:57:46 +00002416i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002417{
Eric Anholt673a3942008-07-30 12:06:12 -07002418 uint32_t seqno;
2419
Chris Wilsondb53a302011-02-03 11:57:46 +00002420 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01002421 return;
2422
Chris Wilsondb53a302011-02-03 11:57:46 +00002423 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002424
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01002425 seqno = ring->get_seqno(ring, true);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002426
Chris Wilsone9103032014-01-07 11:45:14 +00002427 /* Move any buffers on the active list that are no longer referenced
2428 * by the ringbuffer to the flushing/inactive lists as appropriate,
2429 * before we free the context associated with the requests.
2430 */
2431 while (!list_empty(&ring->active_list)) {
2432 struct drm_i915_gem_object *obj;
2433
2434 obj = list_first_entry(&ring->active_list,
2435 struct drm_i915_gem_object,
2436 ring_list);
2437
2438 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2439 break;
2440
2441 i915_gem_object_move_to_inactive(obj);
2442 }
2443
Zou Nan hai852835f2010-05-21 09:08:56 +08002445 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002446 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07002447
Zou Nan hai852835f2010-05-21 09:08:56 +08002448 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07002449 struct drm_i915_gem_request,
2450 list);
Eric Anholt673a3942008-07-30 12:06:12 -07002451
Chris Wilsondfaae392010-09-22 10:31:52 +01002452 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07002453 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002454
Chris Wilsondb53a302011-02-03 11:57:46 +00002455 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002456 /* We know the GPU must have read the request to have
2457 * sent us the seqno + interrupt, so use the position
2458	 * of the tail of the request to update the last known position
2459 * of the GPU head.
2460 */
2461 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002462
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002463 i915_gem_free_request(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002464 }
2465
Chris Wilsondb53a302011-02-03 11:57:46 +00002466 if (unlikely(ring->trace_irq_seqno &&
2467 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002468 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00002469 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002470 }
Chris Wilson23bc5982010-09-29 16:10:57 +01002471
Chris Wilsondb53a302011-02-03 11:57:46 +00002472 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002473}
2474
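/*
 * Retire completed requests on all rings; returns true if every ring
 * is now idle, in which case the idle work gets scheduled.
 */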
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002475bool
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002476i915_gem_retire_requests(struct drm_device *dev)
2477{
2478 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002479 struct intel_ring_buffer *ring;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002480 bool idle = true;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002481 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002482
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002483 for_each_ring(ring, dev_priv, i) {
Chris Wilsonb4519512012-05-11 14:29:30 +01002484 i915_gem_retire_requests_ring(ring);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002485 idle &= list_empty(&ring->request_list);
2486 }
2487
2488 if (idle)
2489 mod_delayed_work(dev_priv->wq,
2490 &dev_priv->mm.idle_work,
2491 msecs_to_jiffies(100));
2492
2493 return idle;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002494}
2495
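/*
 * Periodic housekeeping: retire completed requests when the lock can
 * be taken without blocking, and requeue ourselves while the device
 * stays busy.
 */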
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002496static void
Eric Anholt673a3942008-07-30 12:06:12 -07002497i915_gem_retire_work_handler(struct work_struct *work)
2498{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002499 struct drm_i915_private *dev_priv =
2500 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2501 struct drm_device *dev = dev_priv->dev;
Chris Wilson0a587052011-01-09 21:05:44 +00002502 bool idle;
Eric Anholt673a3942008-07-30 12:06:12 -07002503
Chris Wilson891b48c2010-09-29 12:26:37 +01002504 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002505 idle = false;
2506 if (mutex_trylock(&dev->struct_mutex)) {
2507 idle = i915_gem_retire_requests(dev);
2508 mutex_unlock(&dev->struct_mutex);
2509 }
2510 if (!idle)
Chris Wilsonbcb45082012-10-05 17:02:57 +01002511 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2512 round_jiffies_up_relative(HZ));
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002513}
Chris Wilson891b48c2010-09-29 12:26:37 +01002514
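/*
 * Runs ~100ms after the last ring went idle (scheduled from
 * i915_gem_retire_requests()) and notifies the rest of the driver
 * via intel_mark_idle().
 */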
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002515static void
2516i915_gem_idle_work_handler(struct work_struct *work)
2517{
2518 struct drm_i915_private *dev_priv =
2519 container_of(work, typeof(*dev_priv), mm.idle_work.work);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002520
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002521 intel_mark_idle(dev_priv->dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002522}
2523
Ben Widawsky5816d642012-04-11 11:18:19 -07002524/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002525 * Ensures that an object will eventually get non-busy by flushing any required
2526	 * write domains, emitting any outstanding lazy request and retiring any
2527	 * completed requests.
2528 */
2529static int
2530i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2531{
2532 int ret;
2533
2534 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002535 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002536 if (ret)
2537 return ret;
2538
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002539 i915_gem_retire_requests_ring(obj->ring);
2540 }
2541
2542 return 0;
2543}
2544
2545/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002546 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2547 * @DRM_IOCTL_ARGS: standard ioctl arguments
2548 *
2549 * Returns 0 if successful, else an error is returned with the remaining time in
2550 * the timeout parameter.
2551 * -ETIME: object is still busy after timeout
2552 * -ERESTARTSYS: signal interrupted the wait
2553	 * -ENOENT: object doesn't exist
2554 * Also possible, but rare:
2555 * -EAGAIN: GPU wedged
2556 * -ENOMEM: damn
2557 * -ENODEV: Internal IRQ fail
2558 * -E?: The add request failed
2559 *
2560 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2561 * non-zero timeout parameter the wait ioctl will wait for the given number of
2562 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2563 * without holding struct_mutex the object may become re-busied before this
2564 * function completes. A similar but shorter * race condition exists in the busy
2565 * ioctl
2566 */
2567int
2568i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2569{
Daniel Vetterf69061b2012-12-06 09:01:42 +01002570 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002571 struct drm_i915_gem_wait *args = data;
2572 struct drm_i915_gem_object *obj;
2573 struct intel_ring_buffer *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002574 struct timespec timeout_stack, *timeout = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01002575 unsigned reset_counter;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002576 u32 seqno = 0;
2577 int ret = 0;
2578
Ben Widawskyeac1f142012-06-05 15:24:24 -07002579 if (args->timeout_ns >= 0) {
2580 timeout_stack = ns_to_timespec(args->timeout_ns);
2581 timeout = &timeout_stack;
2582 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002583
2584 ret = i915_mutex_lock_interruptible(dev);
2585 if (ret)
2586 return ret;
2587
2588 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2589 if (&obj->base == NULL) {
2590 mutex_unlock(&dev->struct_mutex);
2591 return -ENOENT;
2592 }
2593
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002594 /* Need to make sure the object gets inactive eventually. */
2595 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002596 if (ret)
2597 goto out;
2598
2599 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002600 seqno = obj->last_read_seqno;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002601 ring = obj->ring;
2602 }
2603
2604 if (seqno == 0)
2605 goto out;
2606
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002607	 /* Do this after the OLR check to make sure we make forward progress polling
2608 * on this IOCTL with a 0 timeout (like busy ioctl)
2609 */
2610 if (!args->timeout_ns) {
2611 ret = -ETIME;
2612 goto out;
2613 }
2614
2615 drm_gem_object_unreference(&obj->base);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002616 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002617 mutex_unlock(&dev->struct_mutex);
2618
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002619 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03002620 if (timeout)
Ben Widawskyeac1f142012-06-05 15:24:24 -07002621 args->timeout_ns = timespec_to_ns(timeout);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002622 return ret;
2623
2624out:
2625 drm_gem_object_unreference(&obj->base);
2626 mutex_unlock(&dev->struct_mutex);
2627 return ret;
2628}
2629
2630/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002631 * i915_gem_object_sync - sync an object to a ring.
2632 *
2633 * @obj: object which may be in use on another ring.
2634 * @to: ring we wish to use the object on. May be NULL.
2635 *
2636 * This code is meant to abstract object synchronization with the GPU.
2637 * Calling with NULL implies synchronizing the object with the CPU
2638 * rather than a particular GPU ring.
2639 *
2640 * Returns 0 if successful, else propagates up the lower layer error.
2641 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002642int
2643i915_gem_object_sync(struct drm_i915_gem_object *obj,
2644 struct intel_ring_buffer *to)
2645{
2646 struct intel_ring_buffer *from = obj->ring;
2647 u32 seqno;
2648 int ret, idx;
2649
2650 if (from == NULL || to == from)
2651 return 0;
2652
Ben Widawsky5816d642012-04-11 11:18:19 -07002653 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Chris Wilson0201f1e2012-07-20 12:41:01 +01002654 return i915_gem_object_wait_rendering(obj, false);
Ben Widawsky2911a352012-04-05 14:47:36 -07002655
2656 idx = intel_ring_sync_index(from, to);
2657
Chris Wilson0201f1e2012-07-20 12:41:01 +01002658 seqno = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002659 if (seqno <= from->sync_seqno[idx])
2660 return 0;
2661
Ben Widawskyb4aca012012-04-25 20:50:12 -07002662 ret = i915_gem_check_olr(obj->ring, seqno);
2663 if (ret)
2664 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002665
Chris Wilsonb52b89d2013-09-25 11:43:28 +01002666 trace_i915_gem_ring_sync_to(from, to, seqno);
Ben Widawsky1500f7e2012-04-11 11:18:21 -07002667 ret = to->sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002668 if (!ret)
Mika Kuoppala7b01e262012-11-28 17:18:45 +02002669 /* We use last_read_seqno because sync_to()
2670 * might have just caused seqno wrap under
2671 * the radar.
2672 */
2673 from->sync_seqno[idx] = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002674
Ben Widawskye3a5a222012-04-11 11:18:20 -07002675 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002676}
2677
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002678static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2679{
2680 u32 old_write_domain, old_read_domains;
2681
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002682 /* Force a pagefault for domain tracking on next user access */
2683 i915_gem_release_mmap(obj);
2684
Keith Packardb97c3d92011-06-24 21:02:59 -07002685 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2686 return;
2687
Chris Wilson97c809fd2012-10-09 19:24:38 +01002688 /* Wait for any direct GTT access to complete */
2689 mb();
2690
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002691 old_read_domains = obj->base.read_domains;
2692 old_write_domain = obj->base.write_domain;
2693
2694 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2695 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2696
2697 trace_i915_gem_object_change_domain(obj,
2698 old_read_domains,
2699 old_write_domain);
2700}
2701
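/*
 * Tear down a single VMA: wait for outstanding GPU work and the fence,
 * unmap the PTEs and release the vma. Once the object has no VMAs
 * left it moves to the unbound list and its backing pages may be
 * reaped by the shrinker.
 */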
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002702int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002703{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002704 struct drm_i915_gem_object *obj = vma->obj;
Daniel Vetter7bddb012012-02-09 17:15:47 +01002705 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson43e28f02013-01-08 10:53:09 +00002706 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002707
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002708 if (list_empty(&vma->vma_link))
Eric Anholt673a3942008-07-30 12:06:12 -07002709 return 0;
2710
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002711 if (!drm_mm_node_allocated(&vma->node)) {
2712 i915_gem_vma_destroy(vma);
2713
2714 return 0;
2715 }
Ben Widawsky433544b2013-08-13 18:09:06 -07002716
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002717 if (vma->pin_count)
Chris Wilson31d8d652012-05-24 19:11:20 +01002718 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002719
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002720 BUG_ON(obj->pages == NULL);
2721
Chris Wilsona8198ee2011-04-13 22:04:09 +01002722 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002723 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002724 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002725	 /* Continue on if we fail due to EIO; the GPU is hung, so we
2726	 * should be safe, and we need to clean up or else we might
2727	 * cause memory corruption through use-after-free.
2728 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002729
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002730 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002731
Daniel Vetter96b47b62009-12-15 17:50:00 +01002732 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002733 ret = i915_gem_object_put_fence(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002734 if (ret)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002735 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002736
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002737 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00002738
Ben Widawsky6f65e292013-12-06 14:10:56 -08002739 vma->unbind_vma(vma);
2740
Daniel Vetter74163902012-02-15 23:50:21 +01002741 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002742
Ben Widawskyca191b12013-07-31 17:00:14 -07002743 list_del(&vma->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002744 /* Avoid an unnecessary call to unbind on rebind. */
Ben Widawsky5cacaac2013-07-31 17:00:13 -07002745 if (i915_is_ggtt(vma->vm))
2746 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002747
Ben Widawsky2f633152013-07-17 12:19:03 -07002748 drm_mm_remove_node(&vma->node);
2749 i915_gem_vma_destroy(vma);
2750
2751 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002752 * no more VMAs exist. */
Ben Widawsky2f633152013-07-17 12:19:03 -07002753 if (list_empty(&obj->vma_list))
2754 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002755
Chris Wilson70903c32013-12-04 09:59:09 +00002756 /* And finally now the object is completely decoupled from this vma,
2757 * we can drop its hold on the backing storage and allow it to be
2758 * reaped by the shrinker.
2759 */
2760 i915_gem_object_unpin_pages(obj);
2761
Chris Wilson88241782011-01-07 17:09:48 +00002762 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002763}
2764
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002765/**
2766 * Unbinds an object from the global GTT aperture.
2767 */
2768int
2769i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2770{
2771 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2772 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2773
Dan Carpenter58e73e12013-08-09 12:44:11 +03002774 if (!i915_gem_obj_ggtt_bound(obj))
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002775 return 0;
2776
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002777 if (i915_gem_obj_to_ggtt(obj)->pin_count)
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002778 return -EBUSY;
2779
2780 BUG_ON(obj->pages == NULL);
2781
2782 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2783}
2784
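/*
 * Wait for the whole GPU to go idle: switch each ring back to the
 * default context and drain its outstanding requests.
 */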
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002785int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002786{
2787 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002788 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002789 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002790
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002791 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002792 for_each_ring(ring, dev_priv, i) {
Ben Widawsky41bde552013-12-06 14:11:21 -08002793 ret = i915_switch_context(ring, NULL, ring->default_context);
Ben Widawskyb6c74882012-08-14 14:35:14 -07002794 if (ret)
2795 return ret;
2796
Chris Wilson3e960502012-11-27 16:22:54 +00002797 ret = intel_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002798 if (ret)
2799 return ret;
2800 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002801
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002802 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002803}
2804
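/*
 * Program a fence register on gen4 and later: a 64-bit descriptor
 * holding the object's GGTT start/end, pitch and tiling mode, written
 * as two 32-bit halves with the fence disabled in between (see the
 * comment in the body).
 */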
Chris Wilson9ce079e2012-04-17 15:31:30 +01002805static void i965_write_fence_reg(struct drm_device *dev, int reg,
2806 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002807{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002808 drm_i915_private_t *dev_priv = dev->dev_private;
Imre Deak56c844e2013-01-07 21:47:34 +02002809 int fence_reg;
2810 int fence_pitch_shift;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002811
Imre Deak56c844e2013-01-07 21:47:34 +02002812 if (INTEL_INFO(dev)->gen >= 6) {
2813 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2814 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2815 } else {
2816 fence_reg = FENCE_REG_965_0;
2817 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2818 }
2819
Chris Wilsond18b9612013-07-10 13:36:23 +01002820 fence_reg += reg * 8;
2821
2822	 /* To work around incoherency with non-atomic 64-bit register updates,
2823 * we split the 64-bit update into two 32-bit writes. In order
2824 * for a partial fence not to be evaluated between writes, we
2825 * precede the update with write to turn off the fence register,
2826 * and only enable the fence as the last step.
2827 *
2828 * For extra levels of paranoia, we make sure each step lands
2829 * before applying the next step.
2830 */
2831 I915_WRITE(fence_reg, 0);
2832 POSTING_READ(fence_reg);
2833
Chris Wilson9ce079e2012-04-17 15:31:30 +01002834 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002835 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilsond18b9612013-07-10 13:36:23 +01002836 uint64_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002837
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002838 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
Chris Wilson9ce079e2012-04-17 15:31:30 +01002839 0xfffff000) << 32;
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002840 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
Imre Deak56c844e2013-01-07 21:47:34 +02002841 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002842 if (obj->tiling_mode == I915_TILING_Y)
2843 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2844 val |= I965_FENCE_REG_VALID;
Daniel Vetterc6642782010-11-12 13:46:18 +00002845
Chris Wilsond18b9612013-07-10 13:36:23 +01002846 I915_WRITE(fence_reg + 4, val >> 32);
2847 POSTING_READ(fence_reg + 4);
2848
2849 I915_WRITE(fence_reg + 0, val);
2850 POSTING_READ(fence_reg);
2851 } else {
2852 I915_WRITE(fence_reg + 4, 0);
2853 POSTING_READ(fence_reg + 4);
2854 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002855}
2856
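/*
 * Gen3 variant: a single 32-bit fence register encoding base, size,
 * pitch and tiling; the WARN below checks that the object is
 * naturally aligned and power-of-two sized.
 */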
Chris Wilson9ce079e2012-04-17 15:31:30 +01002857static void i915_write_fence_reg(struct drm_device *dev, int reg,
2858 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002859{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002860 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002861 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002862
Chris Wilson9ce079e2012-04-17 15:31:30 +01002863 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002864 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002865 int pitch_val;
2866 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002867
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002868 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002869 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002870 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2871 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2872 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002873
2874 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2875 tile_width = 128;
2876 else
2877 tile_width = 512;
2878
2879 /* Note: pitch better be a power of two tile widths */
2880 pitch_val = obj->stride / tile_width;
2881 pitch_val = ffs(pitch_val) - 1;
2882
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002883 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002884 if (obj->tiling_mode == I915_TILING_Y)
2885 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2886 val |= I915_FENCE_SIZE_BITS(size);
2887 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2888 val |= I830_FENCE_REG_VALID;
2889 } else
2890 val = 0;
2891
2892 if (reg < 8)
2893 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002894 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002895 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002896
Chris Wilson9ce079e2012-04-17 15:31:30 +01002897 I915_WRITE(reg, val);
2898 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002899}
2900
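/*
 * Gen2 variant: also a single 32-bit register, but with 512K
 * alignment and the pitch expressed in 128-byte units.
 */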
Chris Wilson9ce079e2012-04-17 15:31:30 +01002901static void i830_write_fence_reg(struct drm_device *dev, int reg,
2902 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002903{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002904 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002905 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002906
Chris Wilson9ce079e2012-04-17 15:31:30 +01002907 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002908 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002909 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002910
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002911 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002912 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002913 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2914 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2915 i915_gem_obj_ggtt_offset(obj), size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002916
Chris Wilson9ce079e2012-04-17 15:31:30 +01002917 pitch_val = obj->stride / 128;
2918 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002919
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002920 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002921 if (obj->tiling_mode == I915_TILING_Y)
2922 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2923 val |= I830_FENCE_SIZE_BITS(size);
2924 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2925 val |= I830_FENCE_REG_VALID;
2926 } else
2927 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002928
Chris Wilson9ce079e2012-04-17 15:31:30 +01002929 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2930 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2931}
2932
Chris Wilsond0a57782012-10-09 19:24:37 +01002933static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2934{
2935 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2936}
2937
Chris Wilson9ce079e2012-04-17 15:31:30 +01002938static void i915_gem_write_fence(struct drm_device *dev, int reg,
2939 struct drm_i915_gem_object *obj)
2940{
Chris Wilsond0a57782012-10-09 19:24:37 +01002941 struct drm_i915_private *dev_priv = dev->dev_private;
2942
2943 /* Ensure that all CPU reads are completed before installing a fence
2944 * and all writes before removing the fence.
2945 */
2946 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2947 mb();
2948
Daniel Vetter94a335d2013-07-17 14:51:28 +02002949 WARN(obj && (!obj->stride || !obj->tiling_mode),
2950 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2951 obj->stride, obj->tiling_mode);
2952
Chris Wilson9ce079e2012-04-17 15:31:30 +01002953 switch (INTEL_INFO(dev)->gen) {
Ben Widawsky5ab31332013-11-02 21:07:03 -07002954 case 8:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002955 case 7:
Imre Deak56c844e2013-01-07 21:47:34 +02002956 case 6:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002957 case 5:
2958 case 4: i965_write_fence_reg(dev, reg, obj); break;
2959 case 3: i915_write_fence_reg(dev, reg, obj); break;
2960 case 2: i830_write_fence_reg(dev, reg, obj); break;
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08002961 default: BUG();
Chris Wilson9ce079e2012-04-17 15:31:30 +01002962 }
Chris Wilsond0a57782012-10-09 19:24:37 +01002963
2964 /* And similarly be paranoid that no direct access to this region
2965 * is reordered to before the fence is installed.
2966 */
2967 if (i915_gem_object_needs_mb(obj))
2968 mb();
Jesse Barnesde151cf2008-11-12 10:03:55 -08002969}
2970
Chris Wilson61050802012-04-17 15:31:31 +01002971static inline int fence_number(struct drm_i915_private *dev_priv,
2972 struct drm_i915_fence_reg *fence)
2973{
2974 return fence - dev_priv->fence_regs;
2975}
2976
2977static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2978 struct drm_i915_fence_reg *fence,
2979 bool enable)
2980{
Chris Wilson2dc8aae2013-05-22 17:08:06 +01002981 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson46a0b632013-07-10 13:36:24 +01002982 int reg = fence_number(dev_priv, fence);
Chris Wilson61050802012-04-17 15:31:31 +01002983
Chris Wilson46a0b632013-07-10 13:36:24 +01002984 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
Chris Wilson61050802012-04-17 15:31:31 +01002985
2986 if (enable) {
Chris Wilson46a0b632013-07-10 13:36:24 +01002987 obj->fence_reg = reg;
Chris Wilson61050802012-04-17 15:31:31 +01002988 fence->obj = obj;
2989 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2990 } else {
2991 obj->fence_reg = I915_FENCE_REG_NONE;
2992 fence->obj = NULL;
2993 list_del_init(&fence->lru_list);
2994 }
Daniel Vetter94a335d2013-07-17 14:51:28 +02002995 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +01002996}
2997
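/*
 * Wait for any rendering that still uses the fence to complete before
 * the register is changed or reused.
 */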
Chris Wilsond9e86c02010-11-10 16:40:20 +00002998static int
Chris Wilsond0a57782012-10-09 19:24:37 +01002999i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003000{
Chris Wilson1c293ea2012-04-17 15:31:27 +01003001 if (obj->last_fenced_seqno) {
Chris Wilson86d5bc32012-07-20 12:41:04 +01003002 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01003003 if (ret)
3004 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003005
3006 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003007 }
3008
Chris Wilson86d5bc32012-07-20 12:41:04 +01003009 obj->fenced_gpu_access = false;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003010 return 0;
3011}
3012
3013int
3014i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3015{
Chris Wilson61050802012-04-17 15:31:31 +01003016 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003017 struct drm_i915_fence_reg *fence;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003018 int ret;
3019
Chris Wilsond0a57782012-10-09 19:24:37 +01003020 ret = i915_gem_object_wait_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003021 if (ret)
3022 return ret;
3023
Chris Wilson61050802012-04-17 15:31:31 +01003024 if (obj->fence_reg == I915_FENCE_REG_NONE)
3025 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01003026
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003027 fence = &dev_priv->fence_regs[obj->fence_reg];
3028
Chris Wilson61050802012-04-17 15:31:31 +01003029 i915_gem_object_fence_lost(obj);
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003030 i915_gem_object_update_fence(obj, fence, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003031
3032 return 0;
3033}
3034
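/*
 * Find a fence register for a new user: prefer a free one, otherwise
 * steal the least-recently-used unpinned register. Returns -EAGAIN if
 * a pending pageflip will release one, -EDEADLK if all are pinned.
 */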
3035static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01003036i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01003037{
Daniel Vetterae3db242010-02-19 11:51:58 +01003038 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01003039 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003040 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01003041
3042 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003043 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003044 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3045 reg = &dev_priv->fence_regs[i];
3046 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003047 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003048
Chris Wilson1690e1e2011-12-14 13:57:08 +01003049 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003050 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003051 }
3052
Chris Wilsond9e86c02010-11-10 16:40:20 +00003053 if (avail == NULL)
Chris Wilson5dce5b932014-01-20 10:17:36 +00003054 goto deadlock;
Daniel Vetterae3db242010-02-19 11:51:58 +01003055
3056 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003057 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01003058 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01003059 continue;
3060
Chris Wilson8fe301a2012-04-17 15:31:28 +01003061 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003062 }
3063
Chris Wilson5dce5b932014-01-20 10:17:36 +00003064deadlock:
3065 /* Wait for completion of pending flips which consume fences */
3066 if (intel_has_pending_fb_unpin(dev))
3067 return ERR_PTR(-EAGAIN);
3068
3069 return ERR_PTR(-EDEADLK);
Daniel Vetterae3db242010-02-19 11:51:58 +01003070}
3071
Jesse Barnesde151cf2008-11-12 10:03:55 -08003072/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003073 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08003074 * @obj: object to map through a fence reg
3075 *
3076 * When mapping objects through the GTT, userspace wants to be able to write
3077 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003078 * This function walks the fence regs looking for a free one for @obj,
3079 * stealing one if it can't find any.
3080 *
3081 * It then sets up the reg based on the object's properties: address, pitch
3082 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003083 *
3084 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003085 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003086int
Chris Wilson06d98132012-04-17 15:31:24 +01003087i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08003088{
Chris Wilson05394f32010-11-08 19:18:58 +00003089 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08003090 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01003091 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003092 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003093 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003094
Chris Wilson14415742012-04-17 15:31:33 +01003095	 /* Have we updated the tiling parameters on the object and so
3096 * will need to serialise the write to the associated fence register?
3097 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003098 if (obj->fence_dirty) {
Chris Wilsond0a57782012-10-09 19:24:37 +01003099 ret = i915_gem_object_wait_fence(obj);
Chris Wilson14415742012-04-17 15:31:33 +01003100 if (ret)
3101 return ret;
3102 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003103
Chris Wilsond9e86c02010-11-10 16:40:20 +00003104 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00003105 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3106 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003107 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01003108 list_move_tail(&reg->lru_list,
3109 &dev_priv->mm.fence_list);
3110 return 0;
3111 }
3112 } else if (enable) {
3113 reg = i915_find_fence_reg(dev);
Chris Wilson5dce5b932014-01-20 10:17:36 +00003114 if (IS_ERR(reg))
3115 return PTR_ERR(reg);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003116
Chris Wilson14415742012-04-17 15:31:33 +01003117 if (reg->obj) {
3118 struct drm_i915_gem_object *old = reg->obj;
3119
Chris Wilsond0a57782012-10-09 19:24:37 +01003120 ret = i915_gem_object_wait_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003121 if (ret)
3122 return ret;
3123
Chris Wilson14415742012-04-17 15:31:33 +01003124 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003125 }
Chris Wilson14415742012-04-17 15:31:33 +01003126 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07003127 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07003128
Chris Wilson14415742012-04-17 15:31:33 +01003129 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson14415742012-04-17 15:31:33 +01003130
Chris Wilson9ce079e2012-04-17 15:31:30 +01003131 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003132}
3133
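/*
 * Check whether a GTT node may hold an object of the given cache
 * level: on non-LLC platforms its neighbours must share the color
 * unless a hole separates them (see the comment below).
 */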
Chris Wilson42d6ab42012-07-26 11:49:32 +01003134static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3135 struct drm_mm_node *gtt_space,
3136 unsigned long cache_level)
3137{
3138 struct drm_mm_node *other;
3139
3140 /* On non-LLC machines we have to be careful when putting differing
3141 * types of snoopable memory together to avoid the prefetcher
Damien Lespiau4239ca72012-12-03 16:26:16 +00003142 * crossing memory domains and dying.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003143 */
3144 if (HAS_LLC(dev))
3145 return true;
3146
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003147 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003148 return true;
3149
3150 if (list_empty(&gtt_space->node_list))
3151 return true;
3152
3153 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3154 if (other->allocated && !other->hole_follows && other->color != cache_level)
3155 return false;
3156
3157 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3158 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3159 return false;
3160
3161 return true;
3162}
3163
3164static void i915_gem_verify_gtt(struct drm_device *dev)
3165{
3166#if WATCH_GTT
3167 struct drm_i915_private *dev_priv = dev->dev_private;
3168 struct drm_i915_gem_object *obj;
3169 int err = 0;
3170
Ben Widawsky35c20a62013-05-31 11:28:48 -07003171 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
Chris Wilson42d6ab42012-07-26 11:49:32 +01003172 if (obj->gtt_space == NULL) {
3173 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3174 err++;
3175 continue;
3176 }
3177
3178 if (obj->cache_level != obj->gtt_space->color) {
3179 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003180 i915_gem_obj_ggtt_offset(obj),
3181 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003182 obj->cache_level,
3183 obj->gtt_space->color);
3184 err++;
3185 continue;
3186 }
3187
3188 if (!i915_gem_valid_gtt_space(dev,
3189 obj->gtt_space,
3190 obj->cache_level)) {
3191 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003192 i915_gem_obj_ggtt_offset(obj),
3193 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003194 obj->cache_level);
3195 err++;
3196 continue;
3197 }
3198 }
3199
3200 WARN_ON(err);
3201#endif
3202}
3203
Jesse Barnesde151cf2008-11-12 10:03:55 -08003204/**
Eric Anholt673a3942008-07-30 12:06:12 -07003205 * Finds free space in the given address space and binds the object there.
3206 */
3207static int
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003208i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3209 struct i915_address_space *vm,
3210 unsigned alignment,
3211 bool map_and_fenceable,
3212 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003213{
Chris Wilson05394f32010-11-08 19:18:58 +00003214 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003215 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter5e783302010-11-14 22:32:36 +01003216 u32 size, fence_size, fence_alignment, unfenced_alignment;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003217 size_t gtt_max =
3218 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
Ben Widawsky2f633152013-07-17 12:19:03 -07003219 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003220 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003221
Chris Wilsone28f8712011-07-18 13:11:49 -07003222 fence_size = i915_gem_get_gtt_size(dev,
3223 obj->base.size,
3224 obj->tiling_mode);
3225 fence_alignment = i915_gem_get_gtt_alignment(dev,
3226 obj->base.size,
Imre Deakd865110c2013-01-07 21:47:33 +02003227 obj->tiling_mode, true);
Chris Wilsone28f8712011-07-18 13:11:49 -07003228 unfenced_alignment =
Imre Deakd865110c2013-01-07 21:47:33 +02003229 i915_gem_get_gtt_alignment(dev,
Chris Wilsone28f8712011-07-18 13:11:49 -07003230 obj->base.size,
Imre Deakd865110c2013-01-07 21:47:33 +02003231 obj->tiling_mode, false);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003232
Eric Anholt673a3942008-07-30 12:06:12 -07003233 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01003234 alignment = map_and_fenceable ? fence_alignment :
3235 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003236 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003237 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3238 return -EINVAL;
3239 }
3240
Chris Wilson05394f32010-11-08 19:18:58 +00003241 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003242
Chris Wilson654fc602010-05-27 13:18:21 +01003243 /* If the object is bigger than the entire aperture, reject it early
3244 * before evicting everything in a vain attempt to find space.
3245 */
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003246 if (obj->base.size > gtt_max) {
Jani Nikula3765f302013-06-07 16:03:50 +03003247 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
Chris Wilsona36689c2013-05-21 16:58:49 +01003248 obj->base.size,
3249 map_and_fenceable ? "mappable" : "total",
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003250 gtt_max);
Chris Wilson654fc602010-05-27 13:18:21 +01003251 return -E2BIG;
3252 }
3253
Chris Wilson37e680a2012-06-07 15:38:42 +01003254 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003255 if (ret)
3256 return ret;
3257
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003258 i915_gem_object_pin_pages(obj);
3259
Ben Widawskyaccfef22013-08-14 11:38:35 +02003260 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
Dan Carpenterdb473b32013-07-19 08:45:46 +03003261 if (IS_ERR(vma)) {
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003262 ret = PTR_ERR(vma);
3263 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003264 }
3265
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003266search_free:
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003267 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003268 size, alignment,
David Herrmann31e5d7c2013-07-27 13:36:27 +02003269 obj->cache_level, 0, gtt_max,
3270 DRM_MM_SEARCH_DEFAULT);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003271 if (ret) {
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07003272 ret = i915_gem_evict_something(dev, vm, size, alignment,
Chris Wilson42d6ab42012-07-26 11:49:32 +01003273 obj->cache_level,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003274 map_and_fenceable,
3275 nonblocking);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003276 if (ret == 0)
3277 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003278
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003279 goto err_free_vma;
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003280 }
Ben Widawsky2f633152013-07-17 12:19:03 -07003281 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003282 obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003283 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003284 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003285 }
3286
Daniel Vetter74163902012-02-15 23:50:21 +01003287 ret = i915_gem_gtt_prepare_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003288 if (ret)
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003289 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003290
Ben Widawsky35c20a62013-05-31 11:28:48 -07003291 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -07003292 list_add_tail(&vma->mm_list, &vm->inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003293
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003294 if (i915_is_ggtt(vm)) {
3295 bool mappable, fenceable;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003296
Daniel Vetter49987092013-08-14 10:21:23 +02003297 fenceable = (vma->node.size == fence_size &&
3298 (vma->node.start & (fence_alignment - 1)) == 0);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003299
Daniel Vetter49987092013-08-14 10:21:23 +02003300 mappable = (vma->node.start + obj->base.size <=
3301 dev_priv->gtt.mappable_end);
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003302
Ben Widawsky5cacaac2013-07-31 17:00:13 -07003303 obj->map_and_fenceable = mappable && fenceable;
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003304 }
Daniel Vetter75e9e912010-11-04 17:11:09 +01003305
Ben Widawsky7ace7ef2013-08-09 22:12:12 -07003306 WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003307
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003308 trace_i915_vma_bind(vma, map_and_fenceable);
Chris Wilson42d6ab42012-07-26 11:49:32 +01003309 i915_gem_verify_gtt(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003310 return 0;
Ben Widawsky2f633152013-07-17 12:19:03 -07003311
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003312err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003313 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003314err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003315 i915_gem_vma_destroy(vma);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003316err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003317 i915_gem_object_unpin_pages(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003318 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003319}
3320
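/*
 * Flush the CPU cachelines for the object's pages; returns true if a
 * clflush was actually issued, so the caller knows a chipset flush is
 * needed. Skipped for stolen memory and for snooped objects unless
 * forced.
 */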
Chris Wilson000433b2013-08-08 14:41:09 +01003321bool
Chris Wilson2c225692013-08-09 12:26:45 +01003322i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3323 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003324{
Eric Anholt673a3942008-07-30 12:06:12 -07003325 /* If we don't have a page list set up, then we're not pinned
3326 * to GPU, and we can ignore the cache flush because it'll happen
3327 * again at bind time.
3328 */
Chris Wilson05394f32010-11-08 19:18:58 +00003329 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003330 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003331
Imre Deak769ce462013-02-13 21:56:05 +02003332 /*
3333 * Stolen memory is always coherent with the GPU as it is explicitly
3334 * marked as wc by the system, or the system is cache-coherent.
3335 */
3336 if (obj->stolen)
Chris Wilson000433b2013-08-08 14:41:09 +01003337 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003338
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003339 /* If the GPU is snooping the contents of the CPU cache,
3340 * we do not need to manually clear the CPU cache lines. However,
3341 * the caches are only snooped when the render cache is
3342 * flushed/invalidated. As we always have to emit invalidations
3343 * and flushes when moving into and out of the RENDER domain, correct
3344 * snooping behaviour occurs naturally as the result of our domain
3345 * tracking.
3346 */
Chris Wilson2c225692013-08-09 12:26:45 +01003347 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
Chris Wilson000433b2013-08-08 14:41:09 +01003348 return false;
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003349
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003350 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003351 drm_clflush_sg(obj->pages);
Chris Wilson000433b2013-08-08 14:41:09 +01003352
3353 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003354}
3355
3356/** Flushes the GTT write domain for the object if it's dirty. */
3357static void
Chris Wilson05394f32010-11-08 19:18:58 +00003358i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003359{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003360 uint32_t old_write_domain;
3361
Chris Wilson05394f32010-11-08 19:18:58 +00003362 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003363 return;
3364
Chris Wilson63256ec2011-01-04 18:42:07 +00003365 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003366 * to it immediately go to main memory as far as we know, so there's
3367 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003368 *
3369 * However, we do have to enforce the order so that all writes through
3370 * the GTT land before any writes to the device, such as updates to
3371 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08003372 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003373 wmb();
3374
Chris Wilson05394f32010-11-08 19:18:58 +00003375 old_write_domain = obj->base.write_domain;
3376 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003377
3378 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003379 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003380 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003381}
3382
3383/** Flushes the CPU write domain for the object if it's dirty. */
3384static void
Chris Wilson2c225692013-08-09 12:26:45 +01003385i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3386 bool force)
Eric Anholte47c68e2008-11-14 13:35:19 -08003387{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003388 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08003389
Chris Wilson05394f32010-11-08 19:18:58 +00003390 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003391 return;
3392
Chris Wilson000433b2013-08-08 14:41:09 +01003393 if (i915_gem_clflush_object(obj, force))
3394 i915_gem_chipset_flush(obj->base.dev);
3395
Chris Wilson05394f32010-11-08 19:18:58 +00003396 old_write_domain = obj->base.write_domain;
3397 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003398
3399 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003400 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003401 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003402}
3403
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003404/**
3405 * Moves a single object to the GTT read, and possibly write domain.
3406 *
3407 * This function returns when the move is complete, including waiting on
3408 * flushes to occur.
3409 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003410int
Chris Wilson20217462010-11-23 15:26:33 +00003411i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003412{
Chris Wilson8325a092012-04-24 15:52:35 +01003413 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003414 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003415 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003416
Eric Anholt02354392008-11-26 13:58:13 -08003417 /* Not valid to be called on unbound objects. */
Ben Widawsky98438772013-07-31 17:00:12 -07003418 if (!i915_gem_obj_bound_any(obj))
Eric Anholt02354392008-11-26 13:58:13 -08003419 return -EINVAL;
3420
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003421 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3422 return 0;
3423
Chris Wilson0201f1e2012-07-20 12:41:01 +01003424 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003425 if (ret)
3426 return ret;
3427
Chris Wilson2c225692013-08-09 12:26:45 +01003428 i915_gem_object_flush_cpu_write_domain(obj, false);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003429
Chris Wilsond0a57782012-10-09 19:24:37 +01003430 /* Serialise direct access to this object with the barriers for
3431 * coherent writes from the GPU, by effectively invalidating the
3432 * GTT domain upon first access.
3433 */
3434 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3435 mb();
3436
Chris Wilson05394f32010-11-08 19:18:58 +00003437 old_write_domain = obj->base.write_domain;
3438 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003439
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003440 /* It should now be out of any other write domains, and we can update
3441 * the domain values for our changes.
3442 */
Chris Wilson05394f32010-11-08 19:18:58 +00003443 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3444 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003445 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003446 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3447 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3448 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003449 }
3450
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003451 trace_i915_gem_object_change_domain(obj,
3452 old_read_domains,
3453 old_write_domain);
3454
Chris Wilson8325a092012-04-24 15:52:35 +01003455 /* And bump the LRU for this access */
Ben Widawskyca191b12013-07-31 17:00:14 -07003456 if (i915_gem_object_is_inactive(obj)) {
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07003457 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Ben Widawskyca191b12013-07-31 17:00:14 -07003458 if (vma)
3459 list_move_tail(&vma->mm_list,
3460 &dev_priv->gtt.base.inactive_list);
3461
3462 }
Chris Wilson8325a092012-04-24 15:52:35 +01003463
Eric Anholte47c68e2008-11-14 13:35:19 -08003464 return 0;
3465}
3466
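/*
 * Change the caching mode of an object: any VMA whose surroundings
 * cannot accept the new cache level is unbound, the remaining
 * bindings are redone with the new level, and the CPU domain is fixed
 * up for objects that now need clflushing on CPU writes.
 */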
Chris Wilsone4ffd172011-04-04 09:44:39 +01003467int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3468 enum i915_cache_level cache_level)
3469{
Daniel Vetter7bddb012012-02-09 17:15:47 +01003470 struct drm_device *dev = obj->base.dev;
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003471 struct i915_vma *vma;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003472 int ret;
3473
3474 if (obj->cache_level == cache_level)
3475 return 0;
3476
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003477 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003478 DRM_DEBUG("can not change the cache level of pinned objects\n");
3479 return -EBUSY;
3480 }
3481
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003482 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3483 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003484 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003485 if (ret)
3486 return ret;
3487
3488 break;
3489 }
Chris Wilson42d6ab42012-07-26 11:49:32 +01003490 }
3491
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003492 if (i915_gem_obj_bound_any(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003493 ret = i915_gem_object_finish_gpu(obj);
3494 if (ret)
3495 return ret;
3496
3497 i915_gem_object_finish_gtt(obj);
3498
3499 /* Before SandyBridge, you could not use tiling or fence
3500 * registers with snooped memory, so relinquish any fences
3501 * currently pointing to our region in the aperture.
3502 */
Chris Wilson42d6ab42012-07-26 11:49:32 +01003503 if (INTEL_INFO(dev)->gen < 6) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003504 ret = i915_gem_object_put_fence(obj);
3505 if (ret)
3506 return ret;
3507 }
3508
Ben Widawsky6f65e292013-12-06 14:10:56 -08003509 list_for_each_entry(vma, &obj->vma_list, vma_link)
3510 vma->bind_vma(vma, cache_level, 0);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003511 }
3512
Chris Wilson2c225692013-08-09 12:26:45 +01003513 list_for_each_entry(vma, &obj->vma_list, vma_link)
3514 vma->node.color = cache_level;
3515 obj->cache_level = cache_level;
3516
3517 if (cpu_write_needs_clflush(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003518 u32 old_read_domains, old_write_domain;
3519
3520 /* If we're coming from LLC cached, then we haven't
3521 * actually been tracking whether the data is in the
3522 * CPU cache or not, since we only allow one bit set
3523 * in obj->write_domain and have been skipping the clflushes.
3524 * Just set it to the CPU cache for now.
3525 */
3526 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003527
3528 old_read_domains = obj->base.read_domains;
3529 old_write_domain = obj->base.write_domain;
3530
3531 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3532 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3533
3534 trace_i915_gem_object_change_domain(obj,
3535 old_read_domains,
3536 old_write_domain);
3537 }
3538
Chris Wilson42d6ab42012-07-26 11:49:32 +01003539 i915_gem_verify_gtt(dev);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003540 return 0;
3541}
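
/*
 * Example grounded in this file: the display pin path below selects a
 * scanout-safe level with exactly this helper,
 *
 *	ret = i915_gem_object_set_cache_level(obj,
 *			HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE);
 *
 * and on failure the object simply keeps its previous cache level.
 */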
3542
Ben Widawsky199adf42012-09-21 17:01:20 -07003543int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3544 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003545{
Ben Widawsky199adf42012-09-21 17:01:20 -07003546 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003547 struct drm_i915_gem_object *obj;
3548 int ret;
3549
3550 ret = i915_mutex_lock_interruptible(dev);
3551 if (ret)
3552 return ret;
3553
3554 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3555 if (&obj->base == NULL) {
3556 ret = -ENOENT;
3557 goto unlock;
3558 }
3559
Chris Wilson651d7942013-08-08 14:41:10 +01003560 switch (obj->cache_level) {
3561 case I915_CACHE_LLC:
3562 case I915_CACHE_L3_LLC:
3563 args->caching = I915_CACHING_CACHED;
3564 break;
3565
Chris Wilson4257d3b2013-08-08 14:41:11 +01003566 case I915_CACHE_WT:
3567 args->caching = I915_CACHING_DISPLAY;
3568 break;
3569
Chris Wilson651d7942013-08-08 14:41:10 +01003570 default:
3571 args->caching = I915_CACHING_NONE;
3572 break;
3573 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003574
3575 drm_gem_object_unreference(&obj->base);
3576unlock:
3577 mutex_unlock(&dev->struct_mutex);
3578 return ret;
3579}
3580
Ben Widawsky199adf42012-09-21 17:01:20 -07003581int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3582 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003583{
Ben Widawsky199adf42012-09-21 17:01:20 -07003584 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003585 struct drm_i915_gem_object *obj;
3586 enum i915_cache_level level;
3587 int ret;
3588
Ben Widawsky199adf42012-09-21 17:01:20 -07003589 switch (args->caching) {
3590 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003591 level = I915_CACHE_NONE;
3592 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003593 case I915_CACHING_CACHED:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003594 level = I915_CACHE_LLC;
3595 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003596 case I915_CACHING_DISPLAY:
3597 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3598 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003599 default:
3600 return -EINVAL;
3601 }
3602
Ben Widawsky3bc29132012-09-26 16:15:20 -07003603 ret = i915_mutex_lock_interruptible(dev);
3604 if (ret)
3605 return ret;
3606
Chris Wilsone6994ae2012-07-10 10:27:08 +01003607 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3608 if (&obj->base == NULL) {
3609 ret = -ENOENT;
3610 goto unlock;
3611 }
3612
3613 ret = i915_gem_object_set_cache_level(obj, level);
3614
3615 drm_gem_object_unreference(&obj->base);
3616unlock:
3617 mutex_unlock(&dev->struct_mutex);
3618 return ret;
3619}
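
/*
 * Userspace view (illustrative, using libdrm's drmIoctl; 'fd' and 'handle'
 * are assumed): both caching ioctls share struct drm_i915_gem_caching.
 *
 *	struct drm_i915_gem_caching arg = { .handle = handle,
 *					    .caching = I915_CACHING_CACHED };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg);
 *
 * After the second call arg.caching reports the level the kernel actually
 * applied, which may differ from the one requested.
 */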
3620
Chris Wilsoncc98b412013-08-09 12:25:09 +01003621static bool is_pin_display(struct drm_i915_gem_object *obj)
3622{
3623 /* There are 3 sources that pin objects:
3624 * 1. The display engine (scanouts, sprites, cursors);
3625 * 2. Reservations for execbuffer;
3626 * 3. The user.
3627 *
3628 * We can ignore reservations as we hold the struct_mutex and
3629 * are only called outside of the reservation path. The user
3630 * can only increment pin_count once, and so if after
3631 * subtracting the potential reference by the user, any pin_count
3632 * remains, it must be due to another use by the display engine.
3633 */
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003634 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003635}
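
/*
 * Worked example of the accounting above: with vma->pin_count == 2 and
 * obj->user_pin_count == 1, subtracting the single possible user reference
 * leaves 1, so the remaining pin is attributed to the display engine and
 * the function returns true. Note the lookup assumes the object has a GGTT
 * vma, which holds for the display-pinned objects this is called on.
 */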
3636
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003637/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003638 * Prepare buffer for display plane (scanout, cursors, etc.).
3639 * Can be called from an uninterruptible phase (modesetting) and allows
3640 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003641 */
3642int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003643i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3644 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00003645 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003646{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003647 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003648 int ret;
3649
Chris Wilson0be73282010-12-06 14:36:27 +00003650 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07003651 ret = i915_gem_object_sync(obj, pipelined);
3652 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003653 return ret;
3654 }
3655
Chris Wilsoncc98b412013-08-09 12:25:09 +01003656 /* Mark the pin_display early so that we account for the
3657 * display coherency whilst setting up the cache domains.
3658 */
3659 obj->pin_display = true;
3660
Eric Anholta7ef0642011-03-29 16:59:54 -07003661 /* The display engine is not coherent with the LLC cache on gen6. As
3662 * a result, we make sure that the pinning that is about to occur is
3663 * done with uncached PTEs. This is the lowest common denominator for all
3664 * chipsets.
3665 *
3666 * However for gen6+, we could do better by using the GFDT bit instead
3667 * of uncaching, which would allow us to flush all the LLC-cached data
3668 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3669 */
Chris Wilson651d7942013-08-08 14:41:10 +01003670 ret = i915_gem_object_set_cache_level(obj,
3671 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Eric Anholta7ef0642011-03-29 16:59:54 -07003672 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003673 goto err_unpin_display;
Eric Anholta7ef0642011-03-29 16:59:54 -07003674
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003675 /* As the user may map the buffer once pinned in the display plane
3676 * (e.g. libkms for the bootup splash), we have to ensure that we
3677 * always use map_and_fenceable for all scanout buffers.
3678 */
Ben Widawskyc37e2202013-07-31 16:59:58 -07003679 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003680 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003681 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003682
Chris Wilson2c225692013-08-09 12:26:45 +01003683 i915_gem_object_flush_cpu_write_domain(obj, true);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003684
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003685 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003686 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003687
3688 /* It should now be out of any other write domains, and we can update
3689 * the domain values for our changes.
3690 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003691 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003692 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003693
3694 trace_i915_gem_object_change_domain(obj,
3695 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003696 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003697
3698 return 0;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003699
3700err_unpin_display:
3701 obj->pin_display = is_pin_display(obj);
3702 return ret;
3703}
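
/*
 * Sketch of the expected pairing (illustrative only, e.g. around a pageflip):
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *	if (ret)
 *		return ret;
 *	... scan out from the object ...
 *	i915_gem_object_unpin_from_display_plane(obj);
 */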
3704
3705void
3706i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3707{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003708 i915_gem_object_ggtt_unpin(obj);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003709 obj->pin_display = is_pin_display(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003710}
3711
Chris Wilson85345512010-11-13 09:49:11 +00003712int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003713i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003714{
Chris Wilson88241782011-01-07 17:09:48 +00003715 int ret;
3716
Chris Wilsona8198ee2011-04-13 22:04:09 +01003717 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003718 return 0;
3719
Chris Wilson0201f1e2012-07-20 12:41:01 +01003720 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsonc501ae72011-12-14 13:57:23 +01003721 if (ret)
3722 return ret;
3723
Chris Wilsona8198ee2011-04-13 22:04:09 +01003724 /* Ensure that we invalidate the GPU's caches and TLBs. */
3725 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003726 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003727}
3728
Eric Anholte47c68e2008-11-14 13:35:19 -08003729/**
3730 * Moves a single object to the CPU read, and possibly write, domain.
3731 *
3732 * This function returns when the move is complete, including waiting on
3733 * flushes to occur.
3734 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003735int
Chris Wilson919926a2010-11-12 13:42:53 +00003736i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003737{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003738 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003739 int ret;
3740
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003741 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3742 return 0;
3743
Chris Wilson0201f1e2012-07-20 12:41:01 +01003744 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003745 if (ret)
3746 return ret;
3747
Eric Anholte47c68e2008-11-14 13:35:19 -08003748 i915_gem_object_flush_gtt_write_domain(obj);
3749
Chris Wilson05394f32010-11-08 19:18:58 +00003750 old_write_domain = obj->base.write_domain;
3751 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003752
Eric Anholte47c68e2008-11-14 13:35:19 -08003753 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003754 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003755 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003756
Chris Wilson05394f32010-11-08 19:18:58 +00003757 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003758 }
3759
3760 /* It should now be out of any other write domains, and we can update
3761 * the domain values for our changes.
3762 */
Chris Wilson05394f32010-11-08 19:18:58 +00003763 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003764
3765 /* If we're writing through the CPU, then the GPU read domains will
3766 * need to be invalidated at next use.
3767 */
3768 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003769 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3770 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003771 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003772
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003773 trace_i915_gem_object_change_domain(obj,
3774 old_read_domains,
3775 old_write_domain);
3776
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003777 return 0;
3778}
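
/*
 * Illustrative sketch: a caller wanting coherent CPU writes (e.g. a pwrite
 * fallback) moves the object to the CPU domain first and then touches the
 * shmem pages directly; kmap_page() is a hypothetical stand-in for mapping
 * one of obj's backing pages.
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 *	if (ret == 0)
 *		memcpy(kmap_page(obj, n), data, len);
 */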
3779
Eric Anholt673a3942008-07-30 12:06:12 -07003780/* Throttle our rendering by waiting until the ring has completed our requests
3781 * emitted over 20 msec ago.
3782 *
Eric Anholtb9624422009-06-03 07:27:35 +00003783 * Note that if we were to use the current jiffies each time around the loop,
3784 * we wouldn't escape the function with any frames outstanding if the time to
3785 * render a frame was over 20ms.
3786 *
Eric Anholt673a3942008-07-30 12:06:12 -07003787 * This should get us reasonable parallelism between CPU and GPU but also
3788 * relatively low latency when blocking on a particular request to finish.
3789 */
3790static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003791i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003792{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003793 struct drm_i915_private *dev_priv = dev->dev_private;
3794 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003795 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003796 struct drm_i915_gem_request *request;
3797 struct intel_ring_buffer *ring = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01003798 unsigned reset_counter;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003799 u32 seqno = 0;
3800 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003801
Daniel Vetter308887a2012-11-14 17:14:06 +01003802 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3803 if (ret)
3804 return ret;
3805
3806 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3807 if (ret)
3808 return ret;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003809
Chris Wilson1c255952010-09-26 11:03:27 +01003810 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003811 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003812 if (time_after_eq(request->emitted_jiffies, recent_enough))
3813 break;
3814
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003815 ring = request->ring;
3816 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003817 }
Daniel Vetterf69061b2012-12-06 09:01:42 +01003818 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson1c255952010-09-26 11:03:27 +01003819 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003820
3821 if (seqno == 0)
3822 return 0;
3823
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003824 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003825 if (ret == 0)
3826 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003827
Eric Anholt673a3942008-07-30 12:06:12 -07003828 return ret;
3829}
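
/*
 * Worked example of the 20 ms window above: with requests emitted at
 * t = 0, 10 and 25 ms and the ioctl arriving at t = 30 ms, recent_enough
 * is t = 10 ms, so the loop stops at the t = 10 ms request and we wait on
 * the seqno of the t = 0 ms one, i.e. the newest request that is strictly
 * more than 20 ms old. If no request is that old, seqno stays 0 and the
 * ioctl returns immediately.
 */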
3830
Eric Anholt673a3942008-07-30 12:06:12 -07003831int
Chris Wilson05394f32010-11-08 19:18:58 +00003832i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07003833 struct i915_address_space *vm,
Chris Wilson05394f32010-11-08 19:18:58 +00003834 uint32_t alignment,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003835 bool map_and_fenceable,
3836 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003837{
Ben Widawsky6f65e292013-12-06 14:10:56 -08003838 const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003839 struct i915_vma *vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003840 int ret;
3841
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003842 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3843
3844 vma = i915_gem_obj_to_vma(obj, vm);
3845
3846 if (vma) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003847 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3848 return -EBUSY;
3849
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003850 if ((alignment &&
3851 vma->node.start & (alignment - 1)) ||
Chris Wilson05394f32010-11-08 19:18:58 +00003852 (map_and_fenceable && !obj->map_and_fenceable)) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003853 WARN(vma->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003854 "bo is already pinned with incorrect alignment:"
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003855 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003856 " obj->map_and_fenceable=%d\n",
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003857 i915_gem_obj_offset(obj, vm), alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003858 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003859 obj->map_and_fenceable);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003860 ret = i915_vma_unbind(vma);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003861 if (ret)
3862 return ret;
3863 }
3864 }
3865
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003866 if (!i915_gem_obj_bound(obj, vm)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003867 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3868 map_and_fenceable,
3869 nonblocking);
Chris Wilson97311292009-09-21 00:22:34 +01003870 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003871 return ret;
Chris Wilson87422672012-11-21 13:04:03 +00003872
Chris Wilson22c344e2009-02-11 14:26:45 +00003873 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003874
Ben Widawsky6f65e292013-12-06 14:10:56 -08003875 vma = i915_gem_obj_to_vma(obj, vm);
Daniel Vetter74898d72012-02-15 23:50:22 +01003876
Ben Widawsky6f65e292013-12-06 14:10:56 -08003877 vma->bind_vma(vma, obj->cache_level, flags);
Jesse Barnes79e53942008-11-07 14:24:08 -08003878
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003879 i915_gem_obj_to_vma(obj, vm)->pin_count++;
Chris Wilson6299f992010-11-24 12:23:44 +00003880 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003881
3882 return 0;
3883}
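
/*
 * Sketch (illustrative): the i915_gem_obj_ggtt_pin() calls elsewhere in this
 * file are presumably thin wrappers that pass the global GTT as the target
 * address space, i.e. roughly
 *
 *	i915_gem_object_pin(obj, &dev_priv->gtt.base, alignment, true, false);
 *
 * and every successful pin must later be balanced by
 * i915_gem_object_ggtt_unpin().
 */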
3884
3885void
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003886i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003887{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003888 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003889
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003890 BUG_ON(!vma);
3891 BUG_ON(vma->pin_count == 0);
3892 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3893
3894 if (--vma->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003895 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003896}
3897
3898int
3899i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003900 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003901{
3902 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003903 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003904 int ret;
3905
Daniel Vetter02f6bcc2013-12-18 16:30:22 +01003906 if (INTEL_INFO(dev)->gen >= 6)
3907 return -ENODEV;
3908
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003909 ret = i915_mutex_lock_interruptible(dev);
3910 if (ret)
3911 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003912
Chris Wilson05394f32010-11-08 19:18:58 +00003913 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003914 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003915 ret = -ENOENT;
3916 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003917 }
Eric Anholt673a3942008-07-30 12:06:12 -07003918
Chris Wilson05394f32010-11-08 19:18:58 +00003919 if (obj->madv != I915_MADV_WILLNEED) {
Eric Anholt673a3942008-07-30 12:06:12 -07003920 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00003921 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003922 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003923 }
3924
Chris Wilson05394f32010-11-08 19:18:58 +00003925 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Eric Anholt673a3942008-07-30 12:06:12 -07003926 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3927 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003928 ret = -EINVAL;
3929 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003930 }
3931
Daniel Vetteraa5f8022013-10-10 14:46:37 +02003932 if (obj->user_pin_count == ULONG_MAX) {
3933 ret = -EBUSY;
3934 goto out;
3935 }
3936
Chris Wilson93be8782013-01-02 10:31:22 +00003937 if (obj->user_pin_count == 0) {
Ben Widawskyc37e2202013-07-31 16:59:58 -07003938 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003939 if (ret)
3940 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003941 }
3942
Chris Wilson93be8782013-01-02 10:31:22 +00003943 obj->user_pin_count++;
3944 obj->pin_filp = file;
3945
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003946 args->offset = i915_gem_obj_ggtt_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003947out:
Chris Wilson05394f32010-11-08 19:18:58 +00003948 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003949unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003950 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003951 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003952}
3953
3954int
3955i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003956 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003957{
3958 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003959 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003960 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003961
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003962 ret = i915_mutex_lock_interruptible(dev);
3963 if (ret)
3964 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003965
Chris Wilson05394f32010-11-08 19:18:58 +00003966 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003967 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003968 ret = -ENOENT;
3969 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003970 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003971
Chris Wilson05394f32010-11-08 19:18:58 +00003972 if (obj->pin_filp != file) {
Eric Anholt673a3942008-07-30 12:06:12 -07003973 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3974 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003975 ret = -EINVAL;
3976 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003977 }
Chris Wilson05394f32010-11-08 19:18:58 +00003978 obj->user_pin_count--;
3979 if (obj->user_pin_count == 0) {
3980 obj->pin_filp = NULL;
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003981 i915_gem_object_ggtt_unpin(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003982 }
3983
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003984out:
Chris Wilson05394f32010-11-08 19:18:58 +00003985 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003986unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003987 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003988 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003989}
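
/*
 * Userspace view (illustrative; the pin side rejects gen6+ with -ENODEV):
 * both legacy ioctls take struct drm_i915_gem_pin.
 *
 *	struct drm_i915_gem_pin p = { .handle = handle, .alignment = 4096 };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &p);
 *	use_ggtt_offset(p.offset);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &p);
 *
 * use_ggtt_offset() is a hypothetical consumer of the returned offset.
 */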
3990
3991int
3992i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003993 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003994{
3995 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003996 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003997 int ret;
3998
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003999 ret = i915_mutex_lock_interruptible(dev);
4000 if (ret)
4001 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004002
Chris Wilson05394f32010-11-08 19:18:58 +00004003 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004004 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004005 ret = -ENOENT;
4006 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07004007 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08004008
Chris Wilson0be555b2010-08-04 15:36:30 +01004009 /* Count all active objects as busy, even if they are currently not used
4010 * by the gpu. Users of this interface expect objects to eventually
4011 * become non-busy without any further actions, therefore emit any
4012 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08004013 */
Daniel Vetter30dfebf2012-06-01 15:21:23 +02004014 ret = i915_gem_object_flush_active(obj);
4015
Chris Wilson05394f32010-11-08 19:18:58 +00004016 args->busy = obj->active;
Chris Wilsone9808ed2012-07-04 12:25:08 +01004017 if (obj->ring) {
4018 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4019 args->busy |= intel_ring_flag(obj->ring) << 16;
4020 }
Eric Anholt673a3942008-07-30 12:06:12 -07004021
Chris Wilson05394f32010-11-08 19:18:58 +00004022 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004023unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004024 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004025 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004026}
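
/*
 * Example of decoding args->busy (illustrative): the low bit reports whether
 * the object is still active, and the ring flag is shifted into the high
 * half, so 0x00010001 means active on the ring with flag 0x1 while 0x0
 * means idle.
 */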
4027
4028int
4029i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4030 struct drm_file *file_priv)
4031{
Akshay Joshi0206e352011-08-16 15:34:10 -04004032 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004033}
4034
Chris Wilson3ef94da2009-09-14 16:50:29 +01004035int
4036i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4037 struct drm_file *file_priv)
4038{
4039 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004040 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004041 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004042
4043 switch (args->madv) {
4044 case I915_MADV_DONTNEED:
4045 case I915_MADV_WILLNEED:
4046 break;
4047 default:
4048 return -EINVAL;
4049 }
4050
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004051 ret = i915_mutex_lock_interruptible(dev);
4052 if (ret)
4053 return ret;
4054
Chris Wilson05394f32010-11-08 19:18:58 +00004055 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004056 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004057 ret = -ENOENT;
4058 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004059 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01004060
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004061 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004062 ret = -EINVAL;
4063 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004064 }
4065
Chris Wilson05394f32010-11-08 19:18:58 +00004066 if (obj->madv != __I915_MADV_PURGED)
4067 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004068
Chris Wilson6c085a72012-08-20 11:40:46 +02004069 /* if the object is no longer attached, discard its backing storage */
4070 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01004071 i915_gem_object_truncate(obj);
4072
Chris Wilson05394f32010-11-08 19:18:58 +00004073 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004074
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004075out:
Chris Wilson05394f32010-11-08 19:18:58 +00004076 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004077unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004078 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004079 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004080}
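
/*
 * Userspace view (illustrative): a buffer cache marks idle buffers purgeable
 * and later checks 'retained' when reusing them; reupload_contents() is a
 * hypothetical helper.
 *
 *	struct drm_i915_gem_madvise m = { .handle = handle,
 *					  .madv = I915_MADV_DONTNEED };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &m);
 *	...
 *	m.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &m);
 *	if (!m.retained)
 *		reupload_contents(handle);
 */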
4081
Chris Wilson37e680a2012-06-07 15:38:42 +01004082void i915_gem_object_init(struct drm_i915_gem_object *obj,
4083 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004084{
Ben Widawsky35c20a62013-05-31 11:28:48 -07004085 INIT_LIST_HEAD(&obj->global_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004086 INIT_LIST_HEAD(&obj->ring_list);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02004087 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004088 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004089
Chris Wilson37e680a2012-06-07 15:38:42 +01004090 obj->ops = ops;
4091
Chris Wilson0327d6b2012-08-11 15:41:06 +01004092 obj->fence_reg = I915_FENCE_REG_NONE;
4093 obj->madv = I915_MADV_WILLNEED;
4094 /* Avoid an unnecessary call to unbind on the first bind. */
4095 obj->map_and_fenceable = true;
4096
4097 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4098}
4099
Chris Wilson37e680a2012-06-07 15:38:42 +01004100static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4101 .get_pages = i915_gem_object_get_pages_gtt,
4102 .put_pages = i915_gem_object_put_pages_gtt,
4103};
4104
Chris Wilson05394f32010-11-08 19:18:58 +00004105struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4106 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004107{
Daniel Vetterc397b902010-04-09 19:05:07 +00004108 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004109 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004110 gfp_t mask;
Daniel Vetterc397b902010-04-09 19:05:07 +00004111
Chris Wilson42dcedd2012-11-15 11:32:30 +00004112 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004113 if (obj == NULL)
4114 return NULL;
4115
4116 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
Chris Wilson42dcedd2012-11-15 11:32:30 +00004117 i915_gem_object_free(obj);
Daniel Vetterc397b902010-04-09 19:05:07 +00004118 return NULL;
4119 }
4120
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004121 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4122 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4123 /* 965gm cannot relocate objects above 4GiB. */
4124 mask &= ~__GFP_HIGHMEM;
4125 mask |= __GFP_DMA32;
4126 }
4127
Al Viro496ad9a2013-01-23 17:07:38 -05004128 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004129 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004130
Chris Wilson37e680a2012-06-07 15:38:42 +01004131 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004132
Daniel Vetterc397b902010-04-09 19:05:07 +00004133 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4134 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4135
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004136 if (HAS_LLC(dev)) {
4137 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004138 * cache) for about a 10% performance improvement
4139 * compared to uncached. Graphics requests other than
4140 * display scanout are coherent with the CPU in
4141 * accessing this cache. This means in this mode we
4142 * don't need to clflush on the CPU side, and on the
4143 * GPU side we only need to flush internal caches to
4144 * get data visible to the CPU.
4145 *
4146 * However, we maintain the display planes as UC, and so
4147 * need to rebind when first used as such.
4148 */
4149 obj->cache_level = I915_CACHE_LLC;
4150 } else
4151 obj->cache_level = I915_CACHE_NONE;
4152
Daniel Vetterd861e332013-07-24 23:25:03 +02004153 trace_i915_gem_object_create(obj);
4154
Chris Wilson05394f32010-11-08 19:18:58 +00004155 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004156}
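
/*
 * Example (sketch): allocating a shmem-backed object; callers in this driver
 * pass page-aligned sizes.
 *
 *	obj = i915_gem_alloc_object(dev, PAGE_SIZE);
 *	if (obj == NULL)
 *		return -ENOMEM;
 */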
4157
Chris Wilson1488fc02012-04-24 15:47:31 +01004158void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004159{
Chris Wilson1488fc02012-04-24 15:47:31 +01004160 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004161 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01004162 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004163 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004164
Paulo Zanonif65c9162013-11-27 18:20:34 -02004165 intel_runtime_pm_get(dev_priv);
4166
Chris Wilson26e12f82011-03-20 11:20:19 +00004167 trace_i915_gem_object_destroy(obj);
4168
Chris Wilson1488fc02012-04-24 15:47:31 +01004169 if (obj->phys_obj)
4170 i915_gem_detach_phys_object(dev, obj);
4171
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004172 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004173 int ret;
4174
4175 vma->pin_count = 0;
4176 ret = i915_vma_unbind(vma);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004177 if (WARN_ON(ret == -ERESTARTSYS)) {
4178 bool was_interruptible;
Chris Wilson1488fc02012-04-24 15:47:31 +01004179
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004180 was_interruptible = dev_priv->mm.interruptible;
4181 dev_priv->mm.interruptible = false;
Chris Wilson1488fc02012-04-24 15:47:31 +01004182
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004183 WARN_ON(i915_vma_unbind(vma));
Chris Wilson1488fc02012-04-24 15:47:31 +01004184
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004185 dev_priv->mm.interruptible = was_interruptible;
4186 }
Chris Wilson1488fc02012-04-24 15:47:31 +01004187 }
4188
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004189 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4190 * before progressing. */
4191 if (obj->stolen)
4192 i915_gem_object_unpin_pages(obj);
4193
Ben Widawsky401c29f2013-05-31 11:28:47 -07004194 if (WARN_ON(obj->pages_pin_count))
4195 obj->pages_pin_count = 0;
Chris Wilson37e680a2012-06-07 15:38:42 +01004196 i915_gem_object_put_pages(obj);
Chris Wilsond8cb5082012-08-11 15:41:03 +01004197 i915_gem_object_free_mmap_offset(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +00004198 i915_gem_object_release_stolen(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004199
Chris Wilson9da3da62012-06-01 15:20:22 +01004200 BUG_ON(obj->pages);
4201
Chris Wilson2f745ad2012-09-04 21:02:58 +01004202 if (obj->base.import_attach)
4203 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004204
Chris Wilson05394f32010-11-08 19:18:58 +00004205 drm_gem_object_release(&obj->base);
4206 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004207
Chris Wilson05394f32010-11-08 19:18:58 +00004208 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004209 i915_gem_object_free(obj);
Paulo Zanonif65c9162013-11-27 18:20:34 -02004210
4211 intel_runtime_pm_put(dev_priv);
Chris Wilsonbe726152010-07-23 23:18:50 +01004212}
4213
Daniel Vettere656a6c2013-08-14 14:14:04 +02004214struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
Ben Widawsky2f633152013-07-17 12:19:03 -07004215 struct i915_address_space *vm)
4216{
Daniel Vettere656a6c2013-08-14 14:14:04 +02004217 struct i915_vma *vma;
4218 list_for_each_entry(vma, &obj->vma_list, vma_link)
4219 if (vma->vm == vm)
4220 return vma;
4221
4222 return NULL;
4223}
4224
Ben Widawsky2f633152013-07-17 12:19:03 -07004225void i915_gem_vma_destroy(struct i915_vma *vma)
4226{
4227 WARN_ON(vma->node.allocated);
Chris Wilsonaaa05662013-08-20 12:56:40 +01004228
4229 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4230 if (!list_empty(&vma->exec_list))
4231 return;
4232
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004233 list_del(&vma->vma_link);
Daniel Vetterb93dab62013-08-26 11:23:47 +02004234
Ben Widawsky2f633152013-07-17 12:19:03 -07004235 kfree(vma);
4236}
4237
Jesse Barnes5669fca2009-02-17 15:13:31 -08004238int
Chris Wilson45c5f202013-10-16 11:50:01 +01004239i915_gem_suspend(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004240{
4241 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson45c5f202013-10-16 11:50:01 +01004242 int ret = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004243
Chris Wilson45c5f202013-10-16 11:50:01 +01004244 mutex_lock(&dev->struct_mutex);
Chris Wilsonf7403342013-09-13 23:57:04 +01004245 if (dev_priv->ums.mm_suspended)
Chris Wilson45c5f202013-10-16 11:50:01 +01004246 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07004247
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004248 ret = i915_gpu_idle(dev);
Chris Wilsonf7403342013-09-13 23:57:04 +01004249 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004250 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004251
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004252 i915_gem_retire_requests(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004253
Chris Wilson29105cc2010-01-07 10:39:13 +00004254 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01004255 if (!drm_core_check_feature(dev, DRIVER_MODESET))
Chris Wilson6c085a72012-08-20 11:40:46 +02004256 i915_gem_evict_everything(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004257
Chris Wilson29105cc2010-01-07 10:39:13 +00004258 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004259 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004260
Chris Wilson45c5f202013-10-16 11:50:01 +01004261 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4262 * We need to replace this with a semaphore, or something.
4263 * And not confound ums.mm_suspended!
4264 */
4265 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4266 DRIVER_MODESET);
4267 mutex_unlock(&dev->struct_mutex);
4268
4269 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004270 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004271 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004272
Eric Anholt673a3942008-07-30 12:06:12 -07004273 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004274
4275err:
4276 mutex_unlock(&dev->struct_mutex);
4277 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004278}
4279
Ben Widawskyc3787e22013-09-17 21:12:44 -07004280int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
Ben Widawskyb9524a12012-05-25 16:56:24 -07004281{
Ben Widawskyc3787e22013-09-17 21:12:44 -07004282 struct drm_device *dev = ring->dev;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004283 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004284 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4285 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
Ben Widawskyc3787e22013-09-17 21:12:44 -07004286 int i, ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004287
Ben Widawsky040d2ba2013-09-19 11:01:40 -07004288 if (!HAS_L3_DPF(dev) || !remap_info)
Ben Widawskyc3787e22013-09-17 21:12:44 -07004289 return 0;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004290
Ben Widawskyc3787e22013-09-17 21:12:44 -07004291 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4292 if (ret)
4293 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004294
Ben Widawskyc3787e22013-09-17 21:12:44 -07004295 /*
4296 * Note: We do not worry about the concurrent register cacheline hang
4297 * here because no other code should access these registers other than
4298 * at initialization time.
4299 */
Ben Widawskyb9524a12012-05-25 16:56:24 -07004300 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
Ben Widawskyc3787e22013-09-17 21:12:44 -07004301 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4302 intel_ring_emit(ring, reg_base + i);
4303 intel_ring_emit(ring, remap_info[i/4]);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004304 }
4305
Ben Widawskyc3787e22013-09-17 21:12:44 -07004306 intel_ring_advance(ring);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004307
Ben Widawskyc3787e22013-09-17 21:12:44 -07004308 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004309}
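
/*
 * Layout of the commands emitted above, per remap register (three dwords
 * each, matching the intel_ring_begin() reservation of
 * GEN7_L3LOG_SIZE / 4 * 3):
 *
 *	MI_LOAD_REGISTER_IMM(1)
 *	reg_base + i
 *	remap_info[i / 4]
 */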
4310
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004311void i915_gem_init_swizzling(struct drm_device *dev)
4312{
4313 drm_i915_private_t *dev_priv = dev->dev_private;
4314
Daniel Vetter11782b02012-01-31 16:47:55 +01004315 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004316 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4317 return;
4318
4319 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4320 DISP_TILE_SURFACE_SWIZZLING);
4321
Daniel Vetter11782b02012-01-31 16:47:55 +01004322 if (IS_GEN5(dev))
4323 return;
4324
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004325 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4326 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004327 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004328 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004329 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky31a53362013-11-02 21:07:04 -07004330 else if (IS_GEN8(dev))
4331 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004332 else
4333 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004334}
Daniel Vettere21af882012-02-09 20:53:27 +01004335
Chris Wilson67b1b572012-07-05 23:49:40 +01004336static bool
4337intel_enable_blt(struct drm_device *dev)
4338{
4339 if (!HAS_BLT(dev))
4340 return false;
4341
4342 /* The blitter was dysfunctional on early prototypes */
4343 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4344 DRM_INFO("BLT not supported on this pre-production hardware;"
4345 " graphics performance will be degraded.\n");
4346 return false;
4347 }
4348
4349 return true;
4350}
4351
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004352static int i915_gem_init_rings(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004353{
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004354 struct drm_i915_private *dev_priv = dev->dev_private;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004355 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004356
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004357 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004358 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00004359 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004360
4361 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004362 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004363 if (ret)
4364 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004365 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004366
Chris Wilson67b1b572012-07-05 23:49:40 +01004367 if (intel_enable_blt(dev)) {
Chris Wilson549f7362010-10-19 11:19:32 +01004368 ret = intel_init_blt_ring_buffer(dev);
4369 if (ret)
4370 goto cleanup_bsd_ring;
4371 }
4372
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004373 if (HAS_VEBOX(dev)) {
4374 ret = intel_init_vebox_ring_buffer(dev);
4375 if (ret)
4376 goto cleanup_blt_ring;
4377 }
4378
Mika Kuoppala99433932013-01-22 14:12:17 +02004380 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4381 if (ret)
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004382 goto cleanup_vebox_ring;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004383
4384 return 0;
4385
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004386cleanup_vebox_ring:
4387 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004388cleanup_blt_ring:
4389 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4390cleanup_bsd_ring:
4391 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4392cleanup_render_ring:
4393 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4394
4395 return ret;
4396}
4397
4398int
4399i915_gem_init_hw(struct drm_device *dev)
4400{
4401 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004402 int ret, i;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004403
4404 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4405 return -EIO;
4406
Ben Widawsky59124502013-07-04 11:02:05 -07004407 if (dev_priv->ellc_size)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004408 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004409
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004410 if (IS_HASWELL(dev))
4411 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4412 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004413
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004414 if (HAS_PCH_NOP(dev)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004415 if (IS_IVYBRIDGE(dev)) {
4416 u32 temp = I915_READ(GEN7_MSG_CTL);
4417 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4418 I915_WRITE(GEN7_MSG_CTL, temp);
4419 } else if (INTEL_INFO(dev)->gen >= 7) {
4420 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4421 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4422 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4423 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004424 }
4425
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004426 i915_gem_init_swizzling(dev);
4427
4428 ret = i915_gem_init_rings(dev);
4429 if (ret)
Mika Kuoppala99433932013-01-22 14:12:17 +02004430 return ret;
4431
Ben Widawskyc3787e22013-09-17 21:12:44 -07004432 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4433 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4434
Ben Widawsky254f9652012-06-04 14:42:42 -07004435 /*
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004436 * XXX: Contexts should only be initialized once. Doing a switch to the
4437 * default context switch however is something we'd like to do after
4438 * reset or thaw (the latter may not actually be necessary for HW, but
4439 * goes with our code better). Context switching requires rings (for
4440 * the do_switch), but before enabling PPGTT. So don't move this.
Ben Widawsky254f9652012-06-04 14:42:42 -07004441 */
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004442 ret = i915_gem_context_enable(dev_priv);
Ben Widawsky8245be32013-11-06 13:56:29 -02004443 if (ret) {
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004444 DRM_ERROR("Context enable failed %d\n", ret);
4445 goto err_out;
Ben Widawskyb7c36d22013-04-08 18:43:56 -07004446 }
Daniel Vettere21af882012-02-09 20:53:27 +01004447
Chris Wilson68f95ba2010-05-27 13:18:22 +01004448 return 0;
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004449
4450err_out:
4451 i915_gem_cleanup_ringbuffer(dev);
4452 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004453}
4454
Chris Wilson1070a422012-04-24 15:47:41 +01004455int i915_gem_init(struct drm_device *dev)
4456{
4457 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson1070a422012-04-24 15:47:41 +01004458 int ret;
4459
Chris Wilson1070a422012-04-24 15:47:41 +01004460 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004461
4462 if (IS_VALLEYVIEW(dev)) {
4463 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4464 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4465 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4466 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4467 }
4468
Ben Widawskyd7e50082012-12-18 10:31:25 -08004469 i915_gem_init_global_gtt(dev);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004470
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004471 ret = i915_gem_context_init(dev);
Mika Kuoppalae3848692014-01-31 17:14:02 +02004472 if (ret) {
4473 mutex_unlock(&dev->struct_mutex);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004474 return ret;
Mika Kuoppalae3848692014-01-31 17:14:02 +02004475 }
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004476
Chris Wilson1070a422012-04-24 15:47:41 +01004477 ret = i915_gem_init_hw(dev);
4478 mutex_unlock(&dev->struct_mutex);
4479 if (ret) {
Ben Widawskybdf4fd72013-12-06 14:11:18 -08004480 WARN_ON(dev_priv->mm.aliasing_ppgtt);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004481 i915_gem_context_fini(dev);
Ben Widawskyc39538a2013-12-06 14:10:50 -08004482 drm_mm_takedown(&dev_priv->gtt.base.mm);
Chris Wilson1070a422012-04-24 15:47:41 +01004483 return ret;
4484 }
4485
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004486 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4487 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4488 dev_priv->dri1.allow_batchbuffer = 1;
Chris Wilson1070a422012-04-24 15:47:41 +01004489 return 0;
4490}
4491
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004492void
4493i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4494{
4495 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004496 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004497 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004498
Chris Wilsonb4519512012-05-11 14:29:30 +01004499 for_each_ring(ring, dev_priv, i)
4500 intel_cleanup_ring_buffer(ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004501}
4502
4503int
Eric Anholt673a3942008-07-30 12:06:12 -07004504i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4505 struct drm_file *file_priv)
4506{
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004507 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004508 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004509
Jesse Barnes79e53942008-11-07 14:24:08 -08004510 if (drm_core_check_feature(dev, DRIVER_MODESET))
4511 return 0;
4512
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004513 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004514 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004515 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004516 }
4517
Eric Anholt673a3942008-07-30 12:06:12 -07004518 mutex_lock(&dev->struct_mutex);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004519 dev_priv->ums.mm_suspended = 0;
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004520
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004521 ret = i915_gem_init_hw(dev);
Wu Fengguangd816f6ac2009-04-18 10:43:32 +08004522 if (ret != 0) {
4523 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004524 return ret;
Wu Fengguangd816f6ac2009-04-18 10:43:32 +08004525 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004526
Ben Widawsky5cef07e2013-07-16 16:50:08 -07004527 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004528 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004529
Chris Wilson5f353082010-06-07 14:03:03 +01004530 ret = drm_irq_install(dev);
4531 if (ret)
4532 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004533
Eric Anholt673a3942008-07-30 12:06:12 -07004534 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01004535
4536cleanup_ringbuffer:
4537 mutex_lock(&dev->struct_mutex);
4538 i915_gem_cleanup_ringbuffer(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004539 dev_priv->ums.mm_suspended = 1;
Chris Wilson5f353082010-06-07 14:03:03 +01004540 mutex_unlock(&dev->struct_mutex);
4541
4542 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004543}
4544
4545int
4546i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4547 struct drm_file *file_priv)
4548{
Jesse Barnes79e53942008-11-07 14:24:08 -08004549 if (drm_core_check_feature(dev, DRIVER_MODESET))
4550 return 0;
4551
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004552 drm_irq_uninstall(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004553
Chris Wilson45c5f202013-10-16 11:50:01 +01004554 return i915_gem_suspend(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004555}
4556
4557void
4558i915_gem_lastclose(struct drm_device *dev)
4559{
4560 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004561
Eric Anholte806b492009-01-22 09:56:58 -08004562 if (drm_core_check_feature(dev, DRIVER_MODESET))
4563 return;
4564
Chris Wilson45c5f202013-10-16 11:50:01 +01004565 ret = i915_gem_suspend(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004566 if (ret)
4567 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004568}
4569
Chris Wilson64193402010-10-24 12:38:05 +01004570static void
4571init_ring_lists(struct intel_ring_buffer *ring)
4572{
4573 INIT_LIST_HEAD(&ring->active_list);
4574 INIT_LIST_HEAD(&ring->request_list);
Chris Wilson64193402010-10-24 12:38:05 +01004575}
4576
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08004577void i915_init_vm(struct drm_i915_private *dev_priv,
4578 struct i915_address_space *vm)
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004579{
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08004580 if (!i915_is_ggtt(vm))
4581 drm_mm_init(&vm->mm, vm->start, vm->total);
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004582 vm->dev = dev_priv->dev;
4583 INIT_LIST_HEAD(&vm->active_list);
4584 INIT_LIST_HEAD(&vm->inactive_list);
4585 INIT_LIST_HEAD(&vm->global_link);
Chris Wilsonf72d21e2014-01-09 22:57:22 +00004586 list_add_tail(&vm->global_link, &dev_priv->vm_list);
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004587}
4588
Eric Anholt673a3942008-07-30 12:06:12 -07004589void
4590i915_gem_load(struct drm_device *dev)
4591{
4592 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson42dcedd2012-11-15 11:32:30 +00004593 int i;
4594
4595 dev_priv->slab =
4596 kmem_cache_create("i915_gem_object",
4597 sizeof(struct drm_i915_gem_object), 0,
4598 SLAB_HWCACHE_ALIGN,
4599 NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07004600
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004601 INIT_LIST_HEAD(&dev_priv->vm_list);
4602 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4603
Ben Widawskya33afea2013-09-17 21:12:45 -07004604 INIT_LIST_HEAD(&dev_priv->context_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02004605 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4606 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004607 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004608 for (i = 0; i < I915_NUM_RINGS; i++)
4609 init_ring_lists(&dev_priv->ring[i]);
Daniel Vetter4b9de732011-10-09 21:52:02 +02004610 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
Daniel Vetter007cc8a2010-04-28 11:02:31 +02004611 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004612 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4613 i915_gem_retire_work_handler);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004614 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4615 i915_gem_idle_work_handler);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004616 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01004617
Dave Airlie94400122010-07-20 13:15:31 +10004618 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4619 if (IS_GEN3(dev)) {
Daniel Vetter50743292012-04-26 22:02:54 +02004620 I915_WRITE(MI_ARB_STATE,
4621 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
Dave Airlie94400122010-07-20 13:15:31 +10004622 }
4623
Chris Wilson72bfa192010-12-19 11:42:05 +00004624 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4625
Jesse Barnesde151cf2008-11-12 10:03:55 -08004626 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08004627 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4628 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004629
Ville Syrjälä42b5aea2013-04-09 13:02:47 +03004630 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4631 dev_priv->num_fence_regs = 32;
4632 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004633 dev_priv->num_fence_regs = 16;
4634 else
4635 dev_priv->num_fence_regs = 8;
4636
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004637 /* Initialize fence registers to zero */
Chris Wilson19b2dbd2013-06-12 10:15:12 +01004638	i915_gem_restore_fences(dev);
Eric Anholt10ed13e2011-05-06 13:53:49 -07004640
Eric Anholt673a3942008-07-30 12:06:12 -07004641 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004642 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01004643
Chris Wilsonce453d82011-02-21 14:43:56 +00004644 dev_priv->mm.interruptible = true;
4645
Dave Chinner7dc19d52013-08-28 10:18:11 +10004646 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4647 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
Chris Wilson17250b72010-10-28 12:51:39 +01004648 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4649 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07004650}
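
/*
 * For illustration: the object allocator elsewhere in this file hands
 * out zeroed objects from the slab created above, roughly like this
 * ("example_gem_object_alloc" is a hypothetical stand-in):
 */
static struct drm_i915_gem_object *
example_gem_object_alloc(struct drm_i915_private *dev_priv)
{
	/* zeroed allocation, so list heads and flags start in a known state */
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}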
Dave Airlie71acb5e2008-12-30 20:31:46 +10004651
4652/*
4653 * Create a physically contiguous memory object for this object
4654 * e.g. for cursor + overlay regs
4655 */
Chris Wilson995b67622010-08-20 13:23:26 +01004656static int i915_gem_init_phys_object(struct drm_device *dev,
4657 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004658{
4659 drm_i915_private_t *dev_priv = dev->dev_private;
4660 struct drm_i915_gem_phys_object *phys_obj;
4661 int ret;
4662
4663 if (dev_priv->mm.phys_objs[id - 1] || !size)
4664 return 0;
4665
Daniel Vetterb14c5672013-09-19 12:18:32 +02004666 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004667 if (!phys_obj)
4668 return -ENOMEM;
4669
4670 phys_obj->id = id;
4671
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004672 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004673 if (!phys_obj->handle) {
4674 ret = -ENOMEM;
4675 goto kfree_obj;
4676 }
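	/*
	 * Mark the pages write-combining: CPU writes then bypass the
	 * cache, so the hardware sees cursor/overlay updates without
	 * explicit flushing.
	 */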
4677#ifdef CONFIG_X86
4678 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4679#endif
4680
4681 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4682
4683 return 0;
4684kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004685 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004686 return ret;
4687}
4688
Chris Wilson995b67622010-08-20 13:23:26 +01004689static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004690{
4691 drm_i915_private_t *dev_priv = dev->dev_private;
4692 struct drm_i915_gem_phys_object *phys_obj;
4693
4694 if (!dev_priv->mm.phys_objs[id - 1])
4695 return;
4696
4697 phys_obj = dev_priv->mm.phys_objs[id - 1];
 4698	if (phys_obj->cur_obj)
 4699		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4701
4702#ifdef CONFIG_X86
4703 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4704#endif
4705 drm_pci_free(dev, phys_obj->handle);
4706 kfree(phys_obj);
4707 dev_priv->mm.phys_objs[id - 1] = NULL;
4708}
4709
4710void i915_gem_free_all_phys_object(struct drm_device *dev)
4711{
4712 int i;
4713
Dave Airlie260883c2009-01-22 17:58:49 +10004714 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004715 i915_gem_free_phys_object(dev, i);
4716}
4717
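/*
 * Copy the phys object's contents back into the object's shmem pages
 * (clflushing as we go), then sever the phys binding.
 */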
4718void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004719 struct drm_i915_gem_object *obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004720{
Al Viro496ad9a2013-01-23 17:07:38 -05004721 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsone5281cc2010-10-28 13:45:36 +01004722 char *vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004723 int i;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004724 int page_count;
4725
Chris Wilson05394f32010-11-08 19:18:58 +00004726 if (!obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004727 return;
Chris Wilson05394f32010-11-08 19:18:58 +00004728 vaddr = obj->phys_obj->handle->vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004729
Chris Wilson05394f32010-11-08 19:18:58 +00004730 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004731 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07004732 struct page *page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004733 if (!IS_ERR(page)) {
4734 char *dst = kmap_atomic(page);
4735 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4736 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004737
Chris Wilsone5281cc2010-10-28 13:45:36 +01004738 drm_clflush_pages(&page, 1);
4739
4740 set_page_dirty(page);
4741 mark_page_accessed(page);
4742 page_cache_release(page);
4743 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004744 }
Ben Widawskye76e9ae2012-11-04 09:21:27 -08004745 i915_gem_chipset_flush(dev);
Chris Wilsond78b47b2009-06-17 21:52:49 +01004746
Chris Wilson05394f32010-11-08 19:18:58 +00004747 obj->phys_obj->cur_obj = NULL;
4748 obj->phys_obj = NULL;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004749}
4750
4751int
4752i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004753 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004754 int id,
4755 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004756{
Al Viro496ad9a2013-01-23 17:07:38 -05004757 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004758 drm_i915_private_t *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004759 int ret = 0;
4760 int page_count;
4761 int i;
4762
4763 if (id > I915_MAX_PHYS_OBJECT)
4764 return -EINVAL;
4765
Chris Wilson05394f32010-11-08 19:18:58 +00004766 if (obj->phys_obj) {
4767 if (obj->phys_obj->id == id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004768 return 0;
4769 i915_gem_detach_phys_object(dev, obj);
4770 }
4771
Dave Airlie71acb5e2008-12-30 20:31:46 +10004772 /* create a new object */
4773 if (!dev_priv->mm.phys_objs[id - 1]) {
4774 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson05394f32010-11-08 19:18:58 +00004775 obj->base.size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004776 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00004777 DRM_ERROR("failed to init phys object %d size: %zu\n",
4778 id, obj->base.size);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004779 return ret;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004780 }
4781 }
4782
4783 /* bind to the object */
Chris Wilson05394f32010-11-08 19:18:58 +00004784 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4785 obj->phys_obj->cur_obj = obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004786
Chris Wilson05394f32010-11-08 19:18:58 +00004787 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004788
4789 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01004790 struct page *page;
4791 char *dst, *src;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004792
Hugh Dickins5949eac2011-06-27 16:18:18 -07004793 page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004794 if (IS_ERR(page))
4795 return PTR_ERR(page);
4796
Chris Wilsonff75b9b2010-10-30 22:52:31 +01004797 src = kmap_atomic(page);
Chris Wilson05394f32010-11-08 19:18:58 +00004798 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004799 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004800 kunmap_atomic(src);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004801
4802 mark_page_accessed(page);
4803 page_cache_release(page);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004804 }
4805
4806 return 0;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004807}
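
/*
 * Usage sketch: on generations whose cursor hardware needs a physical
 * address, the display code pins the cursor bo via a phys object,
 * roughly like this ("pipe" and "align" stand in for the caller's
 * real values):
 */
static int example_pin_cursor_phys(struct drm_device *dev,
				   struct drm_i915_gem_object *obj,
				   int pipe, int align)
{
	return i915_gem_attach_phys_object(dev, obj,
					   I915_GEM_PHYS_CURSOR_0 + pipe,
					   align);
}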
4808
4809static int
Chris Wilson05394f32010-11-08 19:18:58 +00004810i915_gem_phys_pwrite(struct drm_device *dev,
4811 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +10004812 struct drm_i915_gem_pwrite *args,
4813 struct drm_file *file_priv)
4814{
Chris Wilson05394f32010-11-08 19:18:58 +00004815 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
Ville Syrjälä2bb46292013-02-22 16:12:51 +02004816 char __user *user_data = to_user_ptr(args->data_ptr);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004817
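	/*
	 * Fast path: a no-fault, non-temporal copy under the lock; if it
	 * cannot complete, drop struct_mutex and redo the whole copy
	 * with a normal, faulting copy_from_user().
	 */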
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004818 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4819 unsigned long unwritten;
4820
4821 /* The physical object once assigned is fixed for the lifetime
4822 * of the obj, so we can safely drop the lock and continue
4823 * to access vaddr.
4824 */
4825 mutex_unlock(&dev->struct_mutex);
4826 unwritten = copy_from_user(vaddr, user_data, args->size);
4827 mutex_lock(&dev->struct_mutex);
4828 if (unwritten)
4829 return -EFAULT;
4830 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004831
Ben Widawskye76e9ae2012-11-04 09:21:27 -08004832 i915_gem_chipset_flush(dev);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004833 return 0;
4834}
Eric Anholtb9624422009-06-03 07:27:35 +00004835
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004836void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004837{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004838 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00004839
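	/* ensure the per-file idle work (RPS boost bookkeeping) is gone */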
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004840 cancel_delayed_work_sync(&file_priv->mm.idle_work);
4841
Eric Anholtb9624422009-06-03 07:27:35 +00004842 /* Clean up our request list when the client is going away, so that
4843 * later retire_requests won't dereference our soon-to-be-gone
4844 * file_priv.
4845 */
Chris Wilson1c255952010-09-26 11:03:27 +01004846 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004847 while (!list_empty(&file_priv->mm.request_list)) {
4848 struct drm_i915_gem_request *request;
4849
4850 request = list_first_entry(&file_priv->mm.request_list,
4851 struct drm_i915_gem_request,
4852 client_list);
4853 list_del(&request->client_list);
4854 request->file_priv = NULL;
4855 }
Chris Wilson1c255952010-09-26 11:03:27 +01004856 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00004857}
Chris Wilson31169712009-09-14 16:50:28 +01004858
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004859static void
4860i915_gem_file_idle_work_handler(struct work_struct *work)
4861{
4862 struct drm_i915_file_private *file_priv =
4863 container_of(work, typeof(*file_priv), mm.idle_work.work);
4864
4865 atomic_set(&file_priv->rps_wait_boost, false);
4866}
4867
4868int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4869{
4870 struct drm_i915_file_private *file_priv;
Ben Widawskye422b882013-12-06 14:10:58 -08004871 int ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004872
4873 DRM_DEBUG_DRIVER("\n");
4874
4875 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4876 if (!file_priv)
4877 return -ENOMEM;
4878
4879 file->driver_priv = file_priv;
4880 file_priv->dev_priv = dev->dev_private;
4881
4882 spin_lock_init(&file_priv->mm.lock);
4883 INIT_LIST_HEAD(&file_priv->mm.request_list);
4884 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4885 i915_gem_file_idle_work_handler);
4886
Ben Widawskye422b882013-12-06 14:10:58 -08004887 ret = i915_gem_context_open(dev, file);
4888 if (ret)
4889 kfree(file_priv);
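	/* on failure the DRM core aborts the open, so the stale
	 * file->driver_priv pointer is never used again */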
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004890
Ben Widawskye422b882013-12-06 14:10:58 -08004891 return ret;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004892}
4893
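/*
 * Shrinker helper: report whether struct_mutex is held by *this* task,
 * so that direct reclaim recursing from our own allocations can
 * proceed without deadlocking on the lock we already own.
 */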
Chris Wilson57745062012-11-21 13:04:04 +00004894static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4895{
4896 if (!mutex_is_locked(mutex))
4897 return false;
4898
4899#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4900 return mutex->owner == task;
4901#else
4902 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4903 return false;
4904#endif
4905}
4906
Dave Chinner7dc19d52013-08-28 10:18:11 +10004907static unsigned long
4908i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
Chris Wilson31169712009-09-14 16:50:28 +01004909{
Chris Wilson17250b72010-10-28 12:51:39 +01004910 struct drm_i915_private *dev_priv =
4911 container_of(shrinker,
4912 struct drm_i915_private,
4913 mm.inactive_shrinker);
4914 struct drm_device *dev = dev_priv->dev;
Chris Wilson6c085a72012-08-20 11:40:46 +02004915 struct drm_i915_gem_object *obj;
Chris Wilson57745062012-11-21 13:04:04 +00004916 bool unlock = true;
Dave Chinner7dc19d52013-08-28 10:18:11 +10004917 unsigned long count;
Chris Wilson17250b72010-10-28 12:51:39 +01004918
Chris Wilson57745062012-11-21 13:04:04 +00004919 if (!mutex_trylock(&dev->struct_mutex)) {
4920 if (!mutex_is_locked_by(&dev->struct_mutex, current))
Daniel Vetterd3227042013-09-25 14:00:02 +02004921 return 0;
Chris Wilson57745062012-11-21 13:04:04 +00004922
Daniel Vetter677feac2012-12-19 14:33:45 +01004923 if (dev_priv->mm.shrinker_no_lock_stealing)
Daniel Vetterd3227042013-09-25 14:00:02 +02004924 return 0;
Daniel Vetter677feac2012-12-19 14:33:45 +01004925
Chris Wilson57745062012-11-21 13:04:04 +00004926 unlock = false;
4927 }
Chris Wilson31169712009-09-14 16:50:28 +01004928
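	/* report potentially reclaimable pages: idle, unpinned objects only */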
Dave Chinner7dc19d52013-08-28 10:18:11 +10004929 count = 0;
Ben Widawsky35c20a62013-05-31 11:28:48 -07004930 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
Chris Wilsona5570172012-09-04 21:02:54 +01004931 if (obj->pages_pin_count == 0)
Dave Chinner7dc19d52013-08-28 10:18:11 +10004932 count += obj->base.size >> PAGE_SHIFT;
Ben Widawskyfcb4a572013-07-31 16:59:57 -07004933
4934 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4935 if (obj->active)
4936 continue;
4937
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004938 if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
Dave Chinner7dc19d52013-08-28 10:18:11 +10004939 count += obj->base.size >> PAGE_SHIFT;
Ben Widawskyfcb4a572013-07-31 16:59:57 -07004940 }
Chris Wilson31169712009-09-14 16:50:28 +01004941
Chris Wilson57745062012-11-21 13:04:04 +00004942 if (unlock)
4943 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9973b42013-10-04 10:33:00 +01004944
Dave Chinner7dc19d52013-08-28 10:18:11 +10004945 return count;
Chris Wilson31169712009-09-14 16:50:28 +01004946}
Ben Widawskya70a3142013-07-31 16:59:56 -07004947
4948/* All the new VM stuff */
4949unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4950 struct i915_address_space *vm)
4951{
4952 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4953 struct i915_vma *vma;
4954
Ben Widawsky6f425322013-12-06 14:10:48 -08004955 if (!dev_priv->mm.aliasing_ppgtt ||
4956 vm == &dev_priv->mm.aliasing_ppgtt->base)
Ben Widawskya70a3142013-07-31 16:59:56 -07004957 vm = &dev_priv->gtt.base;
4958
4959 BUG_ON(list_empty(&o->vma_list));
4960 list_for_each_entry(vma, &o->vma_list, vma_link) {
4961 if (vma->vm == vm)
4962 return vma->node.start;
4964 }
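	/* returns unsigned long, so -1 == ~0UL is the "not bound" sentinel */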
4965 return -1;
4966}
4967
4968bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4969 struct i915_address_space *vm)
4970{
4971 struct i915_vma *vma;
4972
4973 list_for_each_entry(vma, &o->vma_list, vma_link)
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004974 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
Ben Widawskya70a3142013-07-31 16:59:56 -07004975 return true;
4976
4977 return false;
4978}
4979
4980bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4981{
Chris Wilson5a1d5eb2013-09-10 11:27:37 +01004982 struct i915_vma *vma;
Ben Widawskya70a3142013-07-31 16:59:56 -07004983
Chris Wilson5a1d5eb2013-09-10 11:27:37 +01004984 list_for_each_entry(vma, &o->vma_list, vma_link)
4985 if (drm_mm_node_allocated(&vma->node))
Ben Widawskya70a3142013-07-31 16:59:56 -07004986 return true;
4987
4988 return false;
4989}
4990
4991unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4992 struct i915_address_space *vm)
4993{
4994 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4995 struct i915_vma *vma;
4996
Ben Widawsky6f425322013-12-06 14:10:48 -08004997 if (!dev_priv->mm.aliasing_ppgtt ||
4998 vm == &dev_priv->mm.aliasing_ppgtt->base)
Ben Widawskya70a3142013-07-31 16:59:56 -07004999 vm = &dev_priv->gtt.base;
5000
5001 BUG_ON(list_empty(&o->vma_list));
5002
5003 list_for_each_entry(vma, &o->vma_list, vma_link)
5004 if (vma->vm == vm)
5005 return vma->node.size;
5006
5007 return 0;
5008}
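
/*
 * Usage sketch (hypothetical helper, assumes struct_mutex is held):
 * combining the lookup helpers above to describe an object's binding
 * in a given address space.
 */
static void example_describe_binding(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	if (!i915_gem_obj_bound(obj, vm))
		return;
	DRM_DEBUG("bound at %#lx, %lu bytes\n",
		  i915_gem_obj_offset(obj, vm),
		  i915_gem_obj_size(obj, vm));
}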
5009
Dave Chinner7dc19d52013-08-28 10:18:11 +10005010static unsigned long
5011i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
5012{
5013 struct drm_i915_private *dev_priv =
5014 container_of(shrinker,
5015 struct drm_i915_private,
5016 mm.inactive_shrinker);
5017 struct drm_device *dev = dev_priv->dev;
Dave Chinner7dc19d52013-08-28 10:18:11 +10005018 unsigned long freed;
5019 bool unlock = true;
5020
5021 if (!mutex_trylock(&dev->struct_mutex)) {
5022 if (!mutex_is_locked_by(&dev->struct_mutex, current))
Daniel Vetterd3227042013-09-25 14:00:02 +02005023 return SHRINK_STOP;
Dave Chinner7dc19d52013-08-28 10:18:11 +10005024
5025 if (dev_priv->mm.shrinker_no_lock_stealing)
Daniel Vetterd3227042013-09-25 14:00:02 +02005026 return SHRINK_STOP;
Dave Chinner7dc19d52013-08-28 10:18:11 +10005027
5028 unlock = false;
5029 }
5030
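	/*
	 * Escalate gradually: purgeable objects first, then other
	 * unpinned bos, and only as a last resort everything evictable.
	 */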
Chris Wilsond9973b42013-10-04 10:33:00 +01005031 freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
5032 if (freed < sc->nr_to_scan)
5033 freed += __i915_gem_shrink(dev_priv,
5034 sc->nr_to_scan - freed,
5035 false);
5036 if (freed < sc->nr_to_scan)
Dave Chinner7dc19d52013-08-28 10:18:11 +10005037 freed += i915_gem_shrink_all(dev_priv);
5038
5039 if (unlock)
5040 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9973b42013-10-04 10:33:00 +01005041
Dave Chinner7dc19d52013-08-28 10:18:11 +10005042 return freed;
5043}
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07005044
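/*
 * Relies on the invariant that an object's GGTT vma, when it exists,
 * is always first on obj->vma_list.
 */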
5045struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5046{
5047 struct i915_vma *vma;
5048
5049 if (WARN_ON(list_empty(&obj->vma_list)))
5050 return NULL;
5051
5052 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
Ben Widawsky6e164c32013-12-06 14:10:49 -08005053 if (vma->vm != obj_to_ggtt(obj))
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07005054 return NULL;
5055
5056 return vma;
5057}