/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);

static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);

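/* Returns true if CPU reads of the object are coherent with GPU writes:
 * either the platform snoops through a shared LLC, or the object uses a
 * snooped caching mode rather than I915_CACHE_NONE.
 */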
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

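/* CPU writes must be flushed if the object is not coherent, and also for
 * coherent objects pinned for display scanout, since the display engine
 * reads memory without snooping the CPU caches.
 */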
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

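/* Called when the object loses its fence register. GTT mmaps of a tiled
 * object rely on the fence for detiling, so any existing mappings must be
 * invalidated and refaulted.
 */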
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

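/* Block until any in-progress GPU reset completes (or the GPU is declared
 * terminally wedged) so that GEM state is stable, giving up after ten
 * seconds rather than hanging userspace indefinitely.
 */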
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

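/* Interruptible acquisition of struct_mutex for GEM ioctls, waiting out
 * any pending GPU reset before attempting to take the lock.
 */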
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

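/* Legacy (pre-KMS) ioctl to place the GTT aperture range managed by GEM;
 * rejected under kernel modesetting and on gen5+ hardware.
 */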
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

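/* Report the total GGTT size and an estimate of how much of it could
 * still be used, i.e. everything not currently pinned.
 */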
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

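/* Common object-creation path shared by the create and dumb_create ioctls:
 * round the size up to whole pages, allocate the object and return a new
 * userspace handle to it.
 */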
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

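/* Copy helpers for objects with bit-17 swizzling, where adjacent 64-byte
 * cachelines within a page are swapped when bit 17 of the page's physical
 * address is set. Copying one cacheline at a time with the GPU offset
 * XORed by 64 undoes the swizzle.
 */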
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!obj->base.filp)
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

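/* pread through the object's shmem backing pages: try an atomic kmap
 * fastpath while holding struct_mutex, and on a user fault drop the lock,
 * prefault the user buffer, and retry that page with the sleeping slowpath.
 */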
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

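/* pwrite through the object's shmem backing pages, mirroring the pread
 * structure above: per-page atomic fastpath with a sleeping slowpath
 * fallback, plus clflushes before partially overwritten cachelines and
 * after all writes when the object is not CPU-coherent.
 */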
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

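/* Translate the current reset state into an errno: 0 when no reset is in
 * progress, -EIO if the GPU is terminally wedged or the caller cannot
 * handle -EAGAIN, and -EAGAIN while a reset is pending.
 */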
int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

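/* Helpers for __wait_seqno below: a timer callback used to emulate a
 * missed interrupt, a check of the missed-irq bitmask for this ring, and a
 * test-and-set guard so that each file contributes at most one RPS
 * wait-boost at a time.
 */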
static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

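/* Common tail for the blocking and nonblocking wait_rendering paths:
 * after a successful wait the last write is known to have completed, so
 * the write seqno can be cleared even before the buffer is retired.
 */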
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	if (!obj->active)
		return 0;

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_i915_file_private *file_priv,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

Eric Anholt673a3942008-07-30 12:06:12 -07001240/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001241 * Called when user space prepares to use an object with the CPU, either
1242 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -07001243 */
1244int
1245i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001246 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001247{
1248 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001249 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001250 uint32_t read_domains = args->read_domains;
1251 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001252 int ret;
1253
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001254 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +01001255 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001256 return -EINVAL;
1257
Chris Wilson21d509e2009-06-06 09:46:02 +01001258 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001259 return -EINVAL;
1260
1261 /* Having something in the write domain implies it's in the read
1262 * domain, and only that read domain. Enforce that in the request.
1263 */
1264 if (write_domain != 0 && read_domains != write_domain)
1265 return -EINVAL;
1266
Chris Wilson76c1dec2010-09-25 11:22:51 +01001267 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001268 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001269 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001270
Chris Wilson05394f32010-11-08 19:18:58 +00001271 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001272 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001273 ret = -ENOENT;
1274 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +01001275 }
Jesse Barnes652c3932009-08-17 13:31:43 -07001276
Chris Wilson3236f572012-08-24 09:35:09 +01001277 /* Try to flush the object off the GPU without holding the lock.
1278 * We will repeat the flush holding the lock in the normal manner
1279 * to catch cases where we are gazumped.
1280 */
Chris Wilson6e4930f2014-02-07 18:37:06 -02001281 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1282 file->driver_priv,
1283 !write_domain);
Chris Wilson3236f572012-08-24 09:35:09 +01001284 if (ret)
1285 goto unref;
1286
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001287 if (read_domains & I915_GEM_DOMAIN_GTT) {
1288 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001289
1290 /* Silently promote "you're not bound, there was nothing to do"
1291 * to success, since the client was just asking us to
1292 * make sure everything was done.
1293 */
1294 if (ret == -EINVAL)
1295 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001296 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001297 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001298 }
1299
Chris Wilson3236f572012-08-24 09:35:09 +01001300unref:
Chris Wilson05394f32010-11-08 19:18:58 +00001301 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001302unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001303 mutex_unlock(&dev->struct_mutex);
1304 return ret;
1305}
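/*
 * Illustrative userspace sketch (not part of this file): before touching a
 * buffer through a GTT mmap, callers typically issue the ioctl handled
 * above with matching read and write domains.  `fd` and `handle` are
 * assumed to come from the caller; error handling is omitted.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */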
1306
1307/**
1308 * Called when user space has done writes to this buffer
1309 */
1310int
1311i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001312 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001313{
1314 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001315 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001316 int ret = 0;
1317
Chris Wilson76c1dec2010-09-25 11:22:51 +01001318 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001319 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001320 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001321
Chris Wilson05394f32010-11-08 19:18:58 +00001322 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001323 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001324 ret = -ENOENT;
1325 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001326 }
1327
Eric Anholt673a3942008-07-30 12:06:12 -07001328 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson2c225692013-08-09 12:26:45 +01001329 if (obj->pin_display)
1330 i915_gem_object_flush_cpu_write_domain(obj, true);
Eric Anholte47c68e2008-11-14 13:35:19 -08001331
Chris Wilson05394f32010-11-08 19:18:58 +00001332 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001333unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001334 mutex_unlock(&dev->struct_mutex);
1335 return ret;
1336}
1337
1338/**
1339 * Maps the contents of an object, returning the address it is mapped
1340 * into.
1341 *
1342 * While the mapping holds a reference on the contents of the object, it doesn't
1343 * imply a ref on the object itself.
1344 */
1345int
1346i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001347 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001348{
1349 struct drm_i915_gem_mmap *args = data;
1350 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001351 unsigned long addr;
1352
Chris Wilson05394f32010-11-08 19:18:58 +00001353 obj = drm_gem_object_lookup(dev, file, args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07001354 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001355 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001356
Daniel Vetter1286ff72012-05-10 15:25:09 +02001357 /* prime objects have no backing filp to GEM mmap
1358 * pages from.
1359 */
1360 if (!obj->filp) {
1361 drm_gem_object_unreference_unlocked(obj);
1362 return -EINVAL;
1363 }
1364
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001365 addr = vm_mmap(obj->filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001366 PROT_READ | PROT_WRITE, MAP_SHARED,
1367 args->offset);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001368 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001369 if (IS_ERR((void *)addr))
1370 return addr;
1371
1372 args->addr_ptr = (uint64_t) addr;
1373
1374 return 0;
1375}
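/*
 * Illustrative userspace sketch (assumptions: `fd`, `handle` and `size` are
 * supplied by the caller, error handling omitted): the ioctl handled above
 * returns a CPU pointer to the object's shmem backing store in addr_ptr.
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = size,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
 *	void *ptr = (void *)(uintptr_t)arg.addr_ptr;
 */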
1376
Jesse Barnesde151cf2008-11-12 10:03:55 -08001377/**
1378 * i915_gem_fault - fault a page into the GTT
1379 * @vma: VMA in question
1380 * @vmf: fault info
1381 *
1382 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1383 * from userspace. The fault handler takes care of binding the object to
1384 * the GTT (if needed), allocating and programming a fence register (again,
1385 * only if needed based on whether the old reg is still valid or the object
1386 * is tiled) and inserting a new PTE into the faulting process.
1387 *
1388 * Note that the faulting process may involve evicting existing objects
1389 * from the GTT and/or fence registers to make room. So performance may
1390 * suffer if the GTT working set is large or there are few fence registers
1391 * left.
1392 */
1393int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1394{
Chris Wilson05394f32010-11-08 19:18:58 +00001395 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1396 struct drm_device *dev = obj->base.dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03001397 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001398 pgoff_t page_offset;
1399 unsigned long pfn;
1400 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001401 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001402
Paulo Zanonif65c9162013-11-27 18:20:34 -02001403 intel_runtime_pm_get(dev_priv);
1404
Jesse Barnesde151cf2008-11-12 10:03:55 -08001405 /* We don't use vmf->pgoff since that has the fake offset */
1406 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1407 PAGE_SHIFT;
1408
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001409 ret = i915_mutex_lock_interruptible(dev);
1410 if (ret)
1411 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001412
Chris Wilsondb53a302011-02-03 11:57:46 +00001413 trace_i915_gem_object_fault(obj, page_offset, true, write);
1414
Chris Wilson6e4930f2014-02-07 18:37:06 -02001415 /* Try to flush the object off the GPU first without holding the lock.
1416 * Upon reacquiring the lock, we will perform our sanity checks and then
1417 * repeat the flush holding the lock in the normal manner to catch cases
1418 * where we are gazumped.
1419 */
1420 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1421 if (ret)
1422 goto unlock;
1423
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001424 /* Access to snoopable pages through the GTT is incoherent. */
1425 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1426 ret = -EINVAL;
1427 goto unlock;
1428 }
1429
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001430 /* Now bind it into the GTT if needed */
Daniel Vetter1ec9e262014-02-14 14:01:11 +01001431 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001432 if (ret)
1433 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001434
Chris Wilsonc9839302012-11-20 10:45:17 +00001435 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1436 if (ret)
1437 goto unpin;
1438
1439 ret = i915_gem_object_get_fence(obj);
1440 if (ret)
1441 goto unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001442
Chris Wilson6299f992010-11-24 12:23:44 +00001443 obj->fault_mappable = true;
1444
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001445 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1446 pfn >>= PAGE_SHIFT;
1447 pfn += page_offset;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001448
1449 /* Finally, remap it using the new GTT offset */
1450 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc9839302012-11-20 10:45:17 +00001451unpin:
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08001452 i915_gem_object_ggtt_unpin(obj);
Chris Wilsonc7150892009-09-23 00:43:56 +01001453unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001454 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001455out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001456 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001457 case -EIO:
Daniel Vettera9340cc2012-07-04 22:18:42 +02001458 /* If this -EIO is due to a gpu hang, give the reset code a
1459 * chance to clean up the mess. Otherwise return the proper
1460 * SIGBUS. */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001461 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
1462 ret = VM_FAULT_SIGBUS;
1463 break;
1464 }
Chris Wilson045e7692010-11-07 09:18:22 +00001465 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001466 /*
1467 * EAGAIN means the gpu is hung and we'll wait for the error
1468 * handler to reset everything when re-faulting in
1469 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001470 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001471 case 0:
1472 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001473 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001474 case -EBUSY:
1475 /*
1476 * EBUSY is ok: this just means that another thread
1477 * already did the job.
1478 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001479 ret = VM_FAULT_NOPAGE;
1480 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001481 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001482 ret = VM_FAULT_OOM;
1483 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001484 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00001485 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001486 ret = VM_FAULT_SIGBUS;
1487 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001488 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001489 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02001490 ret = VM_FAULT_SIGBUS;
1491 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001492 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02001493
1494 intel_runtime_pm_put(dev_priv);
1495 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001496}
1497
Paulo Zanoni48018a52013-12-13 15:22:31 -02001498void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1499{
1500 struct i915_vma *vma;
1501
1502 /*
1503 * Only the global gtt is relevant for gtt memory mappings, so restrict
1504 * list traversal to objects bound into the global address space. Note
1505 * that the active list should be empty, but better safe than sorry.
1506 */
1507 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1508 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1509 i915_gem_release_mmap(vma->obj);
1510 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1511 i915_gem_release_mmap(vma->obj);
1512}
1513
Jesse Barnesde151cf2008-11-12 10:03:55 -08001514/**
Chris Wilson901782b2009-07-10 08:18:50 +01001515 * i915_gem_release_mmap - remove physical page mappings
1516 * @obj: obj in question
1517 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001518 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001519 * relinquish ownership of the pages back to the system.
1520 *
1521 * It is vital that we remove the page mapping if we have mapped a tiled
1522 * object through the GTT and then lose the fence register due to
1523 * resource pressure. Similarly if the object has been moved out of the
1524 * aperture, then pages mapped into userspace must be revoked. Removing the
1525 * mapping will then trigger a page fault on the next user access, allowing
1526 * fixup by i915_gem_fault().
1527 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001528void
Chris Wilson05394f32010-11-08 19:18:58 +00001529i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001530{
Chris Wilson6299f992010-11-24 12:23:44 +00001531 if (!obj->fault_mappable)
1532 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001533
David Herrmann6796cb12014-01-03 14:24:19 +01001534 drm_vma_node_unmap(&obj->base.vma_node,
1535 obj->base.dev->anon_inode->i_mapping);
Chris Wilson6299f992010-11-24 12:23:44 +00001536 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001537}
1538
Imre Deak0fa87792013-01-07 21:47:35 +02001539uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001540i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001541{
Chris Wilsone28f8712011-07-18 13:11:49 -07001542 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001543
1544 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001545 tiling_mode == I915_TILING_NONE)
1546 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001547
1548 /* Previous chips need a power-of-two fence region when tiling */
1549 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001550 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001551 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001552 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001553
Chris Wilsone28f8712011-07-18 13:11:49 -07001554 while (gtt_size < size)
1555 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001556
Chris Wilsone28f8712011-07-18 13:11:49 -07001557 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001558}
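/*
 * Worked example of the sizing above (illustrative only): a 600KiB tiled
 * object on gen3 starts from a 1MiB fence region, which already covers it,
 * so 1MiB is returned; on gen2 the region starts at 512KiB and doubles once
 * to 1MiB; on gen4+ (or for untiled objects) the object size is returned
 * unchanged.
 */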
1559
Jesse Barnesde151cf2008-11-12 10:03:55 -08001560/**
1561 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1562 * @obj: object to check
1563 *
1564 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001565 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001566 */
Imre Deakd8651102013-01-07 21:47:33 +02001567uint32_t
1568i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1569 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001570{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001571 /*
1572 * Minimum alignment is 4k (GTT page size), but might be greater
1573 * if a fence register is needed for the object.
1574 */
Imre Deakd8651102013-01-07 21:47:33 +02001575 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001576 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001577 return 4096;
1578
1579 /*
1580 * Previous chips need to be aligned to the size of the smallest
1581 * fence register that can contain the object.
1582 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001583 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001584}
1585
Chris Wilsond8cb5082012-08-11 15:41:03 +01001586static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1587{
1588 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1589 int ret;
1590
David Herrmann0de23972013-07-24 21:07:52 +02001591 if (drm_vma_node_has_offset(&obj->base.vma_node))
Chris Wilsond8cb5082012-08-11 15:41:03 +01001592 return 0;
1593
Daniel Vetterda494d72012-12-20 15:11:16 +01001594 dev_priv->mm.shrinker_no_lock_stealing = true;
1595
Chris Wilsond8cb5082012-08-11 15:41:03 +01001596 ret = drm_gem_create_mmap_offset(&obj->base);
1597 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001598 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001599
1600 /* Badly fragmented mmap space? The only way we can recover
1601 * space is by destroying unwanted objects. We can't randomly release
1602 * mmap_offsets as userspace expects them to be persistent for the
1603	 * lifetime of the objects. The closest we can do is to release the
1604	 * offsets on purgeable objects by truncating them and marking them purged,
1605	 * which prevents userspace from ever using those objects again.
1606 */
1607 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1608 ret = drm_gem_create_mmap_offset(&obj->base);
1609 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001610 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001611
1612 i915_gem_shrink_all(dev_priv);
Daniel Vetterda494d72012-12-20 15:11:16 +01001613 ret = drm_gem_create_mmap_offset(&obj->base);
1614out:
1615 dev_priv->mm.shrinker_no_lock_stealing = false;
1616
1617 return ret;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001618}
1619
1620static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1621{
Chris Wilsond8cb5082012-08-11 15:41:03 +01001622 drm_gem_free_mmap_offset(&obj->base);
1623}
1624
Jesse Barnesde151cf2008-11-12 10:03:55 -08001625int
Dave Airlieff72145b2011-02-07 12:16:14 +10001626i915_gem_mmap_gtt(struct drm_file *file,
1627 struct drm_device *dev,
1628 uint32_t handle,
1629 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001630{
Chris Wilsonda761a62010-10-27 17:37:08 +01001631 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001632 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001633 int ret;
1634
Chris Wilson76c1dec2010-09-25 11:22:51 +01001635 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001636 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001637 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001638
Dave Airlieff72145b2011-02-07 12:16:14 +10001639 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001640 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001641 ret = -ENOENT;
1642 goto unlock;
1643 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001644
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001645 if (obj->base.size > dev_priv->gtt.mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001646 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001647 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001648 }
1649
Chris Wilson05394f32010-11-08 19:18:58 +00001650 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00001651 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00001652 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001653 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001654 }
1655
Chris Wilsond8cb5082012-08-11 15:41:03 +01001656 ret = i915_gem_object_create_mmap_offset(obj);
1657 if (ret)
1658 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001659
David Herrmann0de23972013-07-24 21:07:52 +02001660 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001661
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001662out:
Chris Wilson05394f32010-11-08 19:18:58 +00001663 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001664unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001665 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001666 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001667}
1668
Dave Airlieff72145b2011-02-07 12:16:14 +10001669/**
1670 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1671 * @dev: DRM device
1672 * @data: GTT mapping ioctl data
1673 * @file: GEM object info
1674 *
1675 * Simply returns the fake offset to userspace so it can mmap it.
1676 * The mmap call will end up in drm_gem_mmap(), which will set things
1677 * up so we can get faults in the handler above.
1678 *
1679 * The fault handler will take care of binding the object into the GTT
1680 * (since it may have been evicted to make room for something), allocating
1681 * a fence register, and mapping the appropriate aperture address into
1682 * userspace.
1683 */
1684int
1685i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1686 struct drm_file *file)
1687{
1688 struct drm_i915_gem_mmap_gtt *args = data;
1689
Dave Airlieff72145b2011-02-07 12:16:14 +10001690 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1691}
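/*
 * Illustrative userspace sketch (assumptions: `fd`, `handle` and `size` are
 * supplied by the caller, error handling omitted): the fake offset returned
 * by the ioctl above is fed straight into mmap() on the DRM fd, which
 * routes the resulting faults through i915_gem_fault().
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, arg.offset);
 */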
1692
Chris Wilson55372522014-03-25 13:23:06 +00001693static inline int
1694i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1695{
1696 return obj->madv == I915_MADV_DONTNEED;
1697}
1698
Daniel Vetter225067e2012-08-20 10:23:20 +02001699/* Immediately discard the backing storage */
1700static void
1701i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001702{
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001703 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001704
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001705 if (obj->base.filp == NULL)
1706 return;
1707
Daniel Vetter225067e2012-08-20 10:23:20 +02001708 /* Our goal here is to return as much of the memory as
1709	 * possible back to the system, as we are called from OOM.
1710 * To do this we must instruct the shmfs to drop all of its
1711 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01001712 */
Chris Wilson55372522014-03-25 13:23:06 +00001713 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
Daniel Vetter225067e2012-08-20 10:23:20 +02001714 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001715}
Chris Wilsone5281cc2010-10-28 13:45:36 +01001716
Chris Wilson55372522014-03-25 13:23:06 +00001717/* Try to discard unwanted pages */
1718static void
1719i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
Daniel Vetter225067e2012-08-20 10:23:20 +02001720{
Chris Wilson55372522014-03-25 13:23:06 +00001721 struct address_space *mapping;
1722
1723 switch (obj->madv) {
1724 case I915_MADV_DONTNEED:
1725 i915_gem_object_truncate(obj);
1726 case __I915_MADV_PURGED:
1727 return;
1728 }
1729
1730 if (obj->base.filp == NULL)
1731 return;
1732
1733 mapping = file_inode(obj->base.filp)->i_mapping,
1734 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001735}
1736
Chris Wilson5cdf5882010-09-27 15:51:07 +01001737static void
Chris Wilson05394f32010-11-08 19:18:58 +00001738i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001739{
Imre Deak90797e62013-02-18 19:28:03 +02001740 struct sg_page_iter sg_iter;
1741 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02001742
Chris Wilson05394f32010-11-08 19:18:58 +00001743 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001744
Chris Wilson6c085a72012-08-20 11:40:46 +02001745 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1746 if (ret) {
1747 /* In the event of a disaster, abandon all caches and
1748 * hope for the best.
1749 */
1750 WARN_ON(ret != -EIO);
Chris Wilson2c225692013-08-09 12:26:45 +01001751 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02001752 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1753 }
1754
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001755 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001756 i915_gem_object_save_bit_17_swizzle(obj);
1757
Chris Wilson05394f32010-11-08 19:18:58 +00001758 if (obj->madv == I915_MADV_DONTNEED)
1759 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001760
Imre Deak90797e62013-02-18 19:28:03 +02001761 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
Imre Deak2db76d72013-03-26 15:14:18 +02001762 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +01001763
Chris Wilson05394f32010-11-08 19:18:58 +00001764 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01001765 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001766
Chris Wilson05394f32010-11-08 19:18:58 +00001767 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01001768 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001769
Chris Wilson9da3da62012-06-01 15:20:22 +01001770 page_cache_release(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001771 }
Chris Wilson05394f32010-11-08 19:18:58 +00001772 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001773
Chris Wilson9da3da62012-06-01 15:20:22 +01001774 sg_free_table(obj->pages);
1775 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01001776}
1777
Chris Wilsondd624af2013-01-15 12:39:35 +00001778int
Chris Wilson37e680a2012-06-07 15:38:42 +01001779i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1780{
1781 const struct drm_i915_gem_object_ops *ops = obj->ops;
1782
Chris Wilson2f745ad2012-09-04 21:02:58 +01001783 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01001784 return 0;
1785
Chris Wilsona5570172012-09-04 21:02:54 +01001786 if (obj->pages_pin_count)
1787 return -EBUSY;
1788
Ben Widawsky98438772013-07-31 17:00:12 -07001789 BUG_ON(i915_gem_obj_bound_any(obj));
Ben Widawsky3e123022013-07-31 17:00:04 -07001790
Chris Wilsona2165e32012-12-03 11:49:00 +00001791 /* ->put_pages might need to allocate memory for the bit17 swizzle
1792 * array, hence protect them from being reaped by removing them from gtt
1793 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07001794 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00001795
Chris Wilson37e680a2012-06-07 15:38:42 +01001796 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001797 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02001798
Chris Wilson55372522014-03-25 13:23:06 +00001799 i915_gem_object_invalidate(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02001800
1801 return 0;
1802}
1803
Chris Wilsond9973b42013-10-04 10:33:00 +01001804static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001805__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1806 bool purgeable_only)
Chris Wilson6c085a72012-08-20 11:40:46 +02001807{
Chris Wilsonc8725f32014-03-17 12:21:55 +00001808 struct list_head still_in_list;
1809 struct drm_i915_gem_object *obj;
Chris Wilsond9973b42013-10-04 10:33:00 +01001810 unsigned long count = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001811
Chris Wilson57094f82013-09-04 10:45:50 +01001812 /*
Chris Wilsonc8725f32014-03-17 12:21:55 +00001813 * As we may completely rewrite the (un)bound list whilst unbinding
Chris Wilson57094f82013-09-04 10:45:50 +01001814 * (due to retiring requests) we have to strictly process only
1815	 * one element of the list at a time, and recheck the list
1816 * on every iteration.
Chris Wilsonc8725f32014-03-17 12:21:55 +00001817 *
1818 * In particular, we must hold a reference whilst removing the
1819 * object as we may end up waiting for and/or retiring the objects.
1820 * This might release the final reference (held by the active list)
1821 * and result in the object being freed from under us. This is
1822 * similar to the precautions the eviction code must take whilst
1823 * removing objects.
1824 *
1825 * Also note that although these lists do not hold a reference to
1826 * the object we can safely grab one here: The final object
1827 * unreferencing and the bound_list are both protected by the
1828 * dev->struct_mutex and so we won't ever be able to observe an
1829	 * object on the bound_list with a reference count of 0.
Chris Wilson57094f82013-09-04 10:45:50 +01001830 */
Chris Wilsonc8725f32014-03-17 12:21:55 +00001831 INIT_LIST_HEAD(&still_in_list);
1832 while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
1833 obj = list_first_entry(&dev_priv->mm.unbound_list,
1834 typeof(*obj), global_list);
1835 list_move_tail(&obj->global_list, &still_in_list);
1836
1837 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1838 continue;
1839
1840 drm_gem_object_reference(&obj->base);
1841
1842 if (i915_gem_object_put_pages(obj) == 0)
1843 count += obj->base.size >> PAGE_SHIFT;
1844
1845 drm_gem_object_unreference(&obj->base);
1846 }
1847 list_splice(&still_in_list, &dev_priv->mm.unbound_list);
1848
1849 INIT_LIST_HEAD(&still_in_list);
Chris Wilson57094f82013-09-04 10:45:50 +01001850 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001851 struct i915_vma *vma, *v;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001852
Chris Wilson57094f82013-09-04 10:45:50 +01001853 obj = list_first_entry(&dev_priv->mm.bound_list,
1854 typeof(*obj), global_list);
Chris Wilsonc8725f32014-03-17 12:21:55 +00001855 list_move_tail(&obj->global_list, &still_in_list);
Chris Wilson57094f82013-09-04 10:45:50 +01001856
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001857 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1858 continue;
1859
Chris Wilson57094f82013-09-04 10:45:50 +01001860 drm_gem_object_reference(&obj->base);
1861
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001862 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1863 if (i915_vma_unbind(vma))
1864 break;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001865
Chris Wilson57094f82013-09-04 10:45:50 +01001866 if (i915_gem_object_put_pages(obj) == 0)
Chris Wilson6c085a72012-08-20 11:40:46 +02001867 count += obj->base.size >> PAGE_SHIFT;
Chris Wilson57094f82013-09-04 10:45:50 +01001868
1869 drm_gem_object_unreference(&obj->base);
Chris Wilson6c085a72012-08-20 11:40:46 +02001870 }
Chris Wilsonc8725f32014-03-17 12:21:55 +00001871 list_splice(&still_in_list, &dev_priv->mm.bound_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02001872
1873 return count;
1874}
1875
Chris Wilsond9973b42013-10-04 10:33:00 +01001876static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001877i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1878{
1879 return __i915_gem_shrink(dev_priv, target, true);
1880}
1881
Chris Wilsond9973b42013-10-04 10:33:00 +01001882static unsigned long
Chris Wilson6c085a72012-08-20 11:40:46 +02001883i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1884{
Chris Wilson6c085a72012-08-20 11:40:46 +02001885 i915_gem_evict_everything(dev_priv->dev);
Chris Wilsonc8725f32014-03-17 12:21:55 +00001886 return __i915_gem_shrink(dev_priv, LONG_MAX, false);
Daniel Vetter225067e2012-08-20 10:23:20 +02001887}
1888
Chris Wilson37e680a2012-06-07 15:38:42 +01001889static int
Chris Wilson6c085a72012-08-20 11:40:46 +02001890i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001891{
Chris Wilson6c085a72012-08-20 11:40:46 +02001892 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001893 int page_count, i;
1894 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01001895 struct sg_table *st;
1896 struct scatterlist *sg;
Imre Deak90797e62013-02-18 19:28:03 +02001897 struct sg_page_iter sg_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07001898 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02001899 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson6c085a72012-08-20 11:40:46 +02001900 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07001901
Chris Wilson6c085a72012-08-20 11:40:46 +02001902 /* Assert that the object is not currently in any GPU domain. As it
1903 * wasn't in the GTT, there shouldn't be any way it could have been in
1904 * a GPU cache
1905 */
1906 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1907 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1908
Chris Wilson9da3da62012-06-01 15:20:22 +01001909 st = kmalloc(sizeof(*st), GFP_KERNEL);
1910 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07001911 return -ENOMEM;
1912
Chris Wilson9da3da62012-06-01 15:20:22 +01001913 page_count = obj->base.size / PAGE_SIZE;
1914 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01001915 kfree(st);
1916 return -ENOMEM;
1917 }
1918
1919 /* Get the list of pages out of our struct file. They'll be pinned
1920 * at this point until we release them.
1921 *
1922 * Fail silently without starting the shrinker
1923 */
Al Viro496ad9a2013-01-23 17:07:38 -05001924 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilson6c085a72012-08-20 11:40:46 +02001925 gfp = mapping_gfp_mask(mapping);
Linus Torvaldscaf49192012-12-10 10:51:16 -08001926 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001927 gfp &= ~(__GFP_IO | __GFP_WAIT);
Imre Deak90797e62013-02-18 19:28:03 +02001928 sg = st->sgl;
1929 st->nents = 0;
1930 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001931 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1932 if (IS_ERR(page)) {
1933 i915_gem_purge(dev_priv, page_count);
1934 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1935 }
1936 if (IS_ERR(page)) {
1937 /* We've tried hard to allocate the memory by reaping
1938 * our own buffer, now let the real VM do its job and
1939 * go down in flames if truly OOM.
1940 */
Linus Torvaldscaf49192012-12-10 10:51:16 -08001941 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
Chris Wilson6c085a72012-08-20 11:40:46 +02001942 gfp |= __GFP_IO | __GFP_WAIT;
1943
1944 i915_gem_shrink_all(dev_priv);
1945 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1946 if (IS_ERR(page))
1947 goto err_pages;
1948
Linus Torvaldscaf49192012-12-10 10:51:16 -08001949 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001950 gfp &= ~(__GFP_IO | __GFP_WAIT);
1951 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001952#ifdef CONFIG_SWIOTLB
1953 if (swiotlb_nr_tbl()) {
1954 st->nents++;
1955 sg_set_page(sg, page, PAGE_SIZE, 0);
1956 sg = sg_next(sg);
1957 continue;
1958 }
1959#endif
Imre Deak90797e62013-02-18 19:28:03 +02001960 if (!i || page_to_pfn(page) != last_pfn + 1) {
1961 if (i)
1962 sg = sg_next(sg);
1963 st->nents++;
1964 sg_set_page(sg, page, PAGE_SIZE, 0);
1965 } else {
1966 sg->length += PAGE_SIZE;
1967 }
1968 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03001969
1970 /* Check that the i965g/gm workaround works. */
1971 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07001972 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001973#ifdef CONFIG_SWIOTLB
1974 if (!swiotlb_nr_tbl())
1975#endif
1976 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01001977 obj->pages = st;
1978
Eric Anholt673a3942008-07-30 12:06:12 -07001979 if (i915_gem_object_needs_bit17_swizzle(obj))
1980 i915_gem_object_do_bit_17_swizzle(obj);
1981
1982 return 0;
1983
1984err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02001985 sg_mark_end(sg);
1986 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
Imre Deak2db76d72013-03-26 15:14:18 +02001987 page_cache_release(sg_page_iter_page(&sg_iter));
Chris Wilson9da3da62012-06-01 15:20:22 +01001988 sg_free_table(st);
1989 kfree(st);
Chris Wilson0820baf2014-03-25 13:23:03 +00001990
1991 /* shmemfs first checks if there is enough memory to allocate the page
1992	 * and reports ENOSPC should there be insufficient memory, along with the usual
1993 * ENOMEM for a genuine allocation failure.
1994 *
1995 * We use ENOSPC in our driver to mean that we have run out of aperture
1996 * space and so want to translate the error from shmemfs back to our
1997 * usual understanding of ENOMEM.
1998 */
1999 if (PTR_ERR(page) == -ENOSPC)
2000 return -ENOMEM;
2001 else
2002 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07002003}
2004
Chris Wilson37e680a2012-06-07 15:38:42 +01002005/* Ensure that the associated pages are gathered from the backing storage
2006 * and pinned into our object. i915_gem_object_get_pages() may be called
2007 * multiple times before they are released by a single call to
2008 * i915_gem_object_put_pages() - once the pages are no longer referenced
2009 * either as a result of memory pressure (reaping pages under the shrinker)
2010 * or as the object is itself released.
2011 */
2012int
2013i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2014{
2015 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2016 const struct drm_i915_gem_object_ops *ops = obj->ops;
2017 int ret;
2018
Chris Wilson2f745ad2012-09-04 21:02:58 +01002019 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01002020 return 0;
2021
Chris Wilson43e28f02013-01-08 10:53:09 +00002022 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00002023 DRM_DEBUG("Attempting to obtain a purgeable object\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00002024 return -EFAULT;
Chris Wilson43e28f02013-01-08 10:53:09 +00002025 }
2026
Chris Wilsona5570172012-09-04 21:02:54 +01002027 BUG_ON(obj->pages_pin_count);
2028
Chris Wilson37e680a2012-06-07 15:38:42 +01002029 ret = ops->get_pages(obj);
2030 if (ret)
2031 return ret;
2032
Ben Widawsky35c20a62013-05-31 11:28:48 -07002033 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilson37e680a2012-06-07 15:38:42 +01002034 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002035}
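/*
 * Illustrative in-kernel usage sketch (not taken from this file, error
 * handling trimmed): callers normally bump pages_pin_count via the
 * pin/unpin helpers so the shrinker cannot drop the pages underneath them
 * while obj->pages is in use.
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret == 0) {
 *		i915_gem_object_pin_pages(obj);
 *		... walk obj->pages (an sg_table) ...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 */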
2036
Ben Widawskye2d05a82013-09-24 09:57:58 -07002037static void
Chris Wilson05394f32010-11-08 19:18:58 +00002038i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson9d7730912012-11-27 16:22:52 +00002039 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002040{
Chris Wilson05394f32010-11-08 19:18:58 +00002041 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01002042 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9d7730912012-11-27 16:22:52 +00002043 u32 seqno = intel_ring_get_seqno(ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01002044
Zou Nan hai852835f2010-05-21 09:08:56 +08002045 BUG_ON(ring == NULL);
Chris Wilson02978ff2013-07-09 09:22:39 +01002046 if (obj->ring != ring && obj->last_write_seqno) {
2047 /* Keep the seqno relative to the current ring */
2048 obj->last_write_seqno = seqno;
2049 }
Chris Wilson05394f32010-11-08 19:18:58 +00002050 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07002051
2052 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00002053 if (!obj->active) {
2054 drm_gem_object_reference(&obj->base);
2055 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07002056 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01002057
Chris Wilson05394f32010-11-08 19:18:58 +00002058 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002059
Chris Wilson0201f1e2012-07-20 12:41:01 +01002060 obj->last_read_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00002061
Chris Wilsoncaea7472010-11-12 13:53:37 +00002062 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00002063 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002064
Chris Wilson7dd49062012-03-21 10:48:18 +00002065 /* Bump MRU to take account of the delayed flush */
2066 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2067 struct drm_i915_fence_reg *reg;
2068
2069 reg = &dev_priv->fence_regs[obj->fence_reg];
2070 list_move_tail(&reg->lru_list,
2071 &dev_priv->mm.fence_list);
2072 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002073 }
2074}
2075
Ben Widawskye2d05a82013-09-24 09:57:58 -07002076void i915_vma_move_to_active(struct i915_vma *vma,
2077 struct intel_ring_buffer *ring)
2078{
2079 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2080 return i915_gem_object_move_to_active(vma->obj, ring);
2081}
2082
Chris Wilsoncaea7472010-11-12 13:53:37 +00002083static void
Chris Wilsoncaea7472010-11-12 13:53:37 +00002084i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2085{
Ben Widawskyca191b12013-07-31 17:00:14 -07002086 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002087 struct i915_address_space *vm;
2088 struct i915_vma *vma;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002089
Chris Wilson65ce3022012-07-20 12:41:02 +01002090 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002091 BUG_ON(!obj->active);
Chris Wilson65ce3022012-07-20 12:41:02 +01002092
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002093 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2094 vma = i915_gem_obj_to_vma(obj, vm);
2095 if (vma && !list_empty(&vma->mm_list))
2096 list_move_tail(&vma->mm_list, &vm->inactive_list);
2097 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002098
Chris Wilson65ce3022012-07-20 12:41:02 +01002099 list_del_init(&obj->ring_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002100 obj->ring = NULL;
2101
Chris Wilson65ce3022012-07-20 12:41:02 +01002102 obj->last_read_seqno = 0;
2103 obj->last_write_seqno = 0;
2104 obj->base.write_domain = 0;
2105
2106 obj->last_fenced_seqno = 0;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002107 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002108
2109 obj->active = 0;
2110 drm_gem_object_unreference(&obj->base);
2111
2112 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08002113}
Eric Anholt673a3942008-07-30 12:06:12 -07002114
Chris Wilsonc8725f32014-03-17 12:21:55 +00002115static void
2116i915_gem_object_retire(struct drm_i915_gem_object *obj)
2117{
2118 struct intel_ring_buffer *ring = obj->ring;
2119
2120 if (ring == NULL)
2121 return;
2122
2123 if (i915_seqno_passed(ring->get_seqno(ring, true),
2124 obj->last_read_seqno))
2125 i915_gem_object_move_to_inactive(obj);
2126}
2127
Chris Wilson9d7730912012-11-27 16:22:52 +00002128static int
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002129i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002130{
Chris Wilson9d7730912012-11-27 16:22:52 +00002131 struct drm_i915_private *dev_priv = dev->dev_private;
2132 struct intel_ring_buffer *ring;
2133 int ret, i, j;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002134
Chris Wilson107f27a52012-12-10 13:56:17 +02002135 /* Carefully retire all requests without writing to the rings */
Chris Wilson9d7730912012-11-27 16:22:52 +00002136 for_each_ring(ring, dev_priv, i) {
Chris Wilson107f27a52012-12-10 13:56:17 +02002137 ret = intel_ring_idle(ring);
2138 if (ret)
2139 return ret;
Chris Wilson9d7730912012-11-27 16:22:52 +00002140 }
Chris Wilson9d7730912012-11-27 16:22:52 +00002141 i915_gem_retire_requests(dev);
Chris Wilson107f27a52012-12-10 13:56:17 +02002142
2143 /* Finally reset hw state */
Chris Wilson9d7730912012-11-27 16:22:52 +00002144 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002145 intel_ring_init_seqno(ring, seqno);
Mika Kuoppala498d2ac2012-12-04 15:12:04 +02002146
Ben Widawskyebc348b2014-04-29 14:52:28 -07002147 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2148 ring->semaphore.sync_seqno[j] = 0;
Chris Wilson9d7730912012-11-27 16:22:52 +00002149 }
2150
2151 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002152}
2153
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002154int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2155{
2156 struct drm_i915_private *dev_priv = dev->dev_private;
2157 int ret;
2158
2159 if (seqno == 0)
2160 return -EINVAL;
2161
2162	/* The seqno in the HWS page needs to be set to a value less than
2163	 * the one we will inject into the ring
2164 */
2165 ret = i915_gem_init_seqno(dev, seqno - 1);
2166 if (ret)
2167 return ret;
2168
2169 /* Carefully set the last_seqno value so that wrap
2170 * detection still works
2171 */
2172 dev_priv->next_seqno = seqno;
2173 dev_priv->last_seqno = seqno - 1;
2174 if (dev_priv->last_seqno == 0)
2175 dev_priv->last_seqno--;
2176
2177 return 0;
2178}
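/*
 * Worked example (illustrative only): i915_gem_set_seqno(dev, 0x1000)
 * idles the rings and programs them with 0xfff via i915_gem_init_seqno(),
 * then records next_seqno = 0x1000 and last_seqno = 0xfff, so the next
 * request emitted uses 0x1000 and wrap detection still sees a strictly
 * increasing sequence.
 */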
2179
Chris Wilson9d7730912012-11-27 16:22:52 +00002180int
2181i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002182{
Chris Wilson9d7730912012-11-27 16:22:52 +00002183 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002184
Chris Wilson9d7730912012-11-27 16:22:52 +00002185 /* reserve 0 for non-seqno */
2186 if (dev_priv->next_seqno == 0) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002187 int ret = i915_gem_init_seqno(dev, 0);
Chris Wilson9d7730912012-11-27 16:22:52 +00002188 if (ret)
2189 return ret;
2190
2191 dev_priv->next_seqno = 1;
2192 }
2193
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02002194 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
Chris Wilson9d7730912012-11-27 16:22:52 +00002195 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002196}
2197
Mika Kuoppala0025c072013-06-12 12:35:30 +03002198int __i915_add_request(struct intel_ring_buffer *ring,
2199 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002200 struct drm_i915_gem_object *obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002201 u32 *out_seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002202{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002203 struct drm_i915_private *dev_priv = ring->dev->dev_private;
Chris Wilsonacb868d2012-09-26 13:47:30 +01002204 struct drm_i915_gem_request *request;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002205 u32 request_ring_position, request_start;
Chris Wilson3cce4692010-10-27 16:11:02 +01002206 int ret;
2207
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002208 request_start = intel_ring_get_tail(ring);
Daniel Vettercc889e02012-06-13 20:45:19 +02002209 /*
2210 * Emit any outstanding flushes - execbuf can fail to emit the flush
2211 * after having emitted the batchbuffer command. Hence we need to fix
2212 * things up similar to emitting the lazy request. The difference here
2213 * is that the flush _must_ happen before the next request, no matter
2214 * what.
2215 */
Chris Wilsona7b97612012-07-20 12:41:08 +01002216 ret = intel_ring_flush_all_caches(ring);
2217 if (ret)
2218 return ret;
Daniel Vettercc889e02012-06-13 20:45:19 +02002219
Chris Wilson3c0e2342013-09-04 10:45:52 +01002220 request = ring->preallocated_lazy_request;
2221 if (WARN_ON(request == NULL))
Chris Wilsonacb868d2012-09-26 13:47:30 +01002222 return -ENOMEM;
Daniel Vettercc889e02012-06-13 20:45:19 +02002223
Chris Wilsona71d8d92012-02-15 11:25:36 +00002224 /* Record the position of the start of the request so that
2225 * should we detect the updated seqno part-way through the
2226 * GPU processing the request, we never over-estimate the
2227 * position of the head.
2228 */
2229 request_ring_position = intel_ring_get_tail(ring);
2230
Chris Wilson9d7730912012-11-27 16:22:52 +00002231 ret = ring->add_request(ring);
Chris Wilson3c0e2342013-09-04 10:45:52 +01002232 if (ret)
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002233 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002234
Chris Wilson9d7730912012-11-27 16:22:52 +00002235 request->seqno = intel_ring_get_seqno(ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08002236 request->ring = ring;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002237 request->head = request_start;
Chris Wilsona71d8d92012-02-15 11:25:36 +00002238 request->tail = request_ring_position;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002239
2240 /* Whilst this request exists, batch_obj will be on the
2241 * active_list, and so will hold the active reference. Only when this
2242	 * request is retired will the batch_obj be moved onto the
2243 * inactive_list and lose its active reference. Hence we do not need
2244 * to explicitly hold another reference here.
2245 */
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002246 request->batch_obj = obj;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002247
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002248 /* Hold a reference to the current context so that we can inspect
2249 * it later in case a hangcheck error event fires.
2250 */
2251 request->ctx = ring->last_context;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002252 if (request->ctx)
2253 i915_gem_context_reference(request->ctx);
2254
Eric Anholt673a3942008-07-30 12:06:12 -07002255 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08002256 list_add_tail(&request->list, &ring->request_list);
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002257 request->file_priv = NULL;
Zou Nan hai852835f2010-05-21 09:08:56 +08002258
Chris Wilsondb53a302011-02-03 11:57:46 +00002259 if (file) {
2260 struct drm_i915_file_private *file_priv = file->driver_priv;
2261
Chris Wilson1c255952010-09-26 11:03:27 +01002262 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002263 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002264 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002265 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01002266 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00002267 }
Eric Anholt673a3942008-07-30 12:06:12 -07002268
Chris Wilson9d7730912012-11-27 16:22:52 +00002269 trace_i915_gem_request_add(ring, request->seqno);
Chris Wilson18235212013-09-04 10:45:51 +01002270 ring->outstanding_lazy_seqno = 0;
Chris Wilson3c0e2342013-09-04 10:45:52 +01002271 ring->preallocated_lazy_request = NULL;
Chris Wilsondb53a302011-02-03 11:57:46 +00002272
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002273 if (!dev_priv->ums.mm_suspended) {
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002274 i915_queue_hangcheck(ring->dev);
2275
Chris Wilsonf62a0072014-02-21 17:55:39 +00002276 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2277 queue_delayed_work(dev_priv->wq,
2278 &dev_priv->mm.retire_work,
2279 round_jiffies_up_relative(HZ));
2280 intel_mark_busy(dev_priv->dev);
Ben Gamarif65d9422009-09-14 17:48:44 -04002281 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002282
Chris Wilsonacb868d2012-09-26 13:47:30 +01002283 if (out_seqno)
Chris Wilson9d7730912012-11-27 16:22:52 +00002284 *out_seqno = request->seqno;
Chris Wilson3cce4692010-10-27 16:11:02 +01002285 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002286}
2287
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002288static inline void
2289i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07002290{
Chris Wilson1c255952010-09-26 11:03:27 +01002291 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002292
Chris Wilson1c255952010-09-26 11:03:27 +01002293 if (!file_priv)
2294 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002295
Chris Wilson1c255952010-09-26 11:03:27 +01002296 spin_lock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002297 list_del(&request->client_list);
2298 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01002299 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002300}
2301
Mika Kuoppala939fd762014-01-30 19:04:44 +02002302static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002303 const struct i915_hw_context *ctx)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002304{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002305 unsigned long elapsed;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002306
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002307 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2308
2309 if (ctx->hang_stats.banned)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002310 return true;
2311
2312 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
Ville Syrjäläccc7bed2014-02-21 16:26:47 +02002313 if (!i915_gem_context_is_default(ctx)) {
Mika Kuoppala3fac8972014-01-30 16:05:48 +02002314 DRM_DEBUG("context hanging too fast, banning!\n");
Ville Syrjäläccc7bed2014-02-21 16:26:47 +02002315 return true;
Mika Kuoppala88b4aa82014-03-28 18:18:18 +02002316 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2317 if (i915_stop_ring_allow_warn(dev_priv))
2318 DRM_ERROR("gpu hanging too fast, banning!\n");
Ville Syrjäläccc7bed2014-02-21 16:26:47 +02002319 return true;
Mika Kuoppala3fac8972014-01-30 16:05:48 +02002320 }
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002321 }
2322
2323 return false;
2324}
2325
Mika Kuoppala939fd762014-01-30 19:04:44 +02002326static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2327 struct i915_hw_context *ctx,
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002328 const bool guilty)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002329{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002330 struct i915_ctx_hang_stats *hs;
2331
2332 if (WARN_ON(!ctx))
2333 return;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002334
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002335 hs = &ctx->hang_stats;
2336
2337 if (guilty) {
Mika Kuoppala939fd762014-01-30 19:04:44 +02002338 hs->banned = i915_context_is_banned(dev_priv, ctx);
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002339 hs->batch_active++;
2340 hs->guilty_ts = get_seconds();
2341 } else {
2342 hs->batch_pending++;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002343 }
2344}
2345
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002346static void i915_gem_free_request(struct drm_i915_gem_request *request)
2347{
2348 list_del(&request->list);
2349 i915_gem_request_remove_from_client(request);
2350
2351 if (request->ctx)
2352 i915_gem_context_unreference(request->ctx);
2353
2354 kfree(request);
2355}
2356
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002357struct drm_i915_gem_request *
2358i915_gem_find_active_request(struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01002359{
Chris Wilson4db080f2013-12-04 11:37:09 +00002360 struct drm_i915_gem_request *request;
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002361 u32 completed_seqno;
2362
2363 completed_seqno = ring->get_seqno(ring, false);
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002364
Chris Wilson4db080f2013-12-04 11:37:09 +00002365 list_for_each_entry(request, &ring->request_list, list) {
2366 if (i915_seqno_passed(completed_seqno, request->seqno))
2367 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002368
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002369 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00002370 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002371
2372 return NULL;
2373}
2374
2375static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2376 struct intel_ring_buffer *ring)
2377{
2378 struct drm_i915_gem_request *request;
2379 bool ring_hung;
2380
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002381 request = i915_gem_find_active_request(ring);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002382
2383 if (request == NULL)
2384 return;
2385
2386 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2387
Mika Kuoppala939fd762014-01-30 19:04:44 +02002388 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002389
2390 list_for_each_entry_continue(request, &ring->request_list, list)
Mika Kuoppala939fd762014-01-30 19:04:44 +02002391 i915_set_reset_status(dev_priv, request->ctx, false);
Chris Wilson4db080f2013-12-04 11:37:09 +00002392}
2393
2394static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2395 struct intel_ring_buffer *ring)
2396{
Chris Wilsondfaae392010-09-22 10:31:52 +01002397 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002398 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07002399
Chris Wilson05394f32010-11-08 19:18:58 +00002400 obj = list_first_entry(&ring->active_list,
2401 struct drm_i915_gem_object,
2402 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002403
Chris Wilson05394f32010-11-08 19:18:58 +00002404 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002405 }
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002406
2407 /*
2408 * We must free the requests after all the corresponding objects have
2409	 * been moved off the active lists, which is the same order the normal
2410	 * retire_requests function uses. This is important if objects hold
2411 * implicit references on things like e.g. ppgtt address spaces through
2412 * the request.
2413 */
2414 while (!list_empty(&ring->request_list)) {
2415 struct drm_i915_gem_request *request;
2416
2417 request = list_first_entry(&ring->request_list,
2418 struct drm_i915_gem_request,
2419 list);
2420
2421 i915_gem_free_request(request);
2422 }
Chris Wilsone3efda42014-04-09 09:19:41 +01002423
2424	/* These may not have been flushed before the reset, do so now */
2425 kfree(ring->preallocated_lazy_request);
2426 ring->preallocated_lazy_request = NULL;
2427 ring->outstanding_lazy_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002428}
2429
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002430void i915_gem_restore_fences(struct drm_device *dev)
Chris Wilson312817a2010-11-22 11:50:11 +00002431{
2432 struct drm_i915_private *dev_priv = dev->dev_private;
2433 int i;
2434
Daniel Vetter4b9de732011-10-09 21:52:02 +02002435 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00002436 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00002437
Daniel Vetter94a335d2013-07-17 14:51:28 +02002438 /*
2439 * Commit delayed tiling changes if we have an object still
2440 * attached to the fence, otherwise just clear the fence.
2441 */
2442 if (reg->obj) {
2443 i915_gem_object_update_fence(reg->obj, reg,
2444 reg->obj->tiling_mode);
2445 } else {
2446 i915_gem_write_fence(dev, i, NULL);
2447 }
Chris Wilson312817a2010-11-22 11:50:11 +00002448 }
2449}
2450
Chris Wilson069efc12010-09-30 16:53:18 +01002451void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002452{
Chris Wilsondfaae392010-09-22 10:31:52 +01002453 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002454 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002455 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002456
Chris Wilson4db080f2013-12-04 11:37:09 +00002457 /*
2458 * Before we free the objects from the requests, we need to inspect
2459	 * them to find the guilty party. As the requests only borrow
2460 * their reference to the objects, the inspection must be done first.
2461 */
Chris Wilsonb4519512012-05-11 14:29:30 +01002462 for_each_ring(ring, dev_priv, i)
Chris Wilson4db080f2013-12-04 11:37:09 +00002463 i915_gem_reset_ring_status(dev_priv, ring);
2464
2465 for_each_ring(ring, dev_priv, i)
2466 i915_gem_reset_ring_cleanup(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01002467
Ben Widawskyacce9ff2013-12-06 14:11:03 -08002468 i915_gem_context_reset(dev);
2469
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002470 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002471}
2472
2473/**
2474 * This function clears the request list as sequence numbers are passed.
2475 */
Chris Wilson1cf0ba12014-05-05 09:07:33 +01002476void
Chris Wilsondb53a302011-02-03 11:57:46 +00002477i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002478{
Eric Anholt673a3942008-07-30 12:06:12 -07002479 uint32_t seqno;
2480
Chris Wilsondb53a302011-02-03 11:57:46 +00002481 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01002482 return;
2483
Chris Wilsondb53a302011-02-03 11:57:46 +00002484 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002485
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01002486 seqno = ring->get_seqno(ring, true);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002487
Chris Wilsone9103032014-01-07 11:45:14 +00002488 /* Move any buffers on the active list that are no longer referenced
2489 * by the ringbuffer to the flushing/inactive lists as appropriate,
2490 * before we free the context associated with the requests.
2491 */
2492 while (!list_empty(&ring->active_list)) {
2493 struct drm_i915_gem_object *obj;
2494
2495 obj = list_first_entry(&ring->active_list,
2496 struct drm_i915_gem_object,
2497 ring_list);
2498
2499 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2500 break;
2501
2502 i915_gem_object_move_to_inactive(obj);
2503 }
2504
Zou Nan hai852835f2010-05-21 09:08:56 +08002506 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002507 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07002508
Zou Nan hai852835f2010-05-21 09:08:56 +08002509 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07002510 struct drm_i915_gem_request,
2511 list);
Eric Anholt673a3942008-07-30 12:06:12 -07002512
Chris Wilsondfaae392010-09-22 10:31:52 +01002513 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07002514 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002515
Chris Wilsondb53a302011-02-03 11:57:46 +00002516 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002517 /* We know the GPU must have read the request to have
2518 * sent us the seqno + interrupt, so use the position
2519	 * of the tail of the request to update the last known position
2520 * of the GPU head.
2521 */
2522 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002523
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002524 i915_gem_free_request(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002525 }
2526
Chris Wilsondb53a302011-02-03 11:57:46 +00002527 if (unlikely(ring->trace_irq_seqno &&
2528 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002529 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00002530 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002531 }
Chris Wilson23bc5982010-09-29 16:10:57 +01002532
Chris Wilsondb53a302011-02-03 11:57:46 +00002533 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002534}
2535
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002536bool
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002537i915_gem_retire_requests(struct drm_device *dev)
2538{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002539 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002540 struct intel_ring_buffer *ring;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002541 bool idle = true;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002542 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002543
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002544 for_each_ring(ring, dev_priv, i) {
Chris Wilsonb4519512012-05-11 14:29:30 +01002545 i915_gem_retire_requests_ring(ring);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002546 idle &= list_empty(&ring->request_list);
2547 }
2548
2549 if (idle)
2550 mod_delayed_work(dev_priv->wq,
2551 &dev_priv->mm.idle_work,
2552 msecs_to_jiffies(100));
2553
2554 return idle;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002555}
2556
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002557static void
Eric Anholt673a3942008-07-30 12:06:12 -07002558i915_gem_retire_work_handler(struct work_struct *work)
2559{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002560 struct drm_i915_private *dev_priv =
2561 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2562 struct drm_device *dev = dev_priv->dev;
Chris Wilson0a587052011-01-09 21:05:44 +00002563 bool idle;
Eric Anholt673a3942008-07-30 12:06:12 -07002564
Chris Wilson891b48c2010-09-29 12:26:37 +01002565 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002566 idle = false;
2567 if (mutex_trylock(&dev->struct_mutex)) {
2568 idle = i915_gem_retire_requests(dev);
2569 mutex_unlock(&dev->struct_mutex);
2570 }
2571 if (!idle)
Chris Wilsonbcb45082012-10-05 17:02:57 +01002572 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2573 round_jiffies_up_relative(HZ));
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002574}
Chris Wilson891b48c2010-09-29 12:26:37 +01002575
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002576static void
2577i915_gem_idle_work_handler(struct work_struct *work)
2578{
2579 struct drm_i915_private *dev_priv =
2580 container_of(work, typeof(*dev_priv), mm.idle_work.work);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002581
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002582 intel_mark_idle(dev_priv->dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002583}
2584
Ben Widawsky5816d642012-04-11 11:18:19 -07002585/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002586 * Ensures that an object will eventually get non-busy by flushing any required
2587 * write domains, emitting any outstanding lazy request and retiring any
2588 * completed requests.
2589 */
2590static int
2591i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2592{
2593 int ret;
2594
2595 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002596 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002597 if (ret)
2598 return ret;
2599
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002600 i915_gem_retire_requests_ring(obj->ring);
2601 }
2602
2603 return 0;
2604}
2605
2606/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002607 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2608 * @DRM_IOCTL_ARGS: standard ioctl arguments
2609 *
2610 * Returns 0 if successful, else an error is returned with the remaining time in
2611 * the timeout parameter.
2612 * -ETIME: object is still busy after timeout
2613 * -ERESTARTSYS: signal interrupted the wait
2614 * -ENOENT: object doesn't exist
2615 * Also possible, but rare:
2616 * -EAGAIN: GPU wedged
2617 * -ENOMEM: allocation failed
2618 * -ENODEV: Internal IRQ fail
2619 * -E?: The add request failed
2620 *
2621 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2622 * non-zero timeout parameter the wait ioctl will wait for the given number of
2623 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2624 * without holding struct_mutex the object may become re-busied before this
2625 * function completes. A similar but shorter * race condition exists in the busy
2626 * ioctl
2627 */
2628int
2629i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2630{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002631 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002632 struct drm_i915_gem_wait *args = data;
2633 struct drm_i915_gem_object *obj;
2634 struct intel_ring_buffer *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002635 struct timespec timeout_stack, *timeout = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01002636 unsigned reset_counter;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002637 u32 seqno = 0;
2638 int ret = 0;
2639
Ben Widawskyeac1f142012-06-05 15:24:24 -07002640 if (args->timeout_ns >= 0) {
2641 timeout_stack = ns_to_timespec(args->timeout_ns);
2642 timeout = &timeout_stack;
2643 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002644
2645 ret = i915_mutex_lock_interruptible(dev);
2646 if (ret)
2647 return ret;
2648
2649 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2650 if (&obj->base == NULL) {
2651 mutex_unlock(&dev->struct_mutex);
2652 return -ENOENT;
2653 }
2654
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002655 /* Need to make sure the object gets inactive eventually. */
2656 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002657 if (ret)
2658 goto out;
2659
2660 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002661 seqno = obj->last_read_seqno;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002662 ring = obj->ring;
2663 }
2664
2665 if (seqno == 0)
2666 goto out;
2667
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002668 /* Do this after OLR check to make sure we make forward progress polling
2669 * on this IOCTL with a 0 timeout (like busy ioctl)
2670 */
2671 if (!args->timeout_ns) {
2672 ret = -ETIME;
2673 goto out;
2674 }
2675
2676 drm_gem_object_unreference(&obj->base);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002677 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002678 mutex_unlock(&dev->struct_mutex);
2679
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002680 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03002681 if (timeout)
Ben Widawskyeac1f142012-06-05 15:24:24 -07002682 args->timeout_ns = timespec_to_ns(timeout);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002683 return ret;
2684
2685out:
2686 drm_gem_object_unreference(&obj->base);
2687 mutex_unlock(&dev->struct_mutex);
2688 return ret;
2689}
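
/*
 * Illustrative userspace sketch (not part of this driver): waiting up to
 * one millisecond for a buffer to go idle through the ioctl above.  This
 * assumes libdrm's drmIoctl() and the uapi struct drm_i915_gem_wait from
 * <drm/i915_drm.h>; "fd", "handle" and "ret" are placeholders.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * On success the object is idle and wait.timeout_ns holds the time
 * remaining; -1 with errno == ETIME means it was still busy when the
 * timeout expired.
 */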
2690
2691/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002692 * i915_gem_object_sync - sync an object to a ring.
2693 *
2694 * @obj: object which may be in use on another ring.
2695 * @to: ring we wish to use the object on. May be NULL.
2696 *
2697 * This code is meant to abstract object synchronization with the GPU.
2698 * Calling with NULL implies synchronizing the object with the CPU
2699 * rather than a particular GPU ring.
2700 *
2701 * Returns 0 if successful, else propagates up the lower layer error.
2702 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002703int
2704i915_gem_object_sync(struct drm_i915_gem_object *obj,
2705 struct intel_ring_buffer *to)
2706{
2707 struct intel_ring_buffer *from = obj->ring;
2708 u32 seqno;
2709 int ret, idx;
2710
2711 if (from == NULL || to == from)
2712 return 0;
2713
Ben Widawsky5816d642012-04-11 11:18:19 -07002714 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Chris Wilson0201f1e2012-07-20 12:41:01 +01002715 return i915_gem_object_wait_rendering(obj, false);
Ben Widawsky2911a352012-04-05 14:47:36 -07002716
2717 idx = intel_ring_sync_index(from, to);
2718
Chris Wilson0201f1e2012-07-20 12:41:01 +01002719 seqno = obj->last_read_seqno;
Ben Widawskyebc348b2014-04-29 14:52:28 -07002720 if (seqno <= from->semaphore.sync_seqno[idx])
Ben Widawsky2911a352012-04-05 14:47:36 -07002721 return 0;
2722
Ben Widawskyb4aca012012-04-25 20:50:12 -07002723 ret = i915_gem_check_olr(obj->ring, seqno);
2724 if (ret)
2725 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002726
Chris Wilsonb52b89d2013-09-25 11:43:28 +01002727 trace_i915_gem_ring_sync_to(from, to, seqno);
Ben Widawskyebc348b2014-04-29 14:52:28 -07002728 ret = to->semaphore.sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002729 if (!ret)
Mika Kuoppala7b01e262012-11-28 17:18:45 +02002730 /* We use last_read_seqno because sync_to()
2731 * might have just caused seqno wrap under
2732 * the radar.
2733 */
Ben Widawskyebc348b2014-04-29 14:52:28 -07002734 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002735
Ben Widawskye3a5a222012-04-11 11:18:20 -07002736 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002737}
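
/*
 * Hedged sketch, not a caller copied from this driver: code scheduling a
 * read of an object on ring "to" after another ring wrote it would
 * typically do, under struct_mutex,
 *
 *	ret = i915_gem_object_sync(obj, to);
 *	if (ret)
 *		return ret;
 *	... emit commands on "to" that read obj ...
 *
 * so that "to" either waits on a semaphore for the producing ring or,
 * when semaphores are disabled, the CPU stalls until the last write has
 * completed.
 */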
2738
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002739static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2740{
2741 u32 old_write_domain, old_read_domains;
2742
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002743 /* Force a pagefault for domain tracking on next user access */
2744 i915_gem_release_mmap(obj);
2745
Keith Packardb97c3d92011-06-24 21:02:59 -07002746 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2747 return;
2748
Chris Wilson97c809fd2012-10-09 19:24:38 +01002749 /* Wait for any direct GTT access to complete */
2750 mb();
2751
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002752 old_read_domains = obj->base.read_domains;
2753 old_write_domain = obj->base.write_domain;
2754
2755 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2756 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2757
2758 trace_i915_gem_object_change_domain(obj,
2759 old_read_domains,
2760 old_write_domain);
2761}
2762
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002763int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002764{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002765 struct drm_i915_gem_object *obj = vma->obj;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002766 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson43e28f02013-01-08 10:53:09 +00002767 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002768
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002769 if (list_empty(&vma->vma_link))
Eric Anholt673a3942008-07-30 12:06:12 -07002770 return 0;
2771
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002772 if (!drm_mm_node_allocated(&vma->node)) {
2773 i915_gem_vma_destroy(vma);
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002774 return 0;
2775 }
Ben Widawsky433544b2013-08-13 18:09:06 -07002776
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002777 if (vma->pin_count)
Chris Wilson31d8d652012-05-24 19:11:20 +01002778 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002779
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002780 BUG_ON(obj->pages == NULL);
2781
Chris Wilsona8198ee2011-04-13 22:04:09 +01002782 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002783 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002784 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002785	/* Continue on if we fail due to EIO; the GPU is hung, so we
2786	 * should be safe, and we need to clean up or else we might
2787	 * cause memory corruption through use-after-free.
2788 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002789
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002790 if (i915_is_ggtt(vma->vm)) {
2791 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002792
Daniel Vetter8b1bc9b2014-02-14 14:06:07 +01002793 /* release the fence reg _after_ flushing */
2794 ret = i915_gem_object_put_fence(obj);
2795 if (ret)
2796 return ret;
2797 }
Daniel Vetter96b47b62009-12-15 17:50:00 +01002798
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002799 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00002800
Ben Widawsky6f65e292013-12-06 14:10:56 -08002801 vma->unbind_vma(vma);
2802
Daniel Vetter74163902012-02-15 23:50:21 +01002803 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002804
Chris Wilson64bf9302014-02-25 14:23:28 +00002805 list_del_init(&vma->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002806 /* Avoid an unnecessary call to unbind on rebind. */
Ben Widawsky5cacaac2013-07-31 17:00:13 -07002807 if (i915_is_ggtt(vma->vm))
2808 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002809
Ben Widawsky2f633152013-07-17 12:19:03 -07002810 drm_mm_remove_node(&vma->node);
2811 i915_gem_vma_destroy(vma);
2812
2813 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002814 * no more VMAs exist. */
Ben Widawsky2f633152013-07-17 12:19:03 -07002815 if (list_empty(&obj->vma_list))
2816 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002817
Chris Wilson70903c32013-12-04 09:59:09 +00002818	/* And finally, now that the object is completely decoupled from this vma,
2819 * we can drop its hold on the backing storage and allow it to be
2820 * reaped by the shrinker.
2821 */
2822 i915_gem_object_unpin_pages(obj);
2823
Chris Wilson88241782011-01-07 17:09:48 +00002824 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002825}
2826
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002827int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002828{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002829 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002830 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002831 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002832
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002833 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002834 for_each_ring(ring, dev_priv, i) {
Chris Wilson691e6412014-04-09 09:07:36 +01002835 ret = i915_switch_context(ring, ring->default_context);
Ben Widawskyb6c74882012-08-14 14:35:14 -07002836 if (ret)
2837 return ret;
2838
Chris Wilson3e960502012-11-27 16:22:54 +00002839 ret = intel_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002840 if (ret)
2841 return ret;
2842 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002843
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002844 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002845}
2846
Chris Wilson9ce079e2012-04-17 15:31:30 +01002847static void i965_write_fence_reg(struct drm_device *dev, int reg,
2848 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002849{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002850 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak56c844e2013-01-07 21:47:34 +02002851 int fence_reg;
2852 int fence_pitch_shift;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002853
Imre Deak56c844e2013-01-07 21:47:34 +02002854 if (INTEL_INFO(dev)->gen >= 6) {
2855 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2856 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2857 } else {
2858 fence_reg = FENCE_REG_965_0;
2859 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2860 }
2861
Chris Wilsond18b9612013-07-10 13:36:23 +01002862 fence_reg += reg * 8;
2863
2864 /* To w/a incoherency with non-atomic 64-bit register updates,
2865 * we split the 64-bit update into two 32-bit writes. In order
2866 * for a partial fence not to be evaluated between writes, we
2867 * precede the update with write to turn off the fence register,
2868 * and only enable the fence as the last step.
2869 *
2870 * For extra levels of paranoia, we make sure each step lands
2871 * before applying the next step.
2872 */
2873 I915_WRITE(fence_reg, 0);
2874 POSTING_READ(fence_reg);
2875
Chris Wilson9ce079e2012-04-17 15:31:30 +01002876 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002877 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilsond18b9612013-07-10 13:36:23 +01002878 uint64_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002879
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002880 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
Chris Wilson9ce079e2012-04-17 15:31:30 +01002881 0xfffff000) << 32;
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002882 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
Imre Deak56c844e2013-01-07 21:47:34 +02002883 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002884 if (obj->tiling_mode == I915_TILING_Y)
2885 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2886 val |= I965_FENCE_REG_VALID;
Daniel Vetterc6642782010-11-12 13:46:18 +00002887
Chris Wilsond18b9612013-07-10 13:36:23 +01002888 I915_WRITE(fence_reg + 4, val >> 32);
2889 POSTING_READ(fence_reg + 4);
2890
2891 I915_WRITE(fence_reg + 0, val);
2892 POSTING_READ(fence_reg);
2893 } else {
2894 I915_WRITE(fence_reg + 4, 0);
2895 POSTING_READ(fence_reg + 4);
2896 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002897}
2898
Chris Wilson9ce079e2012-04-17 15:31:30 +01002899static void i915_write_fence_reg(struct drm_device *dev, int reg,
2900 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002901{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002902 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002903 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002904
Chris Wilson9ce079e2012-04-17 15:31:30 +01002905 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002906 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002907 int pitch_val;
2908 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002909
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002910 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002911 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002912 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2913 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2914 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002915
2916 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2917 tile_width = 128;
2918 else
2919 tile_width = 512;
2920
2921 /* Note: pitch better be a power of two tile widths */
2922 pitch_val = obj->stride / tile_width;
2923 pitch_val = ffs(pitch_val) - 1;
2924
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002925 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002926 if (obj->tiling_mode == I915_TILING_Y)
2927 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2928 val |= I915_FENCE_SIZE_BITS(size);
2929 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2930 val |= I830_FENCE_REG_VALID;
2931 } else
2932 val = 0;
2933
2934 if (reg < 8)
2935 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002936 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002937 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002938
Chris Wilson9ce079e2012-04-17 15:31:30 +01002939 I915_WRITE(reg, val);
2940 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002941}
2942
Chris Wilson9ce079e2012-04-17 15:31:30 +01002943static void i830_write_fence_reg(struct drm_device *dev, int reg,
2944 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002945{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03002946 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002947 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002948
Chris Wilson9ce079e2012-04-17 15:31:30 +01002949 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002950 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002951 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002952
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002953 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002954 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002955 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2956 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2957 i915_gem_obj_ggtt_offset(obj), size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002958
Chris Wilson9ce079e2012-04-17 15:31:30 +01002959 pitch_val = obj->stride / 128;
2960 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002961
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002962 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002963 if (obj->tiling_mode == I915_TILING_Y)
2964 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2965 val |= I830_FENCE_SIZE_BITS(size);
2966 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2967 val |= I830_FENCE_REG_VALID;
2968 } else
2969 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002970
Chris Wilson9ce079e2012-04-17 15:31:30 +01002971 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2972 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2973}
2974
Chris Wilsond0a57782012-10-09 19:24:37 +01002975static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2976{
2977 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2978}
2979
Chris Wilson9ce079e2012-04-17 15:31:30 +01002980static void i915_gem_write_fence(struct drm_device *dev, int reg,
2981 struct drm_i915_gem_object *obj)
2982{
Chris Wilsond0a57782012-10-09 19:24:37 +01002983 struct drm_i915_private *dev_priv = dev->dev_private;
2984
2985 /* Ensure that all CPU reads are completed before installing a fence
2986 * and all writes before removing the fence.
2987 */
2988 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2989 mb();
2990
Daniel Vetter94a335d2013-07-17 14:51:28 +02002991 WARN(obj && (!obj->stride || !obj->tiling_mode),
2992 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2993 obj->stride, obj->tiling_mode);
2994
Chris Wilson9ce079e2012-04-17 15:31:30 +01002995 switch (INTEL_INFO(dev)->gen) {
Ben Widawsky5ab31332013-11-02 21:07:03 -07002996 case 8:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002997 case 7:
Imre Deak56c844e2013-01-07 21:47:34 +02002998 case 6:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002999 case 5:
3000 case 4: i965_write_fence_reg(dev, reg, obj); break;
3001 case 3: i915_write_fence_reg(dev, reg, obj); break;
3002 case 2: i830_write_fence_reg(dev, reg, obj); break;
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08003003 default: BUG();
Chris Wilson9ce079e2012-04-17 15:31:30 +01003004 }
Chris Wilsond0a57782012-10-09 19:24:37 +01003005
3006 /* And similarly be paranoid that no direct access to this region
3007 * is reordered to before the fence is installed.
3008 */
3009 if (i915_gem_object_needs_mb(obj))
3010 mb();
Jesse Barnesde151cf2008-11-12 10:03:55 -08003011}
3012
Chris Wilson61050802012-04-17 15:31:31 +01003013static inline int fence_number(struct drm_i915_private *dev_priv,
3014 struct drm_i915_fence_reg *fence)
3015{
3016 return fence - dev_priv->fence_regs;
3017}
3018
3019static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3020 struct drm_i915_fence_reg *fence,
3021 bool enable)
3022{
Chris Wilson2dc8aae2013-05-22 17:08:06 +01003023 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson46a0b632013-07-10 13:36:24 +01003024 int reg = fence_number(dev_priv, fence);
Chris Wilson61050802012-04-17 15:31:31 +01003025
Chris Wilson46a0b632013-07-10 13:36:24 +01003026 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
Chris Wilson61050802012-04-17 15:31:31 +01003027
3028 if (enable) {
Chris Wilson46a0b632013-07-10 13:36:24 +01003029 obj->fence_reg = reg;
Chris Wilson61050802012-04-17 15:31:31 +01003030 fence->obj = obj;
3031 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3032 } else {
3033 obj->fence_reg = I915_FENCE_REG_NONE;
3034 fence->obj = NULL;
3035 list_del_init(&fence->lru_list);
3036 }
Daniel Vetter94a335d2013-07-17 14:51:28 +02003037 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +01003038}
3039
Chris Wilsond9e86c02010-11-10 16:40:20 +00003040static int
Chris Wilsond0a57782012-10-09 19:24:37 +01003041i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003042{
Chris Wilson1c293ea2012-04-17 15:31:27 +01003043 if (obj->last_fenced_seqno) {
Chris Wilson86d5bc32012-07-20 12:41:04 +01003044 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01003045 if (ret)
3046 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003047
3048 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003049 }
3050
Chris Wilson86d5bc32012-07-20 12:41:04 +01003051 obj->fenced_gpu_access = false;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003052 return 0;
3053}
3054
3055int
3056i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3057{
Chris Wilson61050802012-04-17 15:31:31 +01003058 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003059 struct drm_i915_fence_reg *fence;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003060 int ret;
3061
Chris Wilsond0a57782012-10-09 19:24:37 +01003062 ret = i915_gem_object_wait_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003063 if (ret)
3064 return ret;
3065
Chris Wilson61050802012-04-17 15:31:31 +01003066 if (obj->fence_reg == I915_FENCE_REG_NONE)
3067 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01003068
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003069 fence = &dev_priv->fence_regs[obj->fence_reg];
3070
Daniel Vetteraff10b302014-02-14 14:06:05 +01003071 if (WARN_ON(fence->pin_count))
3072 return -EBUSY;
3073
Chris Wilson61050802012-04-17 15:31:31 +01003074 i915_gem_object_fence_lost(obj);
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003075 i915_gem_object_update_fence(obj, fence, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003076
3077 return 0;
3078}
3079
3080static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01003081i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01003082{
Daniel Vetterae3db242010-02-19 11:51:58 +01003083 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01003084 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003085 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01003086
3087 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003088 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003089 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3090 reg = &dev_priv->fence_regs[i];
3091 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003092 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003093
Chris Wilson1690e1e2011-12-14 13:57:08 +01003094 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003095 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003096 }
3097
Chris Wilsond9e86c02010-11-10 16:40:20 +00003098 if (avail == NULL)
Chris Wilson5dce5b932014-01-20 10:17:36 +00003099 goto deadlock;
Daniel Vetterae3db242010-02-19 11:51:58 +01003100
3101 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003102 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01003103 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01003104 continue;
3105
Chris Wilson8fe301a2012-04-17 15:31:28 +01003106 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003107 }
3108
Chris Wilson5dce5b932014-01-20 10:17:36 +00003109deadlock:
3110 /* Wait for completion of pending flips which consume fences */
3111 if (intel_has_pending_fb_unpin(dev))
3112 return ERR_PTR(-EAGAIN);
3113
3114 return ERR_PTR(-EDEADLK);
Daniel Vetterae3db242010-02-19 11:51:58 +01003115}
3116
Jesse Barnesde151cf2008-11-12 10:03:55 -08003117/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003118 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08003119 * @obj: object to map through a fence reg
3120 *
3121 * When mapping objects through the GTT, userspace wants to be able to write
3122 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003123 * This function walks the fence regs looking for a free one for @obj,
3124 * stealing one if it can't find any.
3125 *
3126 * It then sets up the reg based on the object's properties: address, pitch
3127 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003128 *
3129 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003130 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003131int
Chris Wilson06d98132012-04-17 15:31:24 +01003132i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08003133{
Chris Wilson05394f32010-11-08 19:18:58 +00003134 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08003135 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01003136 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003137 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003138 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003139
Chris Wilson14415742012-04-17 15:31:33 +01003140 /* Have we updated the tiling parameters upon the object and so
3141 * will need to serialise the write to the associated fence register?
3142 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003143 if (obj->fence_dirty) {
Chris Wilsond0a57782012-10-09 19:24:37 +01003144 ret = i915_gem_object_wait_fence(obj);
Chris Wilson14415742012-04-17 15:31:33 +01003145 if (ret)
3146 return ret;
3147 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003148
Chris Wilsond9e86c02010-11-10 16:40:20 +00003149 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00003150 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3151 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003152 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01003153 list_move_tail(&reg->lru_list,
3154 &dev_priv->mm.fence_list);
3155 return 0;
3156 }
3157 } else if (enable) {
3158 reg = i915_find_fence_reg(dev);
Chris Wilson5dce5b932014-01-20 10:17:36 +00003159 if (IS_ERR(reg))
3160 return PTR_ERR(reg);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003161
Chris Wilson14415742012-04-17 15:31:33 +01003162 if (reg->obj) {
3163 struct drm_i915_gem_object *old = reg->obj;
3164
Chris Wilsond0a57782012-10-09 19:24:37 +01003165 ret = i915_gem_object_wait_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003166 if (ret)
3167 return ret;
3168
Chris Wilson14415742012-04-17 15:31:33 +01003169 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003170 }
Chris Wilson14415742012-04-17 15:31:33 +01003171 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07003172 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07003173
Chris Wilson14415742012-04-17 15:31:33 +01003174 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson14415742012-04-17 15:31:33 +01003175
Chris Wilson9ce079e2012-04-17 15:31:30 +01003176 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003177}
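
/*
 * Hedged usage sketch (the surrounding steps are illustrative, not copied
 * from this file): a path about to access a tiled object through the
 * mappable aperture would, under struct_mutex, do roughly
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret)
 *		goto err;
 *	... access the object through the GTT, relying on the fence
 *	    register to detile the CPU view ...
 *
 * while an untiled object simply has any stale fence removed.
 */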
3178
Chris Wilson42d6ab42012-07-26 11:49:32 +01003179static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3180 struct drm_mm_node *gtt_space,
3181 unsigned long cache_level)
3182{
3183 struct drm_mm_node *other;
3184
3185 /* On non-LLC machines we have to be careful when putting differing
3186 * types of snoopable memory together to avoid the prefetcher
Damien Lespiau4239ca72012-12-03 16:26:16 +00003187 * crossing memory domains and dying.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003188 */
3189 if (HAS_LLC(dev))
3190 return true;
3191
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003192 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003193 return true;
3194
3195 if (list_empty(&gtt_space->node_list))
3196 return true;
3197
3198 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3199 if (other->allocated && !other->hole_follows && other->color != cache_level)
3200 return false;
3201
3202 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3203 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3204 return false;
3205
3206 return true;
3207}
3208
3209static void i915_gem_verify_gtt(struct drm_device *dev)
3210{
3211#if WATCH_GTT
3212 struct drm_i915_private *dev_priv = dev->dev_private;
3213 struct drm_i915_gem_object *obj;
3214 int err = 0;
3215
Ben Widawsky35c20a62013-05-31 11:28:48 -07003216 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
Chris Wilson42d6ab42012-07-26 11:49:32 +01003217 if (obj->gtt_space == NULL) {
3218 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3219 err++;
3220 continue;
3221 }
3222
3223 if (obj->cache_level != obj->gtt_space->color) {
3224 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003225 i915_gem_obj_ggtt_offset(obj),
3226 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003227 obj->cache_level,
3228 obj->gtt_space->color);
3229 err++;
3230 continue;
3231 }
3232
3233 if (!i915_gem_valid_gtt_space(dev,
3234 obj->gtt_space,
3235 obj->cache_level)) {
3236 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003237 i915_gem_obj_ggtt_offset(obj),
3238 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003239 obj->cache_level);
3240 err++;
3241 continue;
3242 }
3243 }
3244
3245 WARN_ON(err);
3246#endif
3247}
3248
Jesse Barnesde151cf2008-11-12 10:03:55 -08003249/**
Eric Anholt673a3942008-07-30 12:06:12 -07003250 * Finds free space in the GTT aperture and binds the object there.
3251 */
Daniel Vetter262de142014-02-14 14:01:20 +01003252static struct i915_vma *
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003253i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3254 struct i915_address_space *vm,
3255 unsigned alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003256 unsigned flags)
Eric Anholt673a3942008-07-30 12:06:12 -07003257{
Chris Wilson05394f32010-11-08 19:18:58 +00003258 struct drm_device *dev = obj->base.dev;
Jani Nikula3e31c6c2014-03-31 14:27:16 +03003259 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter5e783302010-11-14 22:32:36 +01003260 u32 size, fence_size, fence_alignment, unfenced_alignment;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003261 size_t gtt_max =
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003262 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
Ben Widawsky2f633152013-07-17 12:19:03 -07003263 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003264 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003265
Chris Wilsone28f8712011-07-18 13:11:49 -07003266 fence_size = i915_gem_get_gtt_size(dev,
3267 obj->base.size,
3268 obj->tiling_mode);
3269 fence_alignment = i915_gem_get_gtt_alignment(dev,
3270 obj->base.size,
Imre Deakd8651102013-01-07 21:47:33 +02003271 obj->tiling_mode, true);
Chris Wilsone28f8712011-07-18 13:11:49 -07003272 unfenced_alignment =
Imre Deakd8651102013-01-07 21:47:33 +02003273 i915_gem_get_gtt_alignment(dev,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003274 obj->base.size,
3275 obj->tiling_mode, false);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003276
Eric Anholt673a3942008-07-30 12:06:12 -07003277 if (alignment == 0)
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003278 alignment = flags & PIN_MAPPABLE ? fence_alignment :
Daniel Vetter5e783302010-11-14 22:32:36 +01003279 unfenced_alignment;
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003280 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003281 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
Daniel Vetter262de142014-02-14 14:01:20 +01003282 return ERR_PTR(-EINVAL);
Eric Anholt673a3942008-07-30 12:06:12 -07003283 }
3284
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003285 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003286
Chris Wilson654fc602010-05-27 13:18:21 +01003287 /* If the object is bigger than the entire aperture, reject it early
3288 * before evicting everything in a vain attempt to find space.
3289 */
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003290 if (obj->base.size > gtt_max) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003291 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
Chris Wilsona36689c2013-05-21 16:58:49 +01003292 obj->base.size,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003293 flags & PIN_MAPPABLE ? "mappable" : "total",
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003294 gtt_max);
Daniel Vetter262de142014-02-14 14:01:20 +01003295 return ERR_PTR(-E2BIG);
Chris Wilson654fc602010-05-27 13:18:21 +01003296 }
3297
Chris Wilson37e680a2012-06-07 15:38:42 +01003298 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003299 if (ret)
Daniel Vetter262de142014-02-14 14:01:20 +01003300 return ERR_PTR(ret);
Chris Wilson6c085a72012-08-20 11:40:46 +02003301
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003302 i915_gem_object_pin_pages(obj);
3303
Ben Widawskyaccfef22013-08-14 11:38:35 +02003304 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
Daniel Vetter262de142014-02-14 14:01:20 +01003305 if (IS_ERR(vma))
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003306 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003307
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003308search_free:
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003309 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003310 size, alignment,
David Herrmann31e5d7c2013-07-27 13:36:27 +02003311 obj->cache_level, 0, gtt_max,
Lauri Kasanen62347f92014-04-02 20:03:57 +03003312 DRM_MM_SEARCH_DEFAULT,
3313 DRM_MM_CREATE_DEFAULT);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003314 if (ret) {
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07003315 ret = i915_gem_evict_something(dev, vm, size, alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003316 obj->cache_level, flags);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003317 if (ret == 0)
3318 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003319
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003320 goto err_free_vma;
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003321 }
Ben Widawsky2f633152013-07-17 12:19:03 -07003322 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003323 obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003324 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003325 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003326 }
3327
Daniel Vetter74163902012-02-15 23:50:21 +01003328 ret = i915_gem_gtt_prepare_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003329 if (ret)
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003330 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003331
Ben Widawsky35c20a62013-05-31 11:28:48 -07003332 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -07003333 list_add_tail(&vma->mm_list, &vm->inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003334
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003335 if (i915_is_ggtt(vm)) {
3336 bool mappable, fenceable;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003337
Daniel Vetter49987092013-08-14 10:21:23 +02003338 fenceable = (vma->node.size == fence_size &&
3339 (vma->node.start & (fence_alignment - 1)) == 0);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003340
Daniel Vetter49987092013-08-14 10:21:23 +02003341 mappable = (vma->node.start + obj->base.size <=
3342 dev_priv->gtt.mappable_end);
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003343
Ben Widawsky5cacaac2013-07-31 17:00:13 -07003344 obj->map_and_fenceable = mappable && fenceable;
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003345 }
Daniel Vetter75e9e912010-11-04 17:11:09 +01003346
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003347 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003348
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003349 trace_i915_vma_bind(vma, flags);
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003350 vma->bind_vma(vma, obj->cache_level,
3351 flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3352
Chris Wilson42d6ab42012-07-26 11:49:32 +01003353 i915_gem_verify_gtt(dev);
Daniel Vetter262de142014-02-14 14:01:20 +01003354 return vma;
Ben Widawsky2f633152013-07-17 12:19:03 -07003355
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003356err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003357 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003358err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003359 i915_gem_vma_destroy(vma);
Daniel Vetter262de142014-02-14 14:01:20 +01003360 vma = ERR_PTR(ret);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003361err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003362 i915_gem_object_unpin_pages(obj);
Daniel Vetter262de142014-02-14 14:01:20 +01003363 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003364}
3365
Chris Wilson000433b2013-08-08 14:41:09 +01003366bool
Chris Wilson2c225692013-08-09 12:26:45 +01003367i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3368 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003369{
Eric Anholt673a3942008-07-30 12:06:12 -07003370 /* If we don't have a page list set up, then we're not pinned
3371 * to GPU, and we can ignore the cache flush because it'll happen
3372 * again at bind time.
3373 */
Chris Wilson05394f32010-11-08 19:18:58 +00003374 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003375 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003376
Imre Deak769ce462013-02-13 21:56:05 +02003377 /*
3378 * Stolen memory is always coherent with the GPU as it is explicitly
3379 * marked as wc by the system, or the system is cache-coherent.
3380 */
3381 if (obj->stolen)
Chris Wilson000433b2013-08-08 14:41:09 +01003382 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003383
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003384 /* If the GPU is snooping the contents of the CPU cache,
3385 * we do not need to manually clear the CPU cache lines. However,
3386 * the caches are only snooped when the render cache is
3387 * flushed/invalidated. As we always have to emit invalidations
3388 * and flushes when moving into and out of the RENDER domain, correct
3389 * snooping behaviour occurs naturally as the result of our domain
3390 * tracking.
3391 */
Chris Wilson2c225692013-08-09 12:26:45 +01003392 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
Chris Wilson000433b2013-08-08 14:41:09 +01003393 return false;
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003394
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003395 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003396 drm_clflush_sg(obj->pages);
Chris Wilson000433b2013-08-08 14:41:09 +01003397
3398 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003399}
3400
3401/** Flushes the GTT write domain for the object if it's dirty. */
3402static void
Chris Wilson05394f32010-11-08 19:18:58 +00003403i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003404{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003405 uint32_t old_write_domain;
3406
Chris Wilson05394f32010-11-08 19:18:58 +00003407 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003408 return;
3409
Chris Wilson63256ec2011-01-04 18:42:07 +00003410 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003411 * to it immediately go to main memory as far as we know, so there's
3412 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003413 *
3414 * However, we do have to enforce the order so that all writes through
3415 * the GTT land before any writes to the device, such as updates to
3416 * the GATT itself.
 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
				       bool force)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	if (i915_gem_clflush_object(obj, force))
		i915_gem_chipset_flush(obj->base.dev);

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (!i915_gem_obj_bound_any(obj))
		return -EINVAL;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_retire(obj);
	i915_gem_object_flush_cpu_write_domain(obj, false);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	if (i915_gem_object_is_inactive(obj)) {
		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
		if (vma)
			list_move_tail(&vma->mm_list,
				       &dev_priv->gtt.base.inactive_list);
	}

	return 0;
}

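/**
 * Changes the cache-level of an object across all of its VMAs.
 *
 * Any VMA whose placement is no longer valid for the new cache level is
 * unbound first; on pre-SNB hardware, which cannot use fences with snooped
 * memory, the object's fence is also released before the remaining VMAs
 * are rebound with PTEs for the new level.
 */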
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma, *next;
	int ret;

	if (obj->cache_level == cache_level)
		return 0;

	if (i915_gem_obj_is_pinned(obj)) {
		DRM_DEBUG("cannot change the cache level of pinned objects\n");
		return -EBUSY;
	}

	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;
		}
	}

	if (i915_gem_obj_bound_any(obj)) {
		ret = i915_gem_object_finish_gpu(obj);
		if (ret)
			return ret;

		i915_gem_object_finish_gtt(obj);

		/* Before SandyBridge, you could not use tiling or fence
		 * registers with snooped memory, so relinquish any fences
		 * currently pointing to our region in the aperture.
		 */
		if (INTEL_INFO(dev)->gen < 6) {
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		}

		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (drm_mm_node_allocated(&vma->node))
				vma->bind_vma(vma, cache_level,
					      obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
	}

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

	if (cpu_write_needs_clflush(obj)) {
		u32 old_read_domains, old_write_domain;

		/* If we're coming from LLC cached, then we haven't
		 * actually been tracking whether the data is in the
		 * CPU cache or not, since we only allow one bit set
		 * in obj->write_domain and have been skipping the clflushes.
		 * Just set it to the CPU cache for now.
		 */
		i915_gem_object_retire(obj);
		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);

		old_read_domains = obj->base.read_domains;
		old_write_domain = obj->base.write_domain;

		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;

		trace_i915_gem_object_change_domain(obj,
						    old_read_domains,
						    old_write_domain);
	}

	i915_gem_verify_gtt(dev);
	return 0;
}

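/**
 * Reports the object's current caching mode back to userspace, folding the
 * internal cache levels into the uAPI I915_CACHING_* values.
 */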
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

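/**
 * Sets the object's caching mode on behalf of userspace; the uAPI
 * I915_CACHING_* value is translated into an i915_cache_level first.
 */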
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

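/*
 * Returns true if any of the remaining pins on the object's GGTT VMA can
 * be attributed to the display engine (see the accounting comment below).
 */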
static bool is_pin_display(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	if (list_empty(&obj->vma_list))
		return false;

	vma = i915_gem_obj_to_ggtt(obj);
	if (!vma)
		return false;

	/* There are 3 sources that pin objects:
	 *   1. The display engine (scanouts, sprites, cursors);
	 *   2. Reservations for execbuffer;
	 *   3. The user.
	 *
	 * We can ignore reservations as we hold the struct_mutex and
	 * are only called outside of the reservation path. The user
	 * can only increment pin_count once, and so if after
	 * subtracting the potential reference by the user, any pin_count
	 * remains, it must be due to another use by the display engine.
	 */
	return vma->pin_count - !!obj->user_pin_count;
}

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined)
{
	u32 old_read_domains, old_write_domain;
	bool was_pin_display;
	int ret;

	if (pipelined != obj->ring) {
		ret = i915_gem_object_sync(obj, pipelined);
		if (ret)
			return ret;
	}

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	was_pin_display = obj->pin_display;
	obj->pin_display = true;

	/* The display engine is not coherent with the LLC cache on gen6. As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is the lowest common denominator for
	 * all chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
	if (ret)
		goto err_unpin_display;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
	if (ret)
		goto err_unpin_display;

	i915_gem_object_flush_cpu_write_domain(obj, true);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;

err_unpin_display:
	WARN_ON(was_pin_display != is_pin_display(obj));
	obj->pin_display = was_pin_display;
	return ret;
}

void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
{
	i915_gem_object_ggtt_unpin(obj);
	obj->pin_display = is_pin_display(obj);
}

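/**
 * Waits for any outstanding rendering and then drops the GPU read domains,
 * so that the GPU's caches and TLBs are invalidated before the object is
 * reused.
 */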
int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
	int ret;

	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	/* Ensure that we invalidate the GPU's caches and TLBs. */
	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_retire(obj);
	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	unsigned reset_counter;
	u32 seqno = 0;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
	if (ret)
		return ret;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}

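/**
 * Pins an object into the given address space. If the object is already
 * bound at an unsuitable offset or alignment, it is unbound first and
 * rebound from scratch before the pin count is raised.
 */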
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
		    unsigned flags)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_vma *vma;
	int ret;

	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
		return -ENODEV;

	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
		return -EINVAL;

	vma = i915_gem_obj_to_vma(obj, vm);
	if (vma) {
		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
			return -EBUSY;

		if ((alignment &&
		     vma->node.start & (alignment - 1)) ||
		    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
			WARN(vma->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     i915_gem_obj_offset(obj, vm), alignment,
			     flags & PIN_MAPPABLE,
			     obj->map_and_fenceable);
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;

			vma = NULL;
		}
	}

	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
		vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
		if (IS_ERR(vma))
			return PTR_ERR(vma);
	}

	if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);

	vma->pin_count++;
	if (flags & PIN_MAPPABLE)
		obj->pin_mappable |= true;

	return 0;
}

void
i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);

	BUG_ON(!vma);
	BUG_ON(vma->pin_count == 0);
	BUG_ON(!i915_gem_obj_ggtt_bound(obj));

	if (--vma->pin_count == 0)
		obj->pin_mappable = false;
}

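/*
 * Pins the fence register backing this object so that the register cannot
 * be reassigned to another object while in use. Returns true if a fence
 * was pinned, false if the object holds no fence.
 */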
bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);

		WARN_ON(!ggtt_vma ||
			dev_priv->fence_regs[obj->fence_reg].pin_count >
			ggtt_vma->pin_count);
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
		return true;
	} else
		return false;
}

void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}

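/*
 * Legacy (pre-gen6) ioctl: pins an object into the mappable GGTT on behalf
 * of userspace and reports back its offset.
 */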
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (INTEL_INFO(dev)->gen >= 6)
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to pin a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	if (obj->pin_filp != NULL && obj->pin_filp != file) {
		DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	if (obj->user_pin_count == ULONG_MAX) {
		ret = -EBUSY;
		goto out;
	}

	if (obj->user_pin_count == 0) {
		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
		if (ret)
			goto out;
	}

	obj->user_pin_count++;
	obj->pin_filp = file;

	args->offset = i915_gem_obj_ggtt_offset(obj);
out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_filp != file) {
		DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj->user_pin_count--;
	if (obj->user_pin_count == 0) {
		obj->pin_filp = NULL;
		i915_gem_object_ggtt_unpin(obj);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

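/*
 * Reports whether an object is still in use by the GPU. Pending activity
 * is flushed first, so that an idle object eventually reads back as idle
 * without further action from userspace.
 */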
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);

	args->busy = obj->active;
	if (obj->ring) {
		BUILD_BUG_ON(I915_NUM_RINGS > 16);
		args->busy |= intel_ring_flag(obj->ring) << 16;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

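/*
 * Lets userspace mark an object's backing storage as purgeable
 * (I915_MADV_DONTNEED) or required (I915_MADV_WILLNEED); a purgeable
 * object whose pages are no longer attached has its backing storage
 * discarded immediately.
 */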
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (i915_gem_obj_is_pinned(obj)) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	INIT_LIST_HEAD(&obj->global_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;

	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

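/*
 * Allocates a new shmem-backed GEM object, applying per-device GFP
 * placement constraints (e.g. 965gm cannot relocate objects above 4GiB)
 * and picking a default cache level based on LLC availability.
 */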
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		i915_gem_object_free(obj);
		return NULL;
	}

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = file_inode(obj->base.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	if (obj->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
		int ret;

		vma->pin_count = 0;
		ret = i915_vma_unbind(vma);
		if (WARN_ON(ret == -ERESTARTSYS)) {
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			WARN_ON(i915_vma_unbind(vma));

			dev_priv->mm.interruptible = was_interruptible;
		}
	}

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);
	i915_gem_object_release_stolen(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == vm)
			return vma;

	return NULL;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

	list_del(&vma->vma_link);

	kfree(vma);
}

static void
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		intel_stop_ring_buffer(ring);
}

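/*
 * Quiesces the GPU before suspend: waits for outstanding work to complete,
 * retires requests, evicts everything under UMS and stops the rings, then
 * cancels the retire/idle workers.
 */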
int
i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->ums.mm_suspended)
		goto err;

	ret = i915_gpu_idle(dev);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev);

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_gem_evict_everything(dev);

	i915_kernel_lost_context(dev);
	i915_gem_stop_ringbuffers(dev);

	/* Hack! Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound ums.mm_suspended!
	 */
	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
							     DRIVER_MODESET);
	mutex_unlock(&dev->struct_mutex);

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	cancel_delayed_work_sync(&dev_priv->mm.idle_work);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

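/*
 * Replays the saved L3 parity remapping registers for a slice by emitting
 * MI_LOAD_REGISTER_IMM commands on the given ring (gen7 L3 DPF only).
 */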
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
	int i, ret;

	if (!HAS_L3_DPF(dev) || !remap_info)
		return 0;

	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, reg_base + i);
		intel_ring_emit(ring, remap_info[i/4]);
	}

	intel_ring_advance(ring);

	return ret;
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static bool
intel_enable_blt(struct drm_device *dev)
{
	if (!HAS_BLT(dev))
		return false;

	/* The blitter was dysfunctional on early prototypes */
	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
		DRM_INFO("BLT not supported on this pre-production hardware;"
			 " graphics performance will be degraded.\n");
		return false;
	}

	return true;
}

static int i915_gem_init_rings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (intel_enable_blt(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = intel_init_bsd2_ring_buffer(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

	return ret;
}

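/*
 * Brings the GPU back up after reset or resume: sets up swizzling and
 * workarounds, (re)initialises the rings, replays the L3 remapping and
 * re-enables the contexts (and with them PPGTT).
 */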
4508int
4509i915_gem_init_hw(struct drm_device *dev)
4510{
Jani Nikula3e31c6c2014-03-31 14:27:16 +03004511 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004512 int ret, i;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004513
4514 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4515 return -EIO;
4516
Ben Widawsky59124502013-07-04 11:02:05 -07004517 if (dev_priv->ellc_size)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004518 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004519
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004520 if (IS_HASWELL(dev))
4521 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4522 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004523
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004524 if (HAS_PCH_NOP(dev)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004525 if (IS_IVYBRIDGE(dev)) {
4526 u32 temp = I915_READ(GEN7_MSG_CTL);
4527 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4528 I915_WRITE(GEN7_MSG_CTL, temp);
4529 } else if (INTEL_INFO(dev)->gen >= 7) {
4530 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4531 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4532 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4533 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004534 }
4535
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004536 i915_gem_init_swizzling(dev);
4537
4538 ret = i915_gem_init_rings(dev);
4539 if (ret)
Mika Kuoppala99433932013-01-22 14:12:17 +02004540 return ret;
4541
Ben Widawskyc3787e22013-09-17 21:12:44 -07004542 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4543 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4544
Ben Widawsky254f9652012-06-04 14:42:42 -07004545 /*
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004546 * XXX: Contexts should only be initialized once. Doing a switch to the
4547 * default context switch however is something we'd like to do after
4548 * reset or thaw (the latter may not actually be necessary for HW, but
4549 * goes with our code better). Context switching requires rings (for
4550 * the do_switch), but before enabling PPGTT. So don't move this.
Ben Widawsky254f9652012-06-04 14:42:42 -07004551 */
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004552 ret = i915_gem_context_enable(dev_priv);
Chris Wilson60990322014-04-09 09:19:42 +01004553 if (ret && ret != -EIO) {
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004554 DRM_ERROR("Context enable failed %d\n", ret);
Chris Wilson60990322014-04-09 09:19:42 +01004555 i915_gem_cleanup_ringbuffer(dev);
Ben Widawskyb7c36d22013-04-08 18:43:56 -07004556 }
Daniel Vettere21af882012-02-09 20:53:27 +01004557
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004558 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004559}
4560
Chris Wilson1070a422012-04-24 15:47:41 +01004561int i915_gem_init(struct drm_device *dev)
4562{
4563 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson1070a422012-04-24 15:47:41 +01004564 int ret;
4565
Chris Wilson1070a422012-04-24 15:47:41 +01004566 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004567
4568 if (IS_VALLEYVIEW(dev)) {
4569 /* VLVA0 (potential hack), BIOS isn't actually waking us */
Imre Deak981a5ae2014-04-14 20:24:22 +03004570 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4571 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4572 VLV_GTLC_ALLOWWAKEACK), 10))
Jesse Barnesd62b4892013-03-08 10:45:53 -08004573 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4574 }
4575
Chris Wilson5cc9ed42014-05-16 14:22:37 +01004576 i915_gem_init_userptr(dev);
Ben Widawskyd7e50082012-12-18 10:31:25 -08004577 i915_gem_init_global_gtt(dev);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004578
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004579 ret = i915_gem_context_init(dev);
Mika Kuoppalae3848692014-01-31 17:14:02 +02004580 if (ret) {
4581 mutex_unlock(&dev->struct_mutex);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004582 return ret;
Mika Kuoppalae3848692014-01-31 17:14:02 +02004583 }
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004584
Chris Wilson1070a422012-04-24 15:47:41 +01004585 ret = i915_gem_init_hw(dev);
Chris Wilson60990322014-04-09 09:19:42 +01004586 if (ret == -EIO) {
4587 /* Allow ring initialisation to fail by marking the GPU as
4588 * wedged. But we only want to do this where the GPU is angry,
4589 * for all other failure, such as an allocation failure, bail.
4590 */
4591 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4592 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4593 ret = 0;
Chris Wilson1070a422012-04-24 15:47:41 +01004594 }
Chris Wilson60990322014-04-09 09:19:42 +01004595 mutex_unlock(&dev->struct_mutex);
Chris Wilson1070a422012-04-24 15:47:41 +01004596
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004597 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4598 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4599 dev_priv->dri1.allow_batchbuffer = 1;
Chris Wilson60990322014-04-09 09:19:42 +01004600 return ret;
Chris Wilson1070a422012-04-24 15:47:41 +01004601}
4602
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		intel_cleanup_ring_buffer(ring);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->ums.mm_suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));

	ret = drm_irq_install(dev, dev->pdev->irq);
	if (ret)
		goto cleanup_ringbuffer;
	mutex_unlock(&dev->struct_mutex);

	return 0;

cleanup_ringbuffer:
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->ums.mm_suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	mutex_lock(&dev->struct_mutex);
	drm_irq_uninstall(dev);
	mutex_unlock(&dev->struct_mutex);

	return i915_gem_suspend(dev);
}

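/*
 * Editor's note (interpretation): entervt/leavevt above are legacy
 * user-mode-setting VT-switch hooks; under KMS (DRIVER_MODESET) they are
 * deliberate no-ops.  leavevt mirrors entervt: it uninstalls the IRQ
 * handler that entervt installed and then idles the GPU through
 * i915_gem_suspend().
 */
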
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_suspend(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
}

void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm)
{
	if (!i915_is_ggtt(vm))
		drm_mm_init(&vm->mm, vm->start, vm->total);
	vm->dev = dev_priv->dev;
	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->global_link);
	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

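/*
 * Usage sketch (editor's addition, hedged): the global GTT is initialised
 * with i915_init_vm(dev_priv, &dev_priv->gtt.base) in i915_gem_load()
 * below.  A per-process address space is presumably expected to fill in
 * vm->start and vm->total before calling this helper so that
 * drm_mm_init() covers the right range; the GGTT skips drm_mm_init()
 * here because its allocator is assumed to be set up by the global-GTT
 * init code instead.
 */
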
void
i915_gem_load(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->slab =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	i915_init_vm(dev_priv, &dev_priv->gtt.base);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.shrinker);
}

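/*
 * Editor's note (assumption): the register_shrinker() call above is
 * expected to be paired with unregister_shrinker(&dev_priv->mm.shrinker)
 * at driver teardown, outside this section, so the count/scan callbacks
 * cannot run against a freed dev_priv.
 */
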
/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	i915_gem_chipset_flush(dev);

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}

int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

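/*
 * Lifecycle sketch (editor's addition, hedged): a phys object is a
 * contiguous PCI buffer standing in for an object's shmem pages on
 * platforms that need real physical addresses (cursor/overlay registers).
 * Attach copies every shmem page into that buffer; detach copies the
 * possibly GPU-written contents back, clflushes and re-dirties the pages.
 * Illustrative call order for a hypothetical cursor object "obj":
 *
 *	i915_gem_attach_phys_object(dev, obj, I915_GEM_PHYS_CURSOR_0, align);
 *	... point the hardware at the contiguous buffer ...
 *	i915_gem_detach_phys_object(dev, obj);
 */
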
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	cancel_delayed_work_sync(&file_priv->mm.idle_work);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;

		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
			return false;

		*unlock = false;
	} else
		*unlock = true;

	return true;
}

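/*
 * Editor's note on the locking dance above: the shrinker can be entered
 * from direct reclaim while the current task already holds struct_mutex
 * (for example during an allocation made under the lock).  In that case
 * mutex_trylock() fails, so we check whether we are the owner and, unless
 * lock stealing has been disabled, proceed without taking or releasing
 * the mutex (*unlock = false).  On UP kernels without mutex debugging the
 * owner cannot be identified, so mutex_is_locked_by() conservatively
 * reports false and the shrinker backs off.
 */
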
static int num_vma_bound(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int count = 0;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			count++;

	return count;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_is_pinned(obj) &&
		    obj->pages_pin_count == num_vma_bound(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));
	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}

	return -1;
}

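/*
 * Editor's note: i915_gem_obj_offset() returns an unsigned long, so the
 * "return -1" above is really a ~0UL sentinel.  Callers are presumably
 * expected to check i915_gem_obj_bound() first (or rely on the BUG_ON)
 * rather than compare the result against -1.
 */
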
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
	if (freed < sc->nr_to_scan)
		freed += __i915_gem_shrink(dev_priv,
					   sc->nr_to_scan - freed,
					   false);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink_all(dev_priv);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}

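/*
 * Editor's note: returning SHRINK_STOP from the scan callback tells the
 * core shrinker code that no progress can be made right now (here,
 * struct_mutex is held by another task), so it should give up on this
 * reclaim pass instead of retrying in a loop.
 */
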
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	/* This WARN has probably outlived its usefulness (callers already
	 * WARN if they don't find the GGTT vma they expect). When removing,
	 * remember to remove the pre-check in is_pin_display() as well */
	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
	if (vma->vm != obj_to_ggtt(obj))
		return NULL;

	return vma;
}