/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					  struct drm_i915_fence_reg *fence,
					  bool enable);

static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);

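/* An object is coherent with the CPU cache when the platform shares its
 * last-level cache with the GPU, or when the object uses a cacheable
 * (non-NONE) cache level; otherwise explicit clflushes are required.
 */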
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

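/* CPU writes need a clflush when the object is not coherent with the CPU
 * cache, or when it is pinned for display (scanout does not snoop the CPU
 * cache even on LLC platforms).
 */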
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

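/* An object is inactive once it is bound into some address space but is no
 * longer being accessed by the GPU.
 */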
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

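/* Common backend for the create and dumb_create ioctls: allocate a
 * page-aligned shmem-backed object of the requested size and return a new
 * handle for it. On success the handle holds the only reference.
 */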
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

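/* Copy helpers for objects whose backing pages are bit-17 swizzled: data is
 * moved one 64-byte cacheline at a time, with bit 6 of the GPU offset
 * flipped (offset ^ 64) so the CPU view matches the swizzled layout.
 */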
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

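/* Pread via the shmem backing pages. Each page is first tried with the
 * atomic kmap fastpath; on failure we drop struct_mutex, prefault the user
 * buffer once and fall back to the sleeping slowpath before retaking the
 * lock.
 */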
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

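/* Pwrite via the shmem backing pages, mirroring the pread path above: an
 * atomic per-page fastpath plus a sleeping slowpath that temporarily drops
 * struct_mutex, with the clflushes needed around partial cacheline writes
 * to keep the data coherent.
 */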
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

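/* Check for a pending or terminal GPU reset: returns -EAGAIN while a reset
 * is in progress (or -EIO for non-interruptible callers and for a wedged
 * GPU), and 0 otherwise.
 */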
int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

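/* Timer callback used by __wait_seqno to wake the waiting task when no
 * seqno interrupt is expected (missed interrupts or timed waits).
 */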
static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

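/* Common tail for the blocking and nonblocking wait_rendering paths: retire
 * completed requests and clear the stale write state now that the wait has
 * finished.
 */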
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

Chris Wilson3236f572012-08-24 09:35:09 +01001178/* A nonblocking variant of the above wait. This is a highly dangerous routine
1179 * as the object state may change during this call.
1180 */
1181static __must_check int
1182i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
Chris Wilson6e4930f2014-02-07 18:37:06 -02001183 struct drm_i915_file_private *file_priv,
Chris Wilson3236f572012-08-24 09:35:09 +01001184 bool readonly)
1185{
1186 struct drm_device *dev = obj->base.dev;
1187 struct drm_i915_private *dev_priv = dev->dev_private;
1188 struct intel_ring_buffer *ring = obj->ring;
Daniel Vetterf69061b2012-12-06 09:01:42 +01001189 unsigned reset_counter;
Chris Wilson3236f572012-08-24 09:35:09 +01001190 u32 seqno;
1191 int ret;
1192
1193 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1194 BUG_ON(!dev_priv->mm.interruptible);
1195
1196 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1197 if (seqno == 0)
1198 return 0;
1199
Daniel Vetter33196de2012-11-14 17:14:05 +01001200 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
Chris Wilson3236f572012-08-24 09:35:09 +01001201 if (ret)
1202 return ret;
1203
1204 ret = i915_gem_check_olr(ring, seqno);
1205 if (ret)
1206 return ret;
1207
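	/* The reset counter is sampled while struct_mutex is still held;
	 * __wait_seqno() compares it again, so a GPU reset that lands while
	 * we sleep without the lock aborts the wait instead of being missed.
	 */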
Daniel Vetterf69061b2012-12-06 09:01:42 +01001208 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson3236f572012-08-24 09:35:09 +01001209 mutex_unlock(&dev->struct_mutex);
Chris Wilson6e4930f2014-02-07 18:37:06 -02001210 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
Chris Wilson3236f572012-08-24 09:35:09 +01001211 mutex_lock(&dev->struct_mutex);
Chris Wilsond26e3af2013-06-29 22:05:26 +01001212 if (ret)
1213 return ret;
Chris Wilson3236f572012-08-24 09:35:09 +01001214
Chris Wilsond26e3af2013-06-29 22:05:26 +01001215 return i915_gem_object_wait_rendering__tail(obj, ring);
Chris Wilson3236f572012-08-24 09:35:09 +01001216}
1217
Eric Anholt673a3942008-07-30 12:06:12 -07001218/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001219 * Called when user space prepares to use an object with the CPU, either
1220 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -07001221 */
1222int
1223i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001224 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001225{
1226 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001227 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001228 uint32_t read_domains = args->read_domains;
1229 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001230 int ret;
1231
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001232 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +01001233 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001234 return -EINVAL;
1235
Chris Wilson21d509e2009-06-06 09:46:02 +01001236 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001237 return -EINVAL;
1238
1239 /* Having something in the write domain implies it's in the read
1240 * domain, and only that read domain. Enforce that in the request.
1241 */
1242 if (write_domain != 0 && read_domains != write_domain)
1243 return -EINVAL;
1244
Chris Wilson76c1dec2010-09-25 11:22:51 +01001245 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001246 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001247 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001248
Chris Wilson05394f32010-11-08 19:18:58 +00001249 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001250 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001251 ret = -ENOENT;
1252 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +01001253 }
Jesse Barnes652c3932009-08-17 13:31:43 -07001254
Chris Wilson3236f572012-08-24 09:35:09 +01001255 /* Try to flush the object off the GPU without holding the lock.
1256 * We will repeat the flush holding the lock in the normal manner
1257 * to catch cases where we are gazumped.
1258 */
Chris Wilson6e4930f2014-02-07 18:37:06 -02001259 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1260 file->driver_priv,
1261 !write_domain);
Chris Wilson3236f572012-08-24 09:35:09 +01001262 if (ret)
1263 goto unref;
1264
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001265 if (read_domains & I915_GEM_DOMAIN_GTT) {
1266 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001267
1268 /* Silently promote "you're not bound, there was nothing to do"
1269 * to success, since the client was just asking us to
1270 * make sure everything was done.
1271 */
1272 if (ret == -EINVAL)
1273 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001274 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001275 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001276 }
1277
Chris Wilson3236f572012-08-24 09:35:09 +01001278unref:
Chris Wilson05394f32010-11-08 19:18:58 +00001279 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001280unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001281 mutex_unlock(&dev->struct_mutex);
1282 return ret;
1283}
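/*
 * Illustrative userspace-side sketch of this ioctl (the helper name and
 * error handling are illustrative only; the struct and flags come from the
 * uapi header i915_drm.h): before touching a shmem mmap of a buffer with
 * the CPU, clients move it to the CPU domain so the kernel can flush GPU
 * work and caches first.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int example_set_cpu_domain(int drm_fd, uint32_t handle, int writing)
 *	{
 *		struct drm_i915_gem_set_domain sd = {
 *			.handle = handle,
 *			.read_domains = I915_GEM_DOMAIN_CPU,
 *			.write_domain = writing ? I915_GEM_DOMAIN_CPU : 0,
 *		};
 *
 *		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *	}
 */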
1284
1285/**
1286 * Called when user space has done writes to this buffer
1287 */
1288int
1289i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001290 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001291{
1292 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001293 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001294 int ret = 0;
1295
Chris Wilson76c1dec2010-09-25 11:22:51 +01001296 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001297 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001298 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001299
Chris Wilson05394f32010-11-08 19:18:58 +00001300 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001301 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001302 ret = -ENOENT;
1303 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001304 }
1305
Eric Anholt673a3942008-07-30 12:06:12 -07001306 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson2c225692013-08-09 12:26:45 +01001307 if (obj->pin_display)
1308 i915_gem_object_flush_cpu_write_domain(obj, true);
Eric Anholte47c68e2008-11-14 13:35:19 -08001309
Chris Wilson05394f32010-11-08 19:18:58 +00001310 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001311unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001312 mutex_unlock(&dev->struct_mutex);
1313 return ret;
1314}
1315
1316/**
1317 * Maps the contents of an object, returning the address it is mapped
1318 * into.
1319 *
1320 * While the mapping holds a reference on the contents of the object, it doesn't
1321 * imply a ref on the object itself.
1322 */
1323int
1324i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001325 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001326{
1327 struct drm_i915_gem_mmap *args = data;
1328 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001329 unsigned long addr;
1330
Chris Wilson05394f32010-11-08 19:18:58 +00001331 obj = drm_gem_object_lookup(dev, file, args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07001332 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001333 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001334
Daniel Vetter1286ff72012-05-10 15:25:09 +02001335 /* prime objects have no backing filp to GEM mmap
1336 * pages from.
1337 */
1338 if (!obj->filp) {
1339 drm_gem_object_unreference_unlocked(obj);
1340 return -EINVAL;
1341 }
1342
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001343 addr = vm_mmap(obj->filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001344 PROT_READ | PROT_WRITE, MAP_SHARED,
1345 args->offset);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001346 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001347 if (IS_ERR((void *)addr))
1348 return addr;
1349
1350 args->addr_ptr = (uint64_t) addr;
1351
1352 return 0;
1353}
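/*
 * Illustrative userspace-side sketch (helper name invented for this
 * example; fields follow the uapi struct drm_i915_gem_mmap): the ioctl
 * returns a CPU pointer to the object's shmem backing store, which pairs
 * naturally with the SET_DOMAIN example above.
 *
 *	static void *example_cpu_mmap(int drm_fd, uint32_t handle, uint64_t size)
 *	{
 *		struct drm_i915_gem_mmap arg = {
 *			.handle = handle,
 *			.offset = 0,
 *			.size = size,
 *		};
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
 *			return NULL;
 *
 *		return (void *)(uintptr_t)arg.addr_ptr;
 *	}
 */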
1354
Jesse Barnesde151cf2008-11-12 10:03:55 -08001355/**
1356 * i915_gem_fault - fault a page into the GTT
1357 * vma: VMA in question
1358 * vmf: fault info
1359 *
1360 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1361 * from userspace. The fault handler takes care of binding the object to
1362 * the GTT (if needed), allocating and programming a fence register (again,
1363 * only if needed based on whether the old reg is still valid or the object
1364 * is tiled) and inserting a new PTE into the faulting process.
1365 *
1366 * Note that the faulting process may involve evicting existing objects
1367 * from the GTT and/or fence registers to make room. So performance may
1368 * suffer if the GTT working set is large or there are few fence registers
1369 * left.
1370 */
1371int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1372{
Chris Wilson05394f32010-11-08 19:18:58 +00001373 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1374 struct drm_device *dev = obj->base.dev;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001375 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001376 pgoff_t page_offset;
1377 unsigned long pfn;
1378 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001379 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001380
Paulo Zanonif65c9162013-11-27 18:20:34 -02001381 intel_runtime_pm_get(dev_priv);
1382
Jesse Barnesde151cf2008-11-12 10:03:55 -08001383 /* We don't use vmf->pgoff since that has the fake offset */
1384 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1385 PAGE_SHIFT;
1386
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001387 ret = i915_mutex_lock_interruptible(dev);
1388 if (ret)
1389 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001390
Chris Wilsondb53a302011-02-03 11:57:46 +00001391 trace_i915_gem_object_fault(obj, page_offset, true, write);
1392
Chris Wilson6e4930f2014-02-07 18:37:06 -02001393 /* Try to flush the object off the GPU first without holding the lock.
1394 * Upon reacquiring the lock, we will perform our sanity checks and then
1395 * repeat the flush holding the lock in the normal manner to catch cases
1396 * where we are gazumped.
1397 */
1398 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1399 if (ret)
1400 goto unlock;
1401
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001402 /* Access to snoopable pages through the GTT is incoherent. */
1403 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1404 ret = -EINVAL;
1405 goto unlock;
1406 }
1407
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001408 /* Now bind it into the GTT if needed */
Daniel Vetter1ec9e262014-02-14 14:01:11 +01001409 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001410 if (ret)
1411 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001412
Chris Wilsonc9839302012-11-20 10:45:17 +00001413 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1414 if (ret)
1415 goto unpin;
1416
1417 ret = i915_gem_object_get_fence(obj);
1418 if (ret)
1419 goto unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001420
Chris Wilson6299f992010-11-24 12:23:44 +00001421 obj->fault_mappable = true;
1422
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001423 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1424 pfn >>= PAGE_SHIFT;
1425 pfn += page_offset;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001426
1427 /* Finally, remap it using the new GTT offset */
1428 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc9839302012-11-20 10:45:17 +00001429unpin:
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08001430 i915_gem_object_ggtt_unpin(obj);
Chris Wilsonc7150892009-09-23 00:43:56 +01001431unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001432 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001433out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001434 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001435 case -EIO:
Daniel Vettera9340cc2012-07-04 22:18:42 +02001436 /* If this -EIO is due to a gpu hang, give the reset code a
1437 * chance to clean up the mess. Otherwise return the proper
1438 * SIGBUS. */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001439 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
1440 ret = VM_FAULT_SIGBUS;
1441 break;
1442 }
Chris Wilson045e7692010-11-07 09:18:22 +00001443 case -EAGAIN:
Daniel Vetter571c6082013-09-12 17:57:28 +02001444 /*
1445 * EAGAIN means the gpu is hung and we'll wait for the error
1446 * handler to reset everything when re-faulting in
1447 * i915_mutex_lock_interruptible.
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001448 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001449 case 0:
1450 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001451 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001452 case -EBUSY:
1453 /*
1454 * EBUSY is ok: this just means that another thread
1455 * already did the job.
1456 */
Paulo Zanonif65c9162013-11-27 18:20:34 -02001457 ret = VM_FAULT_NOPAGE;
1458 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001459 case -ENOMEM:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001460 ret = VM_FAULT_OOM;
1461 break;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001462 case -ENOSPC:
Chris Wilson45d67812014-01-31 11:34:57 +00001463 case -EFAULT:
Paulo Zanonif65c9162013-11-27 18:20:34 -02001464 ret = VM_FAULT_SIGBUS;
1465 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001466 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001467 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Paulo Zanonif65c9162013-11-27 18:20:34 -02001468 ret = VM_FAULT_SIGBUS;
1469 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001470 }
Paulo Zanonif65c9162013-11-27 18:20:34 -02001471
1472 intel_runtime_pm_put(dev_priv);
1473 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001474}
1475
Paulo Zanoni48018a52013-12-13 15:22:31 -02001476void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1477{
1478 struct i915_vma *vma;
1479
1480 /*
1481 * Only the global gtt is relevant for gtt memory mappings, so restrict
1482 * list traversal to objects bound into the global address space. Note
1483 * that the active list should be empty, but better safe than sorry.
1484 */
1485 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1486 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1487 i915_gem_release_mmap(vma->obj);
1488 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1489 i915_gem_release_mmap(vma->obj);
1490}
1491
Jesse Barnesde151cf2008-11-12 10:03:55 -08001492/**
Chris Wilson901782b2009-07-10 08:18:50 +01001493 * i915_gem_release_mmap - remove physical page mappings
1494 * @obj: obj in question
1495 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001496 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001497 * relinquish ownership of the pages back to the system.
1498 *
1499 * It is vital that we remove the page mapping if we have mapped a tiled
1500 * object through the GTT and then lose the fence register due to
1501 * resource pressure. Similarly if the object has been moved out of the
1502 * aperture, then pages mapped into userspace must be revoked. Removing the
1503 * mapping will then trigger a page fault on the next user access, allowing
1504 * fixup by i915_gem_fault().
1505 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001506void
Chris Wilson05394f32010-11-08 19:18:58 +00001507i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001508{
Chris Wilson6299f992010-11-24 12:23:44 +00001509 if (!obj->fault_mappable)
1510 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001511
David Herrmann51335df2013-07-24 21:10:03 +02001512 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
Chris Wilson6299f992010-11-24 12:23:44 +00001513 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001514}
1515
Imre Deak0fa87792013-01-07 21:47:35 +02001516uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001517i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001518{
Chris Wilsone28f8712011-07-18 13:11:49 -07001519 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001520
1521 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001522 tiling_mode == I915_TILING_NONE)
1523 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001524
1525 /* Previous chips need a power-of-two fence region when tiling */
1526 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001527 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001528 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001529 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001530
Chris Wilsone28f8712011-07-18 13:11:49 -07001531 while (gtt_size < size)
1532 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001533
Chris Wilsone28f8712011-07-18 13:11:49 -07001534 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001535}
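/*
 * Worked example: on gen3 a 1300 KiB tiled object needs a power-of-two
 * fence region, so the loop above doubles 1 MiB to 2 MiB and returns 2 MiB;
 * on gen4+ (or for untiled objects) the object size is returned unchanged.
 */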
1536
Jesse Barnesde151cf2008-11-12 10:03:55 -08001537/**
1538 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1539 * @obj: object to check
1540 *
1541 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001542 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001543 */
Imre Deakd865110c2013-01-07 21:47:33 +02001544uint32_t
1545i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1546 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001547{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001548 /*
1549 * Minimum alignment is 4k (GTT page size), but might be greater
1550 * if a fence register is needed for the object.
1551 */
Imre Deakd865110c2013-01-07 21:47:33 +02001552 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001553 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001554 return 4096;
1555
1556 /*
1557 * Previous chips need to be aligned to the size of the smallest
1558 * fence register that can contain the object.
1559 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001560 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001561}
1562
Chris Wilsond8cb5082012-08-11 15:41:03 +01001563static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1564{
1565 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1566 int ret;
1567
David Herrmann0de23972013-07-24 21:07:52 +02001568 if (drm_vma_node_has_offset(&obj->base.vma_node))
Chris Wilsond8cb5082012-08-11 15:41:03 +01001569 return 0;
1570
Daniel Vetterda494d72012-12-20 15:11:16 +01001571 dev_priv->mm.shrinker_no_lock_stealing = true;
1572
Chris Wilsond8cb5082012-08-11 15:41:03 +01001573 ret = drm_gem_create_mmap_offset(&obj->base);
1574 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001575 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001576
1577 /* Badly fragmented mmap space? The only way we can recover
1578 * space is by destroying unwanted objects. We can't randomly release
1579 * mmap_offsets as userspace expects them to be persistent for the
1580 * lifetime of the objects. The closest we can do is to release the
1581 * offsets on purgeable objects by truncating them and marking them purged,
1582 * which prevents userspace from ever using that object again.
1583 */
1584 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1585 ret = drm_gem_create_mmap_offset(&obj->base);
1586 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001587 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001588
1589 i915_gem_shrink_all(dev_priv);
Daniel Vetterda494d72012-12-20 15:11:16 +01001590 ret = drm_gem_create_mmap_offset(&obj->base);
1591out:
1592 dev_priv->mm.shrinker_no_lock_stealing = false;
1593
1594 return ret;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001595}
1596
1597static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1598{
Chris Wilsond8cb5082012-08-11 15:41:03 +01001599 drm_gem_free_mmap_offset(&obj->base);
1600}
1601
Jesse Barnesde151cf2008-11-12 10:03:55 -08001602int
Dave Airlieff72145b2011-02-07 12:16:14 +10001603i915_gem_mmap_gtt(struct drm_file *file,
1604 struct drm_device *dev,
1605 uint32_t handle,
1606 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001607{
Chris Wilsonda761a62010-10-27 17:37:08 +01001608 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001609 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001610 int ret;
1611
Chris Wilson76c1dec2010-09-25 11:22:51 +01001612 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001613 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001614 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001615
Dave Airlieff72145b2011-02-07 12:16:14 +10001616 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001617 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001618 ret = -ENOENT;
1619 goto unlock;
1620 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001621
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001622 if (obj->base.size > dev_priv->gtt.mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001623 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001624 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001625 }
1626
Chris Wilson05394f32010-11-08 19:18:58 +00001627 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00001628 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00001629 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001630 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001631 }
1632
Chris Wilsond8cb5082012-08-11 15:41:03 +01001633 ret = i915_gem_object_create_mmap_offset(obj);
1634 if (ret)
1635 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001636
David Herrmann0de23972013-07-24 21:07:52 +02001637 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001638
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001639out:
Chris Wilson05394f32010-11-08 19:18:58 +00001640 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001641unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001642 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001643 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001644}
1645
Dave Airlieff72145b2011-02-07 12:16:14 +10001646/**
1647 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1648 * @dev: DRM device
1649 * @data: GTT mapping ioctl data
1650 * @file: GEM object info
1651 *
1652 * Simply returns the fake offset to userspace so it can mmap it.
1653 * The mmap call will end up in drm_gem_mmap(), which will set things
1654 * up so we can get faults in the handler above.
1655 *
1656 * The fault handler will take care of binding the object into the GTT
1657 * (since it may have been evicted to make room for something), allocating
1658 * a fence register, and mapping the appropriate aperture address into
1659 * userspace.
1660 */
1661int
1662i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1663 struct drm_file *file)
1664{
1665 struct drm_i915_gem_mmap_gtt *args = data;
1666
Dave Airlieff72145b2011-02-07 12:16:14 +10001667 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1668}
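/*
 * Illustrative userspace-side sketch (helper name invented for this
 * example): the fake offset returned by the ioctl is only meaningful when
 * passed back to mmap() on the DRM fd; the first access to the mapping
 * then faults into i915_gem_fault() above.
 *
 *	#include <sys/mman.h>
 *
 *	static void *example_gtt_mmap(int drm_fd, uint32_t handle, size_t size)
 *	{
 *		struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
 *			return MAP_FAILED;
 *
 *		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			    drm_fd, arg.offset);
 *	}
 */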
1669
Daniel Vetter225067e2012-08-20 10:23:20 +02001670/* Immediately discard the backing storage */
1671static void
1672i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001673{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001674 struct inode *inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001675
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001676 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001677
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001678 if (obj->base.filp == NULL)
1679 return;
1680
Daniel Vetter225067e2012-08-20 10:23:20 +02001681 /* Our goal here is to return as much of the memory as
1682 * is possible back to the system as we are called from OOM.
1683 * To do this we must instruct the shmfs to drop all of its
1684 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01001685 */
Al Viro496ad9a2013-01-23 17:07:38 -05001686 inode = file_inode(obj->base.filp);
Daniel Vetter225067e2012-08-20 10:23:20 +02001687 shmem_truncate_range(inode, 0, (loff_t)-1);
Hugh Dickins5949eac2011-06-27 16:18:18 -07001688
Daniel Vetter225067e2012-08-20 10:23:20 +02001689 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001690}
Chris Wilsone5281cc2010-10-28 13:45:36 +01001691
Daniel Vetter225067e2012-08-20 10:23:20 +02001692static inline int
1693i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1694{
1695 return obj->madv == I915_MADV_DONTNEED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001696}
1697
Chris Wilson5cdf5882010-09-27 15:51:07 +01001698static void
Chris Wilson05394f32010-11-08 19:18:58 +00001699i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001700{
Imre Deak90797e62013-02-18 19:28:03 +02001701 struct sg_page_iter sg_iter;
1702 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02001703
Chris Wilson05394f32010-11-08 19:18:58 +00001704 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001705
Chris Wilson6c085a72012-08-20 11:40:46 +02001706 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1707 if (ret) {
1708 /* In the event of a disaster, abandon all caches and
1709 * hope for the best.
1710 */
1711 WARN_ON(ret != -EIO);
Chris Wilson2c225692013-08-09 12:26:45 +01001712 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02001713 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1714 }
1715
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001716 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001717 i915_gem_object_save_bit_17_swizzle(obj);
1718
Chris Wilson05394f32010-11-08 19:18:58 +00001719 if (obj->madv == I915_MADV_DONTNEED)
1720 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001721
Imre Deak90797e62013-02-18 19:28:03 +02001722 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
Imre Deak2db76d72013-03-26 15:14:18 +02001723 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +01001724
Chris Wilson05394f32010-11-08 19:18:58 +00001725 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01001726 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001727
Chris Wilson05394f32010-11-08 19:18:58 +00001728 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01001729 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001730
Chris Wilson9da3da62012-06-01 15:20:22 +01001731 page_cache_release(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001732 }
Chris Wilson05394f32010-11-08 19:18:58 +00001733 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001734
Chris Wilson9da3da62012-06-01 15:20:22 +01001735 sg_free_table(obj->pages);
1736 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01001737}
1738
Chris Wilsondd624af2013-01-15 12:39:35 +00001739int
Chris Wilson37e680a2012-06-07 15:38:42 +01001740i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1741{
1742 const struct drm_i915_gem_object_ops *ops = obj->ops;
1743
Chris Wilson2f745ad2012-09-04 21:02:58 +01001744 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01001745 return 0;
1746
Chris Wilsona5570172012-09-04 21:02:54 +01001747 if (obj->pages_pin_count)
1748 return -EBUSY;
1749
Ben Widawsky98438772013-07-31 17:00:12 -07001750 BUG_ON(i915_gem_obj_bound_any(obj));
Ben Widawsky3e123022013-07-31 17:00:04 -07001751
Chris Wilsona2165e32012-12-03 11:49:00 +00001752 /* ->put_pages might need to allocate memory for the bit17 swizzle
1753 * array, hence protect them from being reaped by removing them from gtt
1754 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07001755 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00001756
Chris Wilson37e680a2012-06-07 15:38:42 +01001757 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001758 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02001759
Chris Wilson6c085a72012-08-20 11:40:46 +02001760 if (i915_gem_object_is_purgeable(obj))
1761 i915_gem_object_truncate(obj);
1762
1763 return 0;
1764}
1765
Chris Wilsond9973b42013-10-04 10:33:00 +01001766static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001767__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1768 bool purgeable_only)
Chris Wilson6c085a72012-08-20 11:40:46 +02001769{
Chris Wilson57094f82013-09-04 10:45:50 +01001770 struct list_head still_bound_list;
Chris Wilson6c085a72012-08-20 11:40:46 +02001771 struct drm_i915_gem_object *obj, *next;
Chris Wilsond9973b42013-10-04 10:33:00 +01001772 unsigned long count = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001773
1774 list_for_each_entry_safe(obj, next,
1775 &dev_priv->mm.unbound_list,
Ben Widawsky35c20a62013-05-31 11:28:48 -07001776 global_list) {
Daniel Vetter93927ca2013-01-10 18:03:00 +01001777 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
Chris Wilson37e680a2012-06-07 15:38:42 +01001778 i915_gem_object_put_pages(obj) == 0) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001779 count += obj->base.size >> PAGE_SHIFT;
1780 if (count >= target)
1781 return count;
1782 }
1783 }
1784
Chris Wilson57094f82013-09-04 10:45:50 +01001785 /*
1786 * As we may completely rewrite the bound list whilst unbinding
1787 * (due to retiring requests) we have to strictly process only
1788 * one element of the list at a time, and recheck the list
1789 * on every iteration.
1790 */
1791 INIT_LIST_HEAD(&still_bound_list);
1792 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001793 struct i915_vma *vma, *v;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001794
Chris Wilson57094f82013-09-04 10:45:50 +01001795 obj = list_first_entry(&dev_priv->mm.bound_list,
1796 typeof(*obj), global_list);
1797 list_move_tail(&obj->global_list, &still_bound_list);
1798
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001799 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1800 continue;
1801
Chris Wilson57094f82013-09-04 10:45:50 +01001802 /*
1803 * Hold a reference whilst we unbind this object, as we may
1804 * end up waiting for and retiring requests. This might
1805 * release the final reference (held by the active list)
1806 * and result in this object being freed from under us.
1808 *
1809 * Note 1: Shrinking the bound list is special since only active
1810 * (and hence bound) objects can contain such limbo objects, so
1811 * we don't need special tricks for shrinking the unbound list.
1812 * The only other place where we have to be careful with active
1813 * objects suddenly disappearing due to retiring requests is the
1814 * eviction code.
1815 *
1816 * Note 2: Even though the bound list doesn't hold a reference
1817 * to the object we can safely grab one here: The final object
1818 * unreferencing and the bound_list are both protected by the
1819 * dev->struct_mutex and so we won't ever be able to observe an
1820 * object on the bound_list with a reference count of 0.
1821 */
1822 drm_gem_object_reference(&obj->base);
1823
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001824 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1825 if (i915_vma_unbind(vma))
1826 break;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001827
Chris Wilson57094f82013-09-04 10:45:50 +01001828 if (i915_gem_object_put_pages(obj) == 0)
Chris Wilson6c085a72012-08-20 11:40:46 +02001829 count += obj->base.size >> PAGE_SHIFT;
Chris Wilson57094f82013-09-04 10:45:50 +01001830
1831 drm_gem_object_unreference(&obj->base);
Chris Wilson6c085a72012-08-20 11:40:46 +02001832 }
Chris Wilson57094f82013-09-04 10:45:50 +01001833 list_splice(&still_bound_list, &dev_priv->mm.bound_list);
Chris Wilson6c085a72012-08-20 11:40:46 +02001834
1835 return count;
1836}
1837
Chris Wilsond9973b42013-10-04 10:33:00 +01001838static unsigned long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001839i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1840{
1841 return __i915_gem_shrink(dev_priv, target, true);
1842}
1843
Chris Wilsond9973b42013-10-04 10:33:00 +01001844static unsigned long
Chris Wilson6c085a72012-08-20 11:40:46 +02001845i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1846{
1847 struct drm_i915_gem_object *obj, *next;
Dave Chinner7dc19d52013-08-28 10:18:11 +10001848 long freed = 0;
Chris Wilson6c085a72012-08-20 11:40:46 +02001849
1850 i915_gem_evict_everything(dev_priv->dev);
1851
Ben Widawsky35c20a62013-05-31 11:28:48 -07001852 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
Dave Chinner7dc19d52013-08-28 10:18:11 +10001853 global_list) {
Chris Wilsond9973b42013-10-04 10:33:00 +01001854 if (i915_gem_object_put_pages(obj) == 0)
Dave Chinner7dc19d52013-08-28 10:18:11 +10001855 freed += obj->base.size >> PAGE_SHIFT;
Dave Chinner7dc19d52013-08-28 10:18:11 +10001856 }
1857 return freed;
Daniel Vetter225067e2012-08-20 10:23:20 +02001858}
1859
Chris Wilson37e680a2012-06-07 15:38:42 +01001860static int
Chris Wilson6c085a72012-08-20 11:40:46 +02001861i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001862{
Chris Wilson6c085a72012-08-20 11:40:46 +02001863 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001864 int page_count, i;
1865 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01001866 struct sg_table *st;
1867 struct scatterlist *sg;
Imre Deak90797e62013-02-18 19:28:03 +02001868 struct sg_page_iter sg_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07001869 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02001870 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson6c085a72012-08-20 11:40:46 +02001871 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07001872
Chris Wilson6c085a72012-08-20 11:40:46 +02001873 /* Assert that the object is not currently in any GPU domain. As it
1874 * wasn't in the GTT, there shouldn't be any way it could have been in
1875 * a GPU cache
1876 */
1877 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1878 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1879
Chris Wilson9da3da62012-06-01 15:20:22 +01001880 st = kmalloc(sizeof(*st), GFP_KERNEL);
1881 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07001882 return -ENOMEM;
1883
Chris Wilson9da3da62012-06-01 15:20:22 +01001884 page_count = obj->base.size / PAGE_SIZE;
1885 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01001886 kfree(st);
1887 return -ENOMEM;
1888 }
1889
1890 /* Get the list of pages out of our struct file. They'll be pinned
1891 * at this point until we release them.
1892 *
1893 * Fail silently without starting the shrinker
1894 */
Al Viro496ad9a2013-01-23 17:07:38 -05001895 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilson6c085a72012-08-20 11:40:46 +02001896 gfp = mapping_gfp_mask(mapping);
Linus Torvaldscaf49192012-12-10 10:51:16 -08001897 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001898 gfp &= ~(__GFP_IO | __GFP_WAIT);
Imre Deak90797e62013-02-18 19:28:03 +02001899 sg = st->sgl;
1900 st->nents = 0;
1901 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001902 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1903 if (IS_ERR(page)) {
1904 i915_gem_purge(dev_priv, page_count);
1905 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1906 }
1907 if (IS_ERR(page)) {
1908 /* We've tried hard to allocate the memory by reaping
1909 * our own buffer, now let the real VM do its job and
1910 * go down in flames if truly OOM.
1911 */
Linus Torvaldscaf49192012-12-10 10:51:16 -08001912 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
Chris Wilson6c085a72012-08-20 11:40:46 +02001913 gfp |= __GFP_IO | __GFP_WAIT;
1914
1915 i915_gem_shrink_all(dev_priv);
1916 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1917 if (IS_ERR(page))
1918 goto err_pages;
1919
Linus Torvaldscaf49192012-12-10 10:51:16 -08001920 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001921 gfp &= ~(__GFP_IO | __GFP_WAIT);
1922 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001923#ifdef CONFIG_SWIOTLB
1924 if (swiotlb_nr_tbl()) {
1925 st->nents++;
1926 sg_set_page(sg, page, PAGE_SIZE, 0);
1927 sg = sg_next(sg);
1928 continue;
1929 }
1930#endif
Imre Deak90797e62013-02-18 19:28:03 +02001931 if (!i || page_to_pfn(page) != last_pfn + 1) {
1932 if (i)
1933 sg = sg_next(sg);
1934 st->nents++;
1935 sg_set_page(sg, page, PAGE_SIZE, 0);
1936 } else {
1937 sg->length += PAGE_SIZE;
1938 }
1939 last_pfn = page_to_pfn(page);
Daniel Vetter3bbbe702013-10-07 17:15:45 -03001940
1941 /* Check that the i965g/gm workaround works. */
1942 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
Eric Anholt673a3942008-07-30 12:06:12 -07001943 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001944#ifdef CONFIG_SWIOTLB
1945 if (!swiotlb_nr_tbl())
1946#endif
1947 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01001948 obj->pages = st;
1949
Eric Anholt673a3942008-07-30 12:06:12 -07001950 if (i915_gem_object_needs_bit17_swizzle(obj))
1951 i915_gem_object_do_bit_17_swizzle(obj);
1952
1953 return 0;
1954
1955err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02001956 sg_mark_end(sg);
1957 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
Imre Deak2db76d72013-03-26 15:14:18 +02001958 page_cache_release(sg_page_iter_page(&sg_iter));
Chris Wilson9da3da62012-06-01 15:20:22 +01001959 sg_free_table(st);
1960 kfree(st);
Eric Anholt673a3942008-07-30 12:06:12 -07001961 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07001962}
1963
Chris Wilson37e680a2012-06-07 15:38:42 +01001964/* Ensure that the associated pages are gathered from the backing storage
1965 * and pinned into our object. i915_gem_object_get_pages() may be called
1966 * multiple times before they are released by a single call to
1967 * i915_gem_object_put_pages() - once the pages are no longer referenced
1968 * either as a result of memory pressure (reaping pages under the shrinker)
1969 * or as the object is itself released.
1970 */
1971int
1972i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1973{
1974 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1975 const struct drm_i915_gem_object_ops *ops = obj->ops;
1976 int ret;
1977
Chris Wilson2f745ad2012-09-04 21:02:58 +01001978 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01001979 return 0;
1980
Chris Wilson43e28f02013-01-08 10:53:09 +00001981 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00001982 DRM_DEBUG("Attempting to obtain a purgeable object\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00001983 return -EFAULT;
Chris Wilson43e28f02013-01-08 10:53:09 +00001984 }
1985
Chris Wilsona5570172012-09-04 21:02:54 +01001986 BUG_ON(obj->pages_pin_count);
1987
Chris Wilson37e680a2012-06-07 15:38:42 +01001988 ret = ops->get_pages(obj);
1989 if (ret)
1990 return ret;
1991
Ben Widawsky35c20a62013-05-31 11:28:48 -07001992 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilson37e680a2012-06-07 15:38:42 +01001993 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001994}
1995
Ben Widawskye2d05a82013-09-24 09:57:58 -07001996static void
Chris Wilson05394f32010-11-08 19:18:58 +00001997i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson9d7730912012-11-27 16:22:52 +00001998 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001999{
Chris Wilson05394f32010-11-08 19:18:58 +00002000 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01002001 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9d7730912012-11-27 16:22:52 +00002002 u32 seqno = intel_ring_get_seqno(ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01002003
Zou Nan hai852835f2010-05-21 09:08:56 +08002004 BUG_ON(ring == NULL);
Chris Wilson02978ff2013-07-09 09:22:39 +01002005 if (obj->ring != ring && obj->last_write_seqno) {
2006 /* Keep the seqno relative to the current ring */
2007 obj->last_write_seqno = seqno;
2008 }
Chris Wilson05394f32010-11-08 19:18:58 +00002009 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07002010
2011 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00002012 if (!obj->active) {
2013 drm_gem_object_reference(&obj->base);
2014 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07002015 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01002016
Chris Wilson05394f32010-11-08 19:18:58 +00002017 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002018
Chris Wilson0201f1e2012-07-20 12:41:01 +01002019 obj->last_read_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00002020
Chris Wilsoncaea7472010-11-12 13:53:37 +00002021 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00002022 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002023
Chris Wilson7dd49062012-03-21 10:48:18 +00002024 /* Bump MRU to take account of the delayed flush */
2025 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2026 struct drm_i915_fence_reg *reg;
2027
2028 reg = &dev_priv->fence_regs[obj->fence_reg];
2029 list_move_tail(&reg->lru_list,
2030 &dev_priv->mm.fence_list);
2031 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002032 }
2033}
2034
Ben Widawskye2d05a82013-09-24 09:57:58 -07002035void i915_vma_move_to_active(struct i915_vma *vma,
2036 struct intel_ring_buffer *ring)
2037{
2038 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2039 return i915_gem_object_move_to_active(vma->obj, ring);
2040}
2041
Chris Wilsoncaea7472010-11-12 13:53:37 +00002042static void
Chris Wilsoncaea7472010-11-12 13:53:37 +00002043i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2044{
Ben Widawskyca191b12013-07-31 17:00:14 -07002045 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002046 struct i915_address_space *vm;
2047 struct i915_vma *vma;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002048
Chris Wilson65ce3022012-07-20 12:41:02 +01002049 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002050 BUG_ON(!obj->active);
Chris Wilson65ce3022012-07-20 12:41:02 +01002051
Ben Widawskyfeb822c2013-12-06 14:10:51 -08002052 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2053 vma = i915_gem_obj_to_vma(obj, vm);
2054 if (vma && !list_empty(&vma->mm_list))
2055 list_move_tail(&vma->mm_list, &vm->inactive_list);
2056 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00002057
Chris Wilson65ce3022012-07-20 12:41:02 +01002058 list_del_init(&obj->ring_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002059 obj->ring = NULL;
2060
Chris Wilson65ce3022012-07-20 12:41:02 +01002061 obj->last_read_seqno = 0;
2062 obj->last_write_seqno = 0;
2063 obj->base.write_domain = 0;
2064
2065 obj->last_fenced_seqno = 0;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002066 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002067
2068 obj->active = 0;
2069 drm_gem_object_unreference(&obj->base);
2070
2071 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08002072}
Eric Anholt673a3942008-07-30 12:06:12 -07002073
Chris Wilson9d7730912012-11-27 16:22:52 +00002074static int
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002075i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002076{
Chris Wilson9d7730912012-11-27 16:22:52 +00002077 struct drm_i915_private *dev_priv = dev->dev_private;
2078 struct intel_ring_buffer *ring;
2079 int ret, i, j;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002080
Chris Wilson107f27a52012-12-10 13:56:17 +02002081 /* Carefully retire all requests without writing to the rings */
Chris Wilson9d7730912012-11-27 16:22:52 +00002082 for_each_ring(ring, dev_priv, i) {
Chris Wilson107f27a52012-12-10 13:56:17 +02002083 ret = intel_ring_idle(ring);
2084 if (ret)
2085 return ret;
Chris Wilson9d7730912012-11-27 16:22:52 +00002086 }
Chris Wilson9d7730912012-11-27 16:22:52 +00002087 i915_gem_retire_requests(dev);
Chris Wilson107f27a52012-12-10 13:56:17 +02002088
2089 /* Finally reset hw state */
Chris Wilson9d7730912012-11-27 16:22:52 +00002090 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002091 intel_ring_init_seqno(ring, seqno);
Mika Kuoppala498d2ac2012-12-04 15:12:04 +02002092
Chris Wilson9d7730912012-11-27 16:22:52 +00002093 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2094 ring->sync_seqno[j] = 0;
2095 }
2096
2097 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002098}
2099
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002100int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2101{
2102 struct drm_i915_private *dev_priv = dev->dev_private;
2103 int ret;
2104
2105 if (seqno == 0)
2106 return -EINVAL;
2107
2108 /* HWS page needs to be set less than what we
2109 * will inject to ring
2110 */
2111 ret = i915_gem_init_seqno(dev, seqno - 1);
2112 if (ret)
2113 return ret;
2114
2115 /* Carefully set the last_seqno value so that wrap
2116 * detection still works
2117 */
2118 dev_priv->next_seqno = seqno;
2119 dev_priv->last_seqno = seqno - 1;
2120 if (dev_priv->last_seqno == 0)
2121 dev_priv->last_seqno--;
2122
2123 return 0;
2124}
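/*
 * Worked example: asking for seqno 0xfffffff0 (say, to exercise wrap
 * handling) makes i915_gem_init_seqno() idle every ring and prime the
 * hardware status pages with 0xffffffef; next_seqno then becomes
 * 0xfffffff0 and last_seqno 0xffffffef, so seqno comparisons stay
 * consistent when the counter wraps a few requests later.
 */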
2125
Chris Wilson9d7730912012-11-27 16:22:52 +00002126int
2127i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002128{
Chris Wilson9d7730912012-11-27 16:22:52 +00002129 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002130
Chris Wilson9d7730912012-11-27 16:22:52 +00002131 /* reserve 0 for non-seqno */
2132 if (dev_priv->next_seqno == 0) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002133 int ret = i915_gem_init_seqno(dev, 0);
Chris Wilson9d7730912012-11-27 16:22:52 +00002134 if (ret)
2135 return ret;
2136
2137 dev_priv->next_seqno = 1;
2138 }
2139
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02002140 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
Chris Wilson9d7730912012-11-27 16:22:52 +00002141 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002142}
2143
Mika Kuoppala0025c072013-06-12 12:35:30 +03002144int __i915_add_request(struct intel_ring_buffer *ring,
2145 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002146 struct drm_i915_gem_object *obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002147 u32 *out_seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002148{
Chris Wilsondb53a302011-02-03 11:57:46 +00002149 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Chris Wilsonacb868d2012-09-26 13:47:30 +01002150 struct drm_i915_gem_request *request;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002151 u32 request_ring_position, request_start;
Chris Wilson3cce4692010-10-27 16:11:02 +01002152 int ret;
2153
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002154 request_start = intel_ring_get_tail(ring);
Daniel Vettercc889e02012-06-13 20:45:19 +02002155 /*
2156 * Emit any outstanding flushes - execbuf can fail to emit the flush
2157 * after having emitted the batchbuffer command. Hence we need to fix
2158 * things up similar to emitting the lazy request. The difference here
2159 * is that the flush _must_ happen before the next request, no matter
2160 * what.
2161 */
Chris Wilsona7b97612012-07-20 12:41:08 +01002162 ret = intel_ring_flush_all_caches(ring);
2163 if (ret)
2164 return ret;
Daniel Vettercc889e02012-06-13 20:45:19 +02002165
Chris Wilson3c0e2342013-09-04 10:45:52 +01002166 request = ring->preallocated_lazy_request;
2167 if (WARN_ON(request == NULL))
Chris Wilsonacb868d2012-09-26 13:47:30 +01002168 return -ENOMEM;
Daniel Vettercc889e02012-06-13 20:45:19 +02002169
Chris Wilsona71d8d92012-02-15 11:25:36 +00002170 /* Record the position of the start of the request so that
2171 * should we detect the updated seqno part-way through the
2172 * GPU processing the request, we never over-estimate the
2173 * position of the head.
2174 */
2175 request_ring_position = intel_ring_get_tail(ring);
2176
Chris Wilson9d7730912012-11-27 16:22:52 +00002177 ret = ring->add_request(ring);
Chris Wilson3c0e2342013-09-04 10:45:52 +01002178 if (ret)
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002179 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002180
Chris Wilson9d7730912012-11-27 16:22:52 +00002181 request->seqno = intel_ring_get_seqno(ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08002182 request->ring = ring;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002183 request->head = request_start;
Chris Wilsona71d8d92012-02-15 11:25:36 +00002184 request->tail = request_ring_position;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002185
2186 /* Whilst this request exists, batch_obj will be on the
2187 * active_list, and so will hold the active reference. Only when this
2188 * request is retired will the batch_obj be moved onto the
2189 * inactive_list and lose its active reference. Hence we do not need
2190 * to explicitly hold another reference here.
2191 */
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002192 request->batch_obj = obj;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002193
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002194 /* Hold a reference to the current context so that we can inspect
2195 * it later in case a hangcheck error event fires.
2196 */
2197 request->ctx = ring->last_context;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002198 if (request->ctx)
2199 i915_gem_context_reference(request->ctx);
2200
Eric Anholt673a3942008-07-30 12:06:12 -07002201 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08002202 list_add_tail(&request->list, &ring->request_list);
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002203 request->file_priv = NULL;
Zou Nan hai852835f2010-05-21 09:08:56 +08002204
Chris Wilsondb53a302011-02-03 11:57:46 +00002205 if (file) {
2206 struct drm_i915_file_private *file_priv = file->driver_priv;
2207
Chris Wilson1c255952010-09-26 11:03:27 +01002208 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002209 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002210 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002211 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01002212 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00002213 }
Eric Anholt673a3942008-07-30 12:06:12 -07002214
Chris Wilson9d7730912012-11-27 16:22:52 +00002215 trace_i915_gem_request_add(ring, request->seqno);
Chris Wilson18235212013-09-04 10:45:51 +01002216 ring->outstanding_lazy_seqno = 0;
Chris Wilson3c0e2342013-09-04 10:45:52 +01002217 ring->preallocated_lazy_request = NULL;
Chris Wilsondb53a302011-02-03 11:57:46 +00002218
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002219 if (!dev_priv->ums.mm_suspended) {
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002220 i915_queue_hangcheck(ring->dev);
2221
Chris Wilsonf62a0072014-02-21 17:55:39 +00002222 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2223 queue_delayed_work(dev_priv->wq,
2224 &dev_priv->mm.retire_work,
2225 round_jiffies_up_relative(HZ));
2226 intel_mark_busy(dev_priv->dev);
Ben Gamarif65d9422009-09-14 17:48:44 -04002227 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002228
Chris Wilsonacb868d2012-09-26 13:47:30 +01002229 if (out_seqno)
Chris Wilson9d7730912012-11-27 16:22:52 +00002230 *out_seqno = request->seqno;
Chris Wilson3cce4692010-10-27 16:11:02 +01002231 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002232}
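/*
 * Illustrative sketch of the emit-then-wait pattern built from the helpers
 * above (the function name is made up for illustration). It assumes the
 * caller holds struct_mutex and that the ring code has already reserved a
 * lazy request, as the execbuffer/ring paths do before emitting commands.
 */
static int __maybe_unused example_add_request_and_wait(struct intel_ring_buffer *ring)
{
	u32 seqno;
	int ret;

	/* Flushes caches, emits the breadcrumb and queues the retire work. */
	ret = __i915_add_request(ring, NULL, NULL, &seqno);
	if (ret)
		return ret;

	/* Block until the breadcrumb lands in the hardware status page. */
	return i915_wait_seqno(ring, seqno);
}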
2233
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002234static inline void
2235i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07002236{
Chris Wilson1c255952010-09-26 11:03:27 +01002237 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002238
Chris Wilson1c255952010-09-26 11:03:27 +01002239 if (!file_priv)
2240 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002241
Chris Wilson1c255952010-09-26 11:03:27 +01002242 spin_lock(&file_priv->mm.lock);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002243 list_del(&request->client_list);
2244 request->file_priv = NULL;
Chris Wilson1c255952010-09-26 11:03:27 +01002245 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002246}
2247
Mika Kuoppala939fd762014-01-30 19:04:44 +02002248static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002249 const struct i915_hw_context *ctx)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002250{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002251 unsigned long elapsed;
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002252
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002253 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2254
2255 if (ctx->hang_stats.banned)
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002256 return true;
2257
2258 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
Ville Syrjäläccc7bed2014-02-21 16:26:47 +02002259 if (!i915_gem_context_is_default(ctx)) {
Mika Kuoppala3fac8972014-01-30 16:05:48 +02002260 DRM_DEBUG("context hanging too fast, banning!\n");
Ville Syrjäläccc7bed2014-02-21 16:26:47 +02002261 return true;
2262 } else if (dev_priv->gpu_error.stop_rings == 0) {
2263 DRM_ERROR("gpu hanging too fast, banning!\n");
2264 return true;
Mika Kuoppala3fac8972014-01-30 16:05:48 +02002265 }
Mika Kuoppalabe62acb2013-08-30 16:19:28 +03002266 }
2267
2268 return false;
2269}
2270
Mika Kuoppala939fd762014-01-30 19:04:44 +02002271static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2272 struct i915_hw_context *ctx,
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002273 const bool guilty)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002274{
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002275 struct i915_ctx_hang_stats *hs;
2276
2277 if (WARN_ON(!ctx))
2278 return;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002279
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002280 hs = &ctx->hang_stats;
2281
2282 if (guilty) {
Mika Kuoppala939fd762014-01-30 19:04:44 +02002283 hs->banned = i915_context_is_banned(dev_priv, ctx);
Mika Kuoppala44e2c072014-01-30 16:01:15 +02002284 hs->batch_active++;
2285 hs->guilty_ts = get_seconds();
2286 } else {
2287 hs->batch_pending++;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002288 }
2289}
2290
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002291static void i915_gem_free_request(struct drm_i915_gem_request *request)
2292{
2293 list_del(&request->list);
2294 i915_gem_request_remove_from_client(request);
2295
2296 if (request->ctx)
2297 i915_gem_context_unreference(request->ctx);
2298
2299 kfree(request);
2300}
2301
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002302struct drm_i915_gem_request *
2303i915_gem_find_active_request(struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01002304{
Chris Wilson4db080f2013-12-04 11:37:09 +00002305 struct drm_i915_gem_request *request;
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002306 u32 completed_seqno;
2307
2308 completed_seqno = ring->get_seqno(ring, false);
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002309
Chris Wilson4db080f2013-12-04 11:37:09 +00002310 list_for_each_entry(request, &ring->request_list, list) {
2311 if (i915_seqno_passed(completed_seqno, request->seqno))
2312 continue;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002313
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002314 return request;
Chris Wilson4db080f2013-12-04 11:37:09 +00002315 }
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002316
2317 return NULL;
2318}
2319
2320static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2321 struct intel_ring_buffer *ring)
2322{
2323 struct drm_i915_gem_request *request;
2324 bool ring_hung;
2325
Chris Wilson8d9fc7f2014-02-25 17:11:23 +02002326 request = i915_gem_find_active_request(ring);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002327
2328 if (request == NULL)
2329 return;
2330
2331 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2332
Mika Kuoppala939fd762014-01-30 19:04:44 +02002333 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
Mika Kuoppalab6b0fac2014-01-30 19:04:43 +02002334
2335 list_for_each_entry_continue(request, &ring->request_list, list)
Mika Kuoppala939fd762014-01-30 19:04:44 +02002336 i915_set_reset_status(dev_priv, request->ctx, false);
Chris Wilson4db080f2013-12-04 11:37:09 +00002337}
2338
2339static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2340 struct intel_ring_buffer *ring)
2341{
Chris Wilsondfaae392010-09-22 10:31:52 +01002342 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002343 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07002344
Chris Wilson05394f32010-11-08 19:18:58 +00002345 obj = list_first_entry(&ring->active_list,
2346 struct drm_i915_gem_object,
2347 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002348
Chris Wilson05394f32010-11-08 19:18:58 +00002349 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002350 }
Ben Widawsky1d62bee2014-01-01 10:15:13 -08002351
2352 /*
2353 * We must free the requests after all the corresponding objects have
2354 * been moved off active lists, which is the same order in which the
2355 * normal retire_requests path frees them. This is important because
2356 * objects may hold implicit references on things like ppgtt address
2357 * spaces through the request.
2358 */
2359 while (!list_empty(&ring->request_list)) {
2360 struct drm_i915_gem_request *request;
2361
2362 request = list_first_entry(&ring->request_list,
2363 struct drm_i915_gem_request,
2364 list);
2365
2366 i915_gem_free_request(request);
2367 }
Eric Anholt673a3942008-07-30 12:06:12 -07002368}
2369
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002370void i915_gem_restore_fences(struct drm_device *dev)
Chris Wilson312817a2010-11-22 11:50:11 +00002371{
2372 struct drm_i915_private *dev_priv = dev->dev_private;
2373 int i;
2374
Daniel Vetter4b9de732011-10-09 21:52:02 +02002375 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00002376 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00002377
Daniel Vetter94a335d2013-07-17 14:51:28 +02002378 /*
2379 * Commit delayed tiling changes if we have an object still
2380 * attached to the fence, otherwise just clear the fence.
2381 */
2382 if (reg->obj) {
2383 i915_gem_object_update_fence(reg->obj, reg,
2384 reg->obj->tiling_mode);
2385 } else {
2386 i915_gem_write_fence(dev, i, NULL);
2387 }
Chris Wilson312817a2010-11-22 11:50:11 +00002388 }
2389}
2390
Chris Wilson069efc12010-09-30 16:53:18 +01002391void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002392{
Chris Wilsondfaae392010-09-22 10:31:52 +01002393 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002394 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002395 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002396
Chris Wilson4db080f2013-12-04 11:37:09 +00002397 /*
2398 * Before we free the objects from the requests, we need to inspect
2399 * them to find the guilty party. As the requests only borrow
2400 * their reference to the objects, the inspection must be done first.
2401 */
Chris Wilsonb4519512012-05-11 14:29:30 +01002402 for_each_ring(ring, dev_priv, i)
Chris Wilson4db080f2013-12-04 11:37:09 +00002403 i915_gem_reset_ring_status(dev_priv, ring);
2404
2405 for_each_ring(ring, dev_priv, i)
2406 i915_gem_reset_ring_cleanup(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01002407
Ben Widawsky3d57e5b2013-10-14 10:01:36 -07002408 i915_gem_cleanup_ringbuffer(dev);
2409
Ben Widawskyacce9ff2013-12-06 14:11:03 -08002410 i915_gem_context_reset(dev);
2411
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002412 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002413}
2414
2415/**
2416 * This function clears the request list as sequence numbers are passed.
2417 */
Damien Lespiaucb216aa2014-03-03 17:42:36 +00002418static void
Chris Wilsondb53a302011-02-03 11:57:46 +00002419i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002420{
Eric Anholt673a3942008-07-30 12:06:12 -07002421 uint32_t seqno;
2422
Chris Wilsondb53a302011-02-03 11:57:46 +00002423 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01002424 return;
2425
Chris Wilsondb53a302011-02-03 11:57:46 +00002426 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002427
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01002428 seqno = ring->get_seqno(ring, true);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002429
Chris Wilsone9103032014-01-07 11:45:14 +00002430 /* Move any buffers on the active list that are no longer referenced
2431 * by the ringbuffer to the flushing/inactive lists as appropriate,
2432 * before we free the context associated with the requests.
2433 */
2434 while (!list_empty(&ring->active_list)) {
2435 struct drm_i915_gem_object *obj;
2436
2437 obj = list_first_entry(&ring->active_list,
2438 struct drm_i915_gem_object,
2439 ring_list);
2440
2441 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2442 break;
2443
2444 i915_gem_object_move_to_inactive(obj);
2445 }
2446
2447
Zou Nan hai852835f2010-05-21 09:08:56 +08002448 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002449 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07002450
Zou Nan hai852835f2010-05-21 09:08:56 +08002451 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07002452 struct drm_i915_gem_request,
2453 list);
Eric Anholt673a3942008-07-30 12:06:12 -07002454
Chris Wilsondfaae392010-09-22 10:31:52 +01002455 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07002456 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002457
Chris Wilsondb53a302011-02-03 11:57:46 +00002458 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002459 /* We know the GPU must have read the request to have
2460 * sent us the seqno + interrupt, so use the position
2461 * of the tail of the request to update the last known position
2462 * of the GPU head.
2463 */
2464 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002465
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002466 i915_gem_free_request(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002467 }
2468
Chris Wilsondb53a302011-02-03 11:57:46 +00002469 if (unlikely(ring->trace_irq_seqno &&
2470 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002471 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00002472 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002473 }
Chris Wilson23bc5982010-09-29 16:10:57 +01002474
Chris Wilsondb53a302011-02-03 11:57:46 +00002475 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002476}
2477
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002478bool
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002479i915_gem_retire_requests(struct drm_device *dev)
2480{
2481 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002482 struct intel_ring_buffer *ring;
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002483 bool idle = true;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002484 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002485
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002486 for_each_ring(ring, dev_priv, i) {
Chris Wilsonb4519512012-05-11 14:29:30 +01002487 i915_gem_retire_requests_ring(ring);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002488 idle &= list_empty(&ring->request_list);
2489 }
2490
2491 if (idle)
2492 mod_delayed_work(dev_priv->wq,
2493 &dev_priv->mm.idle_work,
2494 msecs_to_jiffies(100));
2495
2496 return idle;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002497}
2498
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002499static void
Eric Anholt673a3942008-07-30 12:06:12 -07002500i915_gem_retire_work_handler(struct work_struct *work)
2501{
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002502 struct drm_i915_private *dev_priv =
2503 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2504 struct drm_device *dev = dev_priv->dev;
Chris Wilson0a587052011-01-09 21:05:44 +00002505 bool idle;
Eric Anholt673a3942008-07-30 12:06:12 -07002506
Chris Wilson891b48c2010-09-29 12:26:37 +01002507 /* Come back later if the device is busy... */
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002508 idle = false;
2509 if (mutex_trylock(&dev->struct_mutex)) {
2510 idle = i915_gem_retire_requests(dev);
2511 mutex_unlock(&dev->struct_mutex);
2512 }
2513 if (!idle)
Chris Wilsonbcb45082012-10-05 17:02:57 +01002514 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2515 round_jiffies_up_relative(HZ));
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002516}
Chris Wilson891b48c2010-09-29 12:26:37 +01002517
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002518static void
2519i915_gem_idle_work_handler(struct work_struct *work)
2520{
2521 struct drm_i915_private *dev_priv =
2522 container_of(work, typeof(*dev_priv), mm.idle_work.work);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002523
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002524 intel_mark_idle(dev_priv->dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002525}
2526
Ben Widawsky5816d642012-04-11 11:18:19 -07002527/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002528 * Ensures that an object will eventually get non-busy by flushing any required
2529 * write domains, emitting any outstanding lazy request and retiring any
2530 * completed requests.
2531 */
2532static int
2533i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2534{
2535 int ret;
2536
2537 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002538 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002539 if (ret)
2540 return ret;
2541
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002542 i915_gem_retire_requests_ring(obj->ring);
2543 }
2544
2545 return 0;
2546}
2547
2548/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002549 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2550 * @DRM_IOCTL_ARGS: standard ioctl arguments
2551 *
2552 * Returns 0 if successful, else an error is returned with the remaining time in
2553 * the timeout parameter.
2554 * -ETIME: object is still busy after timeout
2555 * -ERESTARTSYS: signal interrupted the wait
2556 * -ENOENT: object doesn't exist
2557 * Also possible, but rare:
2558 * -EAGAIN: GPU wedged
2559 * -ENOMEM: damn
2560 * -ENODEV: Internal IRQ fail
2561 * -E?: The add request failed
2562 *
2563 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2564 * non-zero timeout parameter the wait ioctl will wait for the given number of
2565 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2566 * without holding struct_mutex the object may become re-busied before this
2567 * function completes. A similar but shorter * race condition exists in the busy
2568 * ioctl
2569 */
2570int
2571i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2572{
Daniel Vetterf69061b2012-12-06 09:01:42 +01002573 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002574 struct drm_i915_gem_wait *args = data;
2575 struct drm_i915_gem_object *obj;
2576 struct intel_ring_buffer *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002577 struct timespec timeout_stack, *timeout = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01002578 unsigned reset_counter;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002579 u32 seqno = 0;
2580 int ret = 0;
2581
Ben Widawskyeac1f142012-06-05 15:24:24 -07002582 if (args->timeout_ns >= 0) {
2583 timeout_stack = ns_to_timespec(args->timeout_ns);
2584 timeout = &timeout_stack;
2585 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002586
2587 ret = i915_mutex_lock_interruptible(dev);
2588 if (ret)
2589 return ret;
2590
2591 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2592 if (&obj->base == NULL) {
2593 mutex_unlock(&dev->struct_mutex);
2594 return -ENOENT;
2595 }
2596
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002597 /* Need to make sure the object gets inactive eventually. */
2598 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002599 if (ret)
2600 goto out;
2601
2602 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002603 seqno = obj->last_read_seqno;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002604 ring = obj->ring;
2605 }
2606
2607 if (seqno == 0)
2608 goto out;
2609
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002610 /* Do this after OLR check to make sure we make forward progress polling
2611 * on this IOCTL with a 0 timeout (like busy ioctl)
2612 */
2613 if (!args->timeout_ns) {
2614 ret = -ETIME;
2615 goto out;
2616 }
2617
2618 drm_gem_object_unreference(&obj->base);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002619 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002620 mutex_unlock(&dev->struct_mutex);
2621
Chris Wilsonb29c19b2013-09-25 17:34:56 +01002622 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03002623 if (timeout)
Ben Widawskyeac1f142012-06-05 15:24:24 -07002624 args->timeout_ns = timespec_to_ns(timeout);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002625 return ret;
2626
2627out:
2628 drm_gem_object_unreference(&obj->base);
2629 mutex_unlock(&dev->struct_mutex);
2630 return ret;
2631}
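
/*
 * Illustrative only, not part of this file: a minimal userspace sketch of
 * driving DRM_IOCTL_I915_GEM_WAIT through libdrm, assuming the DRM fd and
 * the GEM handle were obtained elsewhere. On success the kernel writes the
 * unused budget back into timeout_ns; ETIME means the object was still busy
 * when the budget ran out, and a timeout of 0 degenerates into a busy-ioctl
 * style poll.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000 * 1000 * 1000,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		printf("idle, %lld ns of budget left\n",
 *		       (long long)wait.timeout_ns);
 *	else if (errno == ETIME)
 *		printf("still busy\n");
 */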
2632
2633/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002634 * i915_gem_object_sync - sync an object to a ring.
2635 *
2636 * @obj: object which may be in use on another ring.
2637 * @to: ring we wish to use the object on. May be NULL.
2638 *
2639 * This code is meant to abstract object synchronization with the GPU.
2640 * Calling with NULL implies synchronizing the object with the CPU
2641 * rather than a particular GPU ring.
2642 *
2643 * Returns 0 if successful, else propagates up the lower layer error.
2644 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002645int
2646i915_gem_object_sync(struct drm_i915_gem_object *obj,
2647 struct intel_ring_buffer *to)
2648{
2649 struct intel_ring_buffer *from = obj->ring;
2650 u32 seqno;
2651 int ret, idx;
2652
2653 if (from == NULL || to == from)
2654 return 0;
2655
Ben Widawsky5816d642012-04-11 11:18:19 -07002656 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Chris Wilson0201f1e2012-07-20 12:41:01 +01002657 return i915_gem_object_wait_rendering(obj, false);
Ben Widawsky2911a352012-04-05 14:47:36 -07002658
2659 idx = intel_ring_sync_index(from, to);
2660
Chris Wilson0201f1e2012-07-20 12:41:01 +01002661 seqno = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002662 if (seqno <= from->sync_seqno[idx])
2663 return 0;
2664
Ben Widawskyb4aca012012-04-25 20:50:12 -07002665 ret = i915_gem_check_olr(obj->ring, seqno);
2666 if (ret)
2667 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002668
Chris Wilsonb52b89d2013-09-25 11:43:28 +01002669 trace_i915_gem_ring_sync_to(from, to, seqno);
Ben Widawsky1500f7e2012-04-11 11:18:21 -07002670 ret = to->sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002671 if (!ret)
Mika Kuoppala7b01e262012-11-28 17:18:45 +02002672 /* We use last_read_seqno because sync_to()
2673 * might have just caused seqno wrap under
2674 * the radar.
2675 */
2676 from->sync_seqno[idx] = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002677
Ben Widawskye3a5a222012-04-11 11:18:20 -07002678 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002679}
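
/*
 * Illustrative only: the typical caller pattern, e.g. the execbuffer path
 * syncing each buffer to the ring that is about to consume it:
 *
 *	ret = i915_gem_object_sync(obj, ring);
 *	if (ret)
 *		return ret;
 *
 * which emits a semaphore wait on the target ring when semaphores are
 * enabled and otherwise falls back to a CPU-side wait for rendering to
 * complete.
 */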
2680
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002681static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2682{
2683 u32 old_write_domain, old_read_domains;
2684
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002685 /* Force a pagefault for domain tracking on next user access */
2686 i915_gem_release_mmap(obj);
2687
Keith Packardb97c3d92011-06-24 21:02:59 -07002688 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2689 return;
2690
Chris Wilson97c809fd2012-10-09 19:24:38 +01002691 /* Wait for any direct GTT access to complete */
2692 mb();
2693
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002694 old_read_domains = obj->base.read_domains;
2695 old_write_domain = obj->base.write_domain;
2696
2697 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2698 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2699
2700 trace_i915_gem_object_change_domain(obj,
2701 old_read_domains,
2702 old_write_domain);
2703}
2704
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002705int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002706{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002707 struct drm_i915_gem_object *obj = vma->obj;
Daniel Vetter7bddb012012-02-09 17:15:47 +01002708 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson43e28f02013-01-08 10:53:09 +00002709 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002710
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002711 if (list_empty(&vma->vma_link))
Eric Anholt673a3942008-07-30 12:06:12 -07002712 return 0;
2713
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002714 if (!drm_mm_node_allocated(&vma->node)) {
2715 i915_gem_vma_destroy(vma);
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002716 return 0;
2717 }
Ben Widawsky433544b2013-08-13 18:09:06 -07002718
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08002719 if (vma->pin_count)
Chris Wilson31d8d652012-05-24 19:11:20 +01002720 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002721
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002722 BUG_ON(obj->pages == NULL);
2723
Chris Wilsona8198ee2011-04-13 22:04:09 +01002724 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002725 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002726 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002727	/* Continue on if we fail due to EIO; the GPU is hung, so we
2728	 * should be safe, and we need to clean up or else we might
2729	 * cause memory corruption through use-after-free.
2730 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002731
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002732 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002733
Daniel Vetter96b47b62009-12-15 17:50:00 +01002734 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002735 ret = i915_gem_object_put_fence(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002736 if (ret)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002737 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002738
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002739 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00002740
Ben Widawsky6f65e292013-12-06 14:10:56 -08002741 vma->unbind_vma(vma);
2742
Daniel Vetter74163902012-02-15 23:50:21 +01002743 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002744
Chris Wilson64bf9302014-02-25 14:23:28 +00002745 list_del_init(&vma->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002746 /* Avoid an unnecessary call to unbind on rebind. */
Ben Widawsky5cacaac2013-07-31 17:00:13 -07002747 if (i915_is_ggtt(vma->vm))
2748 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002749
Ben Widawsky2f633152013-07-17 12:19:03 -07002750 drm_mm_remove_node(&vma->node);
2751 i915_gem_vma_destroy(vma);
2752
2753 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002754 * no more VMAs exist. */
Ben Widawsky2f633152013-07-17 12:19:03 -07002755 if (list_empty(&obj->vma_list))
2756 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002757
Chris Wilson70903c32013-12-04 09:59:09 +00002758	/* And finally, now that the object is completely decoupled from this
2759	 * vma, we can drop its hold on the backing storage and allow it to be
2760 * reaped by the shrinker.
2761 */
2762 i915_gem_object_unpin_pages(obj);
2763
Chris Wilson88241782011-01-07 17:09:48 +00002764 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002765}
2766
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002767int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002768{
2769 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002770 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002771 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002772
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002773 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002774 for_each_ring(ring, dev_priv, i) {
Ben Widawsky41bde552013-12-06 14:11:21 -08002775 ret = i915_switch_context(ring, NULL, ring->default_context);
Ben Widawskyb6c74882012-08-14 14:35:14 -07002776 if (ret)
2777 return ret;
2778
Chris Wilson3e960502012-11-27 16:22:54 +00002779 ret = intel_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002780 if (ret)
2781 return ret;
2782 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002783
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002784 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002785}
2786
Chris Wilson9ce079e2012-04-17 15:31:30 +01002787static void i965_write_fence_reg(struct drm_device *dev, int reg,
2788 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002789{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002790 drm_i915_private_t *dev_priv = dev->dev_private;
Imre Deak56c844e2013-01-07 21:47:34 +02002791 int fence_reg;
2792 int fence_pitch_shift;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002793
Imre Deak56c844e2013-01-07 21:47:34 +02002794 if (INTEL_INFO(dev)->gen >= 6) {
2795 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2796 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2797 } else {
2798 fence_reg = FENCE_REG_965_0;
2799 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2800 }
2801
Chris Wilsond18b9612013-07-10 13:36:23 +01002802 fence_reg += reg * 8;
2803
2804 /* To w/a incoherency with non-atomic 64-bit register updates,
2805 * we split the 64-bit update into two 32-bit writes. In order
2806 * for a partial fence not to be evaluated between writes, we
2807	 * precede the update with a write to turn off the fence register,
2808 * and only enable the fence as the last step.
2809 *
2810 * For extra levels of paranoia, we make sure each step lands
2811 * before applying the next step.
2812 */
2813 I915_WRITE(fence_reg, 0);
2814 POSTING_READ(fence_reg);
2815
Chris Wilson9ce079e2012-04-17 15:31:30 +01002816 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002817 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilsond18b9612013-07-10 13:36:23 +01002818 uint64_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002819
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002820 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
Chris Wilson9ce079e2012-04-17 15:31:30 +01002821 0xfffff000) << 32;
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002822 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
Imre Deak56c844e2013-01-07 21:47:34 +02002823 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002824 if (obj->tiling_mode == I915_TILING_Y)
2825 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2826 val |= I965_FENCE_REG_VALID;
Daniel Vetterc6642782010-11-12 13:46:18 +00002827
Chris Wilsond18b9612013-07-10 13:36:23 +01002828 I915_WRITE(fence_reg + 4, val >> 32);
2829 POSTING_READ(fence_reg + 4);
2830
2831 I915_WRITE(fence_reg + 0, val);
2832 POSTING_READ(fence_reg);
2833 } else {
2834 I915_WRITE(fence_reg + 4, 0);
2835 POSTING_READ(fence_reg + 4);
2836 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002837}
2838
Chris Wilson9ce079e2012-04-17 15:31:30 +01002839static void i915_write_fence_reg(struct drm_device *dev, int reg,
2840 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002841{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002842 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002843 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002844
Chris Wilson9ce079e2012-04-17 15:31:30 +01002845 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002846 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002847 int pitch_val;
2848 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002849
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002850 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002851 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002852 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2853 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2854 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002855
2856 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2857 tile_width = 128;
2858 else
2859 tile_width = 512;
2860
2861 /* Note: pitch better be a power of two tile widths */
2862 pitch_val = obj->stride / tile_width;
2863 pitch_val = ffs(pitch_val) - 1;
2864
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002865 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002866 if (obj->tiling_mode == I915_TILING_Y)
2867 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2868 val |= I915_FENCE_SIZE_BITS(size);
2869 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2870 val |= I830_FENCE_REG_VALID;
2871 } else
2872 val = 0;
2873
2874 if (reg < 8)
2875 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002876 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002877 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002878
Chris Wilson9ce079e2012-04-17 15:31:30 +01002879 I915_WRITE(reg, val);
2880 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002881}
2882
Chris Wilson9ce079e2012-04-17 15:31:30 +01002883static void i830_write_fence_reg(struct drm_device *dev, int reg,
2884 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002885{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002886 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002887 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002888
Chris Wilson9ce079e2012-04-17 15:31:30 +01002889 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002890 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002891 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002892
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002893 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002894 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002895 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2896 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2897 i915_gem_obj_ggtt_offset(obj), size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002898
Chris Wilson9ce079e2012-04-17 15:31:30 +01002899 pitch_val = obj->stride / 128;
2900 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002901
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002902 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002903 if (obj->tiling_mode == I915_TILING_Y)
2904 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2905 val |= I830_FENCE_SIZE_BITS(size);
2906 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2907 val |= I830_FENCE_REG_VALID;
2908 } else
2909 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002910
Chris Wilson9ce079e2012-04-17 15:31:30 +01002911 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2912 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2913}
2914
Chris Wilsond0a57782012-10-09 19:24:37 +01002915inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2916{
2917 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2918}
2919
Chris Wilson9ce079e2012-04-17 15:31:30 +01002920static void i915_gem_write_fence(struct drm_device *dev, int reg,
2921 struct drm_i915_gem_object *obj)
2922{
Chris Wilsond0a57782012-10-09 19:24:37 +01002923 struct drm_i915_private *dev_priv = dev->dev_private;
2924
2925 /* Ensure that all CPU reads are completed before installing a fence
2926 * and all writes before removing the fence.
2927 */
2928 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2929 mb();
2930
Daniel Vetter94a335d2013-07-17 14:51:28 +02002931 WARN(obj && (!obj->stride || !obj->tiling_mode),
2932 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2933 obj->stride, obj->tiling_mode);
2934
Chris Wilson9ce079e2012-04-17 15:31:30 +01002935 switch (INTEL_INFO(dev)->gen) {
Ben Widawsky5ab31332013-11-02 21:07:03 -07002936 case 8:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002937 case 7:
Imre Deak56c844e2013-01-07 21:47:34 +02002938 case 6:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002939 case 5:
2940 case 4: i965_write_fence_reg(dev, reg, obj); break;
2941 case 3: i915_write_fence_reg(dev, reg, obj); break;
2942 case 2: i830_write_fence_reg(dev, reg, obj); break;
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08002943 default: BUG();
Chris Wilson9ce079e2012-04-17 15:31:30 +01002944 }
Chris Wilsond0a57782012-10-09 19:24:37 +01002945
2946 /* And similarly be paranoid that no direct access to this region
2947 * is reordered to before the fence is installed.
2948 */
2949 if (i915_gem_object_needs_mb(obj))
2950 mb();
Jesse Barnesde151cf2008-11-12 10:03:55 -08002951}
2952
Chris Wilson61050802012-04-17 15:31:31 +01002953static inline int fence_number(struct drm_i915_private *dev_priv,
2954 struct drm_i915_fence_reg *fence)
2955{
2956 return fence - dev_priv->fence_regs;
2957}
2958
2959static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2960 struct drm_i915_fence_reg *fence,
2961 bool enable)
2962{
Chris Wilson2dc8aae2013-05-22 17:08:06 +01002963 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson46a0b632013-07-10 13:36:24 +01002964 int reg = fence_number(dev_priv, fence);
Chris Wilson61050802012-04-17 15:31:31 +01002965
Chris Wilson46a0b632013-07-10 13:36:24 +01002966 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
Chris Wilson61050802012-04-17 15:31:31 +01002967
2968 if (enable) {
Chris Wilson46a0b632013-07-10 13:36:24 +01002969 obj->fence_reg = reg;
Chris Wilson61050802012-04-17 15:31:31 +01002970 fence->obj = obj;
2971 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2972 } else {
2973 obj->fence_reg = I915_FENCE_REG_NONE;
2974 fence->obj = NULL;
2975 list_del_init(&fence->lru_list);
2976 }
Daniel Vetter94a335d2013-07-17 14:51:28 +02002977 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +01002978}
2979
Chris Wilsond9e86c02010-11-10 16:40:20 +00002980static int
Chris Wilsond0a57782012-10-09 19:24:37 +01002981i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002982{
Chris Wilson1c293ea2012-04-17 15:31:27 +01002983 if (obj->last_fenced_seqno) {
Chris Wilson86d5bc32012-07-20 12:41:04 +01002984 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01002985 if (ret)
2986 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002987
2988 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002989 }
2990
Chris Wilson86d5bc32012-07-20 12:41:04 +01002991 obj->fenced_gpu_access = false;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002992 return 0;
2993}
2994
2995int
2996i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2997{
Chris Wilson61050802012-04-17 15:31:31 +01002998 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonf9c513e2013-03-26 11:29:27 +00002999 struct drm_i915_fence_reg *fence;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003000 int ret;
3001
Chris Wilsond0a57782012-10-09 19:24:37 +01003002 ret = i915_gem_object_wait_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003003 if (ret)
3004 return ret;
3005
Chris Wilson61050802012-04-17 15:31:31 +01003006 if (obj->fence_reg == I915_FENCE_REG_NONE)
3007 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01003008
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003009 fence = &dev_priv->fence_regs[obj->fence_reg];
3010
Chris Wilson61050802012-04-17 15:31:31 +01003011 i915_gem_object_fence_lost(obj);
Chris Wilsonf9c513e2013-03-26 11:29:27 +00003012 i915_gem_object_update_fence(obj, fence, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003013
3014 return 0;
3015}
3016
3017static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01003018i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01003019{
Daniel Vetterae3db242010-02-19 11:51:58 +01003020 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01003021 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003022 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01003023
3024 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003025 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01003026 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3027 reg = &dev_priv->fence_regs[i];
3028 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003029 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003030
Chris Wilson1690e1e2011-12-14 13:57:08 +01003031 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00003032 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003033 }
3034
Chris Wilsond9e86c02010-11-10 16:40:20 +00003035 if (avail == NULL)
Chris Wilson5dce5b932014-01-20 10:17:36 +00003036 goto deadlock;
Daniel Vetterae3db242010-02-19 11:51:58 +01003037
3038 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00003039 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01003040 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01003041 continue;
3042
Chris Wilson8fe301a2012-04-17 15:31:28 +01003043 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003044 }
3045
Chris Wilson5dce5b932014-01-20 10:17:36 +00003046deadlock:
3047 /* Wait for completion of pending flips which consume fences */
3048 if (intel_has_pending_fb_unpin(dev))
3049 return ERR_PTR(-EAGAIN);
3050
3051 return ERR_PTR(-EDEADLK);
Daniel Vetterae3db242010-02-19 11:51:58 +01003052}
3053
Jesse Barnesde151cf2008-11-12 10:03:55 -08003054/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003055 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08003056 * @obj: object to map through a fence reg
3057 *
3058 * When mapping objects through the GTT, userspace wants to be able to write
3059 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003060 * This function walks the fence regs looking for a free one for @obj,
3061 * stealing one if it can't find any.
3062 *
3063 * It then sets up the reg based on the object's properties: address, pitch
3064 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003065 *
3066 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08003067 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003068int
Chris Wilson06d98132012-04-17 15:31:24 +01003069i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08003070{
Chris Wilson05394f32010-11-08 19:18:58 +00003071 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08003072 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01003073 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003074 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003075 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003076
Chris Wilson14415742012-04-17 15:31:33 +01003077 /* Have we updated the tiling parameters upon the object and so
3078 * will need to serialise the write to the associated fence register?
3079 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003080 if (obj->fence_dirty) {
Chris Wilsond0a57782012-10-09 19:24:37 +01003081 ret = i915_gem_object_wait_fence(obj);
Chris Wilson14415742012-04-17 15:31:33 +01003082 if (ret)
3083 return ret;
3084 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003085
Chris Wilsond9e86c02010-11-10 16:40:20 +00003086 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00003087 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3088 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003089 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01003090 list_move_tail(&reg->lru_list,
3091 &dev_priv->mm.fence_list);
3092 return 0;
3093 }
3094 } else if (enable) {
3095 reg = i915_find_fence_reg(dev);
Chris Wilson5dce5b932014-01-20 10:17:36 +00003096 if (IS_ERR(reg))
3097 return PTR_ERR(reg);
Chris Wilsond9e86c02010-11-10 16:40:20 +00003098
Chris Wilson14415742012-04-17 15:31:33 +01003099 if (reg->obj) {
3100 struct drm_i915_gem_object *old = reg->obj;
3101
Chris Wilsond0a57782012-10-09 19:24:37 +01003102 ret = i915_gem_object_wait_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003103 if (ret)
3104 return ret;
3105
Chris Wilson14415742012-04-17 15:31:33 +01003106 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003107 }
Chris Wilson14415742012-04-17 15:31:33 +01003108 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07003109 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07003110
Chris Wilson14415742012-04-17 15:31:33 +01003111 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson14415742012-04-17 15:31:33 +01003112
Chris Wilson9ce079e2012-04-17 15:31:30 +01003113 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003114}
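
/*
 * Illustrative only, a hypothetical sketch: callers that keep the hardware
 * scanning out or blitting through the fenced aperture, such as the display
 * code pinning a tiled framebuffer, typically pair this with fence pinning:
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret == 0)
 *		i915_gem_object_pin_fence(obj);
 *
 * so that the register cannot be stolen again until the corresponding
 * i915_gem_object_unpin_fence() call.
 */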
3115
Chris Wilson42d6ab42012-07-26 11:49:32 +01003116static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3117 struct drm_mm_node *gtt_space,
3118 unsigned long cache_level)
3119{
3120 struct drm_mm_node *other;
3121
3122 /* On non-LLC machines we have to be careful when putting differing
3123 * types of snoopable memory together to avoid the prefetcher
Damien Lespiau4239ca72012-12-03 16:26:16 +00003124 * crossing memory domains and dying.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003125 */
3126 if (HAS_LLC(dev))
3127 return true;
3128
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003129 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003130 return true;
3131
3132 if (list_empty(&gtt_space->node_list))
3133 return true;
3134
3135 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3136 if (other->allocated && !other->hole_follows && other->color != cache_level)
3137 return false;
3138
3139 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3140 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3141 return false;
3142
3143 return true;
3144}
3145
3146static void i915_gem_verify_gtt(struct drm_device *dev)
3147{
3148#if WATCH_GTT
3149 struct drm_i915_private *dev_priv = dev->dev_private;
3150 struct drm_i915_gem_object *obj;
3151 int err = 0;
3152
Ben Widawsky35c20a62013-05-31 11:28:48 -07003153 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
Chris Wilson42d6ab42012-07-26 11:49:32 +01003154 if (obj->gtt_space == NULL) {
3155 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3156 err++;
3157 continue;
3158 }
3159
3160 if (obj->cache_level != obj->gtt_space->color) {
3161 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003162 i915_gem_obj_ggtt_offset(obj),
3163 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003164 obj->cache_level,
3165 obj->gtt_space->color);
3166 err++;
3167 continue;
3168 }
3169
3170 if (!i915_gem_valid_gtt_space(dev,
3171 obj->gtt_space,
3172 obj->cache_level)) {
3173 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003174 i915_gem_obj_ggtt_offset(obj),
3175 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003176 obj->cache_level);
3177 err++;
3178 continue;
3179 }
3180 }
3181
3182 WARN_ON(err);
3183#endif
3184}
3185
Jesse Barnesde151cf2008-11-12 10:03:55 -08003186/**
Eric Anholt673a3942008-07-30 12:06:12 -07003187 * Finds free space in the GTT aperture and binds the object there.
3188 */
Daniel Vetter262de142014-02-14 14:01:20 +01003189static struct i915_vma *
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003190i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3191 struct i915_address_space *vm,
3192 unsigned alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003193 unsigned flags)
Eric Anholt673a3942008-07-30 12:06:12 -07003194{
Chris Wilson05394f32010-11-08 19:18:58 +00003195 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003196 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter5e783302010-11-14 22:32:36 +01003197 u32 size, fence_size, fence_alignment, unfenced_alignment;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003198 size_t gtt_max =
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003199 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
Ben Widawsky2f633152013-07-17 12:19:03 -07003200 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003201 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003202
Chris Wilsone28f8712011-07-18 13:11:49 -07003203 fence_size = i915_gem_get_gtt_size(dev,
3204 obj->base.size,
3205 obj->tiling_mode);
3206 fence_alignment = i915_gem_get_gtt_alignment(dev,
3207 obj->base.size,
Imre Deakd865110c2013-01-07 21:47:33 +02003208 obj->tiling_mode, true);
Chris Wilsone28f8712011-07-18 13:11:49 -07003209 unfenced_alignment =
Imre Deakd865110c2013-01-07 21:47:33 +02003210 i915_gem_get_gtt_alignment(dev,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003211 obj->base.size,
3212 obj->tiling_mode, false);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003213
Eric Anholt673a3942008-07-30 12:06:12 -07003214 if (alignment == 0)
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003215 alignment = flags & PIN_MAPPABLE ? fence_alignment :
Daniel Vetter5e783302010-11-14 22:32:36 +01003216 unfenced_alignment;
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003217 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003218 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
Daniel Vetter262de142014-02-14 14:01:20 +01003219 return ERR_PTR(-EINVAL);
Eric Anholt673a3942008-07-30 12:06:12 -07003220 }
3221
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003222 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003223
Chris Wilson654fc602010-05-27 13:18:21 +01003224 /* If the object is bigger than the entire aperture, reject it early
3225 * before evicting everything in a vain attempt to find space.
3226 */
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003227 if (obj->base.size > gtt_max) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003228 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
Chris Wilsona36689c2013-05-21 16:58:49 +01003229 obj->base.size,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003230 flags & PIN_MAPPABLE ? "mappable" : "total",
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003231 gtt_max);
Daniel Vetter262de142014-02-14 14:01:20 +01003232 return ERR_PTR(-E2BIG);
Chris Wilson654fc602010-05-27 13:18:21 +01003233 }
3234
Chris Wilson37e680a2012-06-07 15:38:42 +01003235 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003236 if (ret)
Daniel Vetter262de142014-02-14 14:01:20 +01003237 return ERR_PTR(ret);
Chris Wilson6c085a72012-08-20 11:40:46 +02003238
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003239 i915_gem_object_pin_pages(obj);
3240
Ben Widawskyaccfef22013-08-14 11:38:35 +02003241 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
Daniel Vetter262de142014-02-14 14:01:20 +01003242 if (IS_ERR(vma))
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003243 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003244
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003245search_free:
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003246 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003247 size, alignment,
David Herrmann31e5d7c2013-07-27 13:36:27 +02003248 obj->cache_level, 0, gtt_max,
3249 DRM_MM_SEARCH_DEFAULT);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003250 if (ret) {
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07003251 ret = i915_gem_evict_something(dev, vm, size, alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003252 obj->cache_level, flags);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003253 if (ret == 0)
3254 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003255
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003256 goto err_free_vma;
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003257 }
Ben Widawsky2f633152013-07-17 12:19:03 -07003258 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003259 obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003260 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003261 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003262 }
3263
Daniel Vetter74163902012-02-15 23:50:21 +01003264 ret = i915_gem_gtt_prepare_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003265 if (ret)
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003266 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003267
Ben Widawsky35c20a62013-05-31 11:28:48 -07003268 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -07003269 list_add_tail(&vma->mm_list, &vm->inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003270
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003271 if (i915_is_ggtt(vm)) {
3272 bool mappable, fenceable;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003273
Daniel Vetter49987092013-08-14 10:21:23 +02003274 fenceable = (vma->node.size == fence_size &&
3275 (vma->node.start & (fence_alignment - 1)) == 0);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003276
Daniel Vetter49987092013-08-14 10:21:23 +02003277 mappable = (vma->node.start + obj->base.size <=
3278 dev_priv->gtt.mappable_end);
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003279
Ben Widawsky5cacaac2013-07-31 17:00:13 -07003280 obj->map_and_fenceable = mappable && fenceable;
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003281 }
Daniel Vetter75e9e912010-11-04 17:11:09 +01003282
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003283 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003284
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003285 trace_i915_vma_bind(vma, flags);
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003286 vma->bind_vma(vma, obj->cache_level,
3287 flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3288
Chris Wilson42d6ab42012-07-26 11:49:32 +01003289 i915_gem_verify_gtt(dev);
Daniel Vetter262de142014-02-14 14:01:20 +01003290 return vma;
Ben Widawsky2f633152013-07-17 12:19:03 -07003291
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003292err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003293 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003294err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003295 i915_gem_vma_destroy(vma);
Daniel Vetter262de142014-02-14 14:01:20 +01003296 vma = ERR_PTR(ret);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003297err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003298 i915_gem_object_unpin_pages(obj);
Daniel Vetter262de142014-02-14 14:01:20 +01003299 return vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003300}
3301
Chris Wilson000433b2013-08-08 14:41:09 +01003302bool
Chris Wilson2c225692013-08-09 12:26:45 +01003303i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3304 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003305{
Eric Anholt673a3942008-07-30 12:06:12 -07003306 /* If we don't have a page list set up, then we're not pinned
3307 * to GPU, and we can ignore the cache flush because it'll happen
3308 * again at bind time.
3309 */
Chris Wilson05394f32010-11-08 19:18:58 +00003310 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003311 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003312
Imre Deak769ce462013-02-13 21:56:05 +02003313 /*
3314 * Stolen memory is always coherent with the GPU as it is explicitly
3315 * marked as wc by the system, or the system is cache-coherent.
3316 */
3317 if (obj->stolen)
Chris Wilson000433b2013-08-08 14:41:09 +01003318 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003319
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003320 /* If the GPU is snooping the contents of the CPU cache,
3321 * we do not need to manually clear the CPU cache lines. However,
3322 * the caches are only snooped when the render cache is
3323 * flushed/invalidated. As we always have to emit invalidations
3324 * and flushes when moving into and out of the RENDER domain, correct
3325 * snooping behaviour occurs naturally as the result of our domain
3326 * tracking.
3327 */
Chris Wilson2c225692013-08-09 12:26:45 +01003328 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
Chris Wilson000433b2013-08-08 14:41:09 +01003329 return false;
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003330
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003331 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003332 drm_clflush_sg(obj->pages);
Chris Wilson000433b2013-08-08 14:41:09 +01003333
3334 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003335}
3336
3337/** Flushes the GTT write domain for the object if it's dirty. */
3338static void
Chris Wilson05394f32010-11-08 19:18:58 +00003339i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003340{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003341 uint32_t old_write_domain;
3342
Chris Wilson05394f32010-11-08 19:18:58 +00003343 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003344 return;
3345
Chris Wilson63256ec2011-01-04 18:42:07 +00003346 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003347 * to it immediately go to main memory as far as we know, so there's
3348 * no chipset flush. Writes also do not land in the render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003349 *
3350 * However, we do have to enforce the order so that all writes through
3351 * the GTT land before any writes to the device, such as updates to
3352 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08003353 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003354 wmb();
3355
Chris Wilson05394f32010-11-08 19:18:58 +00003356 old_write_domain = obj->base.write_domain;
3357 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003358
3359 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003360 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003361 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003362}
3363
3364/** Flushes the CPU write domain for the object if it's dirty. */
3365static void
Chris Wilson2c225692013-08-09 12:26:45 +01003366i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3367 bool force)
Eric Anholte47c68e2008-11-14 13:35:19 -08003368{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003369 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08003370
Chris Wilson05394f32010-11-08 19:18:58 +00003371 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003372 return;
3373
Chris Wilson000433b2013-08-08 14:41:09 +01003374 if (i915_gem_clflush_object(obj, force))
3375 i915_gem_chipset_flush(obj->base.dev);
3376
Chris Wilson05394f32010-11-08 19:18:58 +00003377 old_write_domain = obj->base.write_domain;
3378 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003379
3380 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003381 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003382 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003383}
3384
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003385/**
3386 * Moves a single object to the GTT read, and possibly write domain.
3387 *
3388 * This function returns when the move is complete, including waiting on
3389 * flushes to occur.
3390 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003391int
Chris Wilson20217462010-11-23 15:26:33 +00003392i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003393{
Chris Wilson8325a092012-04-24 15:52:35 +01003394 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003395 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003396 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003397
Eric Anholt02354392008-11-26 13:58:13 -08003398 /* Not valid to be called on unbound objects. */
Ben Widawsky98438772013-07-31 17:00:12 -07003399 if (!i915_gem_obj_bound_any(obj))
Eric Anholt02354392008-11-26 13:58:13 -08003400 return -EINVAL;
3401
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003402 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3403 return 0;
3404
Chris Wilson0201f1e2012-07-20 12:41:01 +01003405 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003406 if (ret)
3407 return ret;
3408
Chris Wilson2c225692013-08-09 12:26:45 +01003409 i915_gem_object_flush_cpu_write_domain(obj, false);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003410
Chris Wilsond0a57782012-10-09 19:24:37 +01003411 /* Serialise direct access to this object with the barriers for
3412 * coherent writes from the GPU, by effectively invalidating the
3413 * GTT domain upon first access.
3414 */
3415 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3416 mb();
3417
Chris Wilson05394f32010-11-08 19:18:58 +00003418 old_write_domain = obj->base.write_domain;
3419 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003420
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003421 /* It should now be out of any other write domains, and we can update
3422 * the domain values for our changes.
3423 */
Chris Wilson05394f32010-11-08 19:18:58 +00003424 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3425 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003426 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003427 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3428 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3429 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003430 }
3431
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003432 trace_i915_gem_object_change_domain(obj,
3433 old_read_domains,
3434 old_write_domain);
3435
Chris Wilson8325a092012-04-24 15:52:35 +01003436 /* And bump the LRU for this access */
Ben Widawskyca191b12013-07-31 17:00:14 -07003437 if (i915_gem_object_is_inactive(obj)) {
Ben Widawsky5c2abbe2013-09-24 09:57:57 -07003438 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Ben Widawskyca191b12013-07-31 17:00:14 -07003439 if (vma)
3440 list_move_tail(&vma->mm_list,
3441 &dev_priv->gtt.base.inactive_list);
3442
3443 }
Chris Wilson8325a092012-04-24 15:52:35 +01003444
Eric Anholte47c68e2008-11-14 13:35:19 -08003445 return 0;
3446}
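
/*
 * Illustrative only: callers that are about to write through the mappable
 * aperture, e.g. the GTT pwrite fast path, flush the object into the GTT
 * domain first:
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *
 * so any dirty CPU cachelines are clflushed before the aperture writes land
 * and the object is tracked as GTT-dirty afterwards.
 */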
3447
Chris Wilsone4ffd172011-04-04 09:44:39 +01003448int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3449 enum i915_cache_level cache_level)
3450{
Daniel Vetter7bddb012012-02-09 17:15:47 +01003451 struct drm_device *dev = obj->base.dev;
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003452 struct i915_vma *vma;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003453 int ret;
3454
3455 if (obj->cache_level == cache_level)
3456 return 0;
3457
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003458 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003459 DRM_DEBUG("can not change the cache level of pinned objects\n");
3460 return -EBUSY;
3461 }
3462
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003463 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3464 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003465 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003466 if (ret)
3467 return ret;
3468
3469 break;
3470 }
Chris Wilson42d6ab42012-07-26 11:49:32 +01003471 }
3472
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003473 if (i915_gem_obj_bound_any(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003474 ret = i915_gem_object_finish_gpu(obj);
3475 if (ret)
3476 return ret;
3477
3478 i915_gem_object_finish_gtt(obj);
3479
3480 /* Before SandyBridge, you could not use tiling or fence
3481 * registers with snooped memory, so relinquish any fences
3482 * currently pointing to our region in the aperture.
3483 */
Chris Wilson42d6ab42012-07-26 11:49:32 +01003484 if (INTEL_INFO(dev)->gen < 6) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003485 ret = i915_gem_object_put_fence(obj);
3486 if (ret)
3487 return ret;
3488 }
3489
Ben Widawsky6f65e292013-12-06 14:10:56 -08003490 list_for_each_entry(vma, &obj->vma_list, vma_link)
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003491 if (drm_mm_node_allocated(&vma->node))
3492 vma->bind_vma(vma, cache_level,
3493 obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003494 }
3495
Chris Wilson2c225692013-08-09 12:26:45 +01003496 list_for_each_entry(vma, &obj->vma_list, vma_link)
3497 vma->node.color = cache_level;
3498 obj->cache_level = cache_level;
3499
3500 if (cpu_write_needs_clflush(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003501 u32 old_read_domains, old_write_domain;
3502
3503 /* If we're coming from LLC cached, then we haven't
3504 * actually been tracking whether the data is in the
3505 * CPU cache or not, since we only allow one bit set
3506 * in obj->write_domain and have been skipping the clflushes.
3507 * Just set it to the CPU cache for now.
3508 */
3509 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003510
3511 old_read_domains = obj->base.read_domains;
3512 old_write_domain = obj->base.write_domain;
3513
3514 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3515 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3516
3517 trace_i915_gem_object_change_domain(obj,
3518 old_read_domains,
3519 old_write_domain);
3520 }
3521
Chris Wilson42d6ab42012-07-26 11:49:32 +01003522 i915_gem_verify_gtt(dev);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003523 return 0;
3524}
3525
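/*
 * The get/set_caching ioctls below translate between the uapi values and
 * obj->cache_level; the mapping, as implemented by the two switch
 * statements, is roughly:
 *
 *	I915_CACHING_NONE    <-> I915_CACHE_NONE
 *	I915_CACHING_CACHED  <-> I915_CACHE_LLC (also reported for L3_LLC)
 *	I915_CACHING_DISPLAY <-> I915_CACHE_WT on HAS_WT(), else I915_CACHE_NONE
 */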
Ben Widawsky199adf42012-09-21 17:01:20 -07003526int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3527 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003528{
Ben Widawsky199adf42012-09-21 17:01:20 -07003529 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003530 struct drm_i915_gem_object *obj;
3531 int ret;
3532
3533 ret = i915_mutex_lock_interruptible(dev);
3534 if (ret)
3535 return ret;
3536
3537 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3538 if (&obj->base == NULL) {
3539 ret = -ENOENT;
3540 goto unlock;
3541 }
3542
Chris Wilson651d7942013-08-08 14:41:10 +01003543 switch (obj->cache_level) {
3544 case I915_CACHE_LLC:
3545 case I915_CACHE_L3_LLC:
3546 args->caching = I915_CACHING_CACHED;
3547 break;
3548
Chris Wilson4257d3b2013-08-08 14:41:11 +01003549 case I915_CACHE_WT:
3550 args->caching = I915_CACHING_DISPLAY;
3551 break;
3552
Chris Wilson651d7942013-08-08 14:41:10 +01003553 default:
3554 args->caching = I915_CACHING_NONE;
3555 break;
3556 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003557
3558 drm_gem_object_unreference(&obj->base);
3559unlock:
3560 mutex_unlock(&dev->struct_mutex);
3561 return ret;
3562}
3563
Ben Widawsky199adf42012-09-21 17:01:20 -07003564int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3565 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003566{
Ben Widawsky199adf42012-09-21 17:01:20 -07003567 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003568 struct drm_i915_gem_object *obj;
3569 enum i915_cache_level level;
3570 int ret;
3571
Ben Widawsky199adf42012-09-21 17:01:20 -07003572 switch (args->caching) {
3573 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003574 level = I915_CACHE_NONE;
3575 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003576 case I915_CACHING_CACHED:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003577 level = I915_CACHE_LLC;
3578 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003579 case I915_CACHING_DISPLAY:
3580 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3581 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003582 default:
3583 return -EINVAL;
3584 }
3585
Ben Widawsky3bc29132012-09-26 16:15:20 -07003586 ret = i915_mutex_lock_interruptible(dev);
3587 if (ret)
3588 return ret;
3589
Chris Wilsone6994ae2012-07-10 10:27:08 +01003590 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3591 if (&obj->base == NULL) {
3592 ret = -ENOENT;
3593 goto unlock;
3594 }
3595
3596 ret = i915_gem_object_set_cache_level(obj, level);
3597
3598 drm_gem_object_unreference(&obj->base);
3599unlock:
3600 mutex_unlock(&dev->struct_mutex);
3601 return ret;
3602}
3603
Chris Wilsoncc98b412013-08-09 12:25:09 +01003604static bool is_pin_display(struct drm_i915_gem_object *obj)
3605{
3606 /* There are 3 sources that pin objects:
3607 * 1. The display engine (scanouts, sprites, cursors);
3608 * 2. Reservations for execbuffer;
3609 * 3. The user.
3610 *
3611 * We can ignore reservations as we hold the struct_mutex and
3612 * are only called outside of the reservation path. The user
3613 * can only increment pin_count once, and so if after
3614 * subtracting the potential reference by the user, any pin_count
3615 * remains, it must be due to another use by the display engine.
3616 */
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003617 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003618}
3619
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003620/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003621 * Prepare buffer for display plane (scanout, cursors, etc).
3622 * Can be called from an uninterruptible phase (modesetting) and allows
3623 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003624 */
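/*
 * Rough order of operations below: sync against the pipelined ring, switch
 * to a scanout-compatible cache level (write-through where HAS_WT(), else
 * uncached), pin into the mappable GGTT aperture, then flush any CPU
 * writes.  An illustrative (hypothetical) modeset-path pairing would be:
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *	...
 *	i915_gem_object_unpin_from_display_plane(obj);
 */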
3625int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003626i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3627 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00003628 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003629{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003630 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003631 int ret;
3632
Chris Wilson0be73282010-12-06 14:36:27 +00003633 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07003634 ret = i915_gem_object_sync(obj, pipelined);
3635 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003636 return ret;
3637 }
3638
Chris Wilsoncc98b412013-08-09 12:25:09 +01003639 /* Mark the pin_display early so that we account for the
3640 * display coherency whilst setting up the cache domains.
3641 */
3642 obj->pin_display = true;
3643
Eric Anholta7ef0642011-03-29 16:59:54 -07003644 /* The display engine is not coherent with the LLC cache on gen6. As
3645 * a result, we make sure that the pinning that is about to occur is
3646	 * done with uncached PTEs. This is the lowest common denominator for all
3647 * chipsets.
3648 *
3649 * However for gen6+, we could do better by using the GFDT bit instead
3650 * of uncaching, which would allow us to flush all the LLC-cached data
3651 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3652 */
Chris Wilson651d7942013-08-08 14:41:10 +01003653 ret = i915_gem_object_set_cache_level(obj,
3654 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Eric Anholta7ef0642011-03-29 16:59:54 -07003655 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003656 goto err_unpin_display;
Eric Anholta7ef0642011-03-29 16:59:54 -07003657
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003658 /* As the user may map the buffer once pinned in the display plane
3659 * (e.g. libkms for the bootup splash), we have to ensure that we
3660 * always use map_and_fenceable for all scanout buffers.
3661 */
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003662 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003663 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003664 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003665
Chris Wilson2c225692013-08-09 12:26:45 +01003666 i915_gem_object_flush_cpu_write_domain(obj, true);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003667
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003668 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003669 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003670
3671 /* It should now be out of any other write domains, and we can update
3672 * the domain values for our changes.
3673 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003674 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003675 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003676
3677 trace_i915_gem_object_change_domain(obj,
3678 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003679 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003680
3681 return 0;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003682
3683err_unpin_display:
3684 obj->pin_display = is_pin_display(obj);
3685 return ret;
3686}
3687
3688void
3689i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3690{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003691 i915_gem_object_ggtt_unpin(obj);
Chris Wilsoncc98b412013-08-09 12:25:09 +01003692 obj->pin_display = is_pin_display(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003693}
3694
Chris Wilson85345512010-11-13 09:49:11 +00003695int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003696i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003697{
Chris Wilson88241782011-01-07 17:09:48 +00003698 int ret;
3699
Chris Wilsona8198ee2011-04-13 22:04:09 +01003700 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003701 return 0;
3702
Chris Wilson0201f1e2012-07-20 12:41:01 +01003703 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsonc501ae72011-12-14 13:57:23 +01003704 if (ret)
3705 return ret;
3706
Chris Wilsona8198ee2011-04-13 22:04:09 +01003707 /* Ensure that we invalidate the GPU's caches and TLBs. */
3708 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003709 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003710}
3711
Eric Anholte47c68e2008-11-14 13:35:19 -08003712/**
3713	 * Moves a single object to the CPU read, and possibly write, domain.
3714 *
3715 * This function returns when the move is complete, including waiting on
3716 * flushes to occur.
3717 */
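/*
 * Illustrative use, assuming struct_mutex is held and that vaddr/len name a
 * hypothetical CPU mapping of the object:
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 *	if (ret == 0)
 *		memset(vaddr, 0, len);
 *
 * After this, CPU reads and writes need no explicit clflush until the
 * object is moved to another domain.
 */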
Chris Wilsondabdfe02012-03-26 10:10:27 +02003718int
Chris Wilson919926a2010-11-12 13:42:53 +00003719i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003720{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003721 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003722 int ret;
3723
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003724 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3725 return 0;
3726
Chris Wilson0201f1e2012-07-20 12:41:01 +01003727 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003728 if (ret)
3729 return ret;
3730
Eric Anholte47c68e2008-11-14 13:35:19 -08003731 i915_gem_object_flush_gtt_write_domain(obj);
3732
Chris Wilson05394f32010-11-08 19:18:58 +00003733 old_write_domain = obj->base.write_domain;
3734 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003735
Eric Anholte47c68e2008-11-14 13:35:19 -08003736 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003737 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003738 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003739
Chris Wilson05394f32010-11-08 19:18:58 +00003740 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003741 }
3742
3743 /* It should now be out of any other write domains, and we can update
3744 * the domain values for our changes.
3745 */
Chris Wilson05394f32010-11-08 19:18:58 +00003746 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003747
3748 /* If we're writing through the CPU, then the GPU read domains will
3749 * need to be invalidated at next use.
3750 */
3751 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003752 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3753 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003754 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003755
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003756 trace_i915_gem_object_change_domain(obj,
3757 old_read_domains,
3758 old_write_domain);
3759
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003760 return 0;
3761}
3762
Eric Anholt673a3942008-07-30 12:06:12 -07003763/* Throttle our rendering by waiting until the ring has completed our requests
3764 * emitted over 20 msec ago.
3765 *
Eric Anholtb9624422009-06-03 07:27:35 +00003766 * Note that if we were to use the current jiffies each time around the loop,
3767 * we wouldn't escape the function with any frames outstanding if the time to
3768 * render a frame was over 20ms.
3769 *
Eric Anholt673a3942008-07-30 12:06:12 -07003770 * This should get us reasonable parallelism between CPU and GPU but also
3771 * relatively low latency when blocking on a particular request to finish.
3772 */
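/*
 * Put differently (summary of the loop below): we remember the most recent
 * request that was emitted more than 20ms ago and wait for it, so each
 * client keeps at most roughly 20ms of work queued ahead of the GPU.
 */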
3773static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003774i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003775{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003776 struct drm_i915_private *dev_priv = dev->dev_private;
3777 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003778 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003779 struct drm_i915_gem_request *request;
3780 struct intel_ring_buffer *ring = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01003781 unsigned reset_counter;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003782 u32 seqno = 0;
3783 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003784
Daniel Vetter308887a2012-11-14 17:14:06 +01003785 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3786 if (ret)
3787 return ret;
3788
3789 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3790 if (ret)
3791 return ret;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003792
Chris Wilson1c255952010-09-26 11:03:27 +01003793 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003794 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003795 if (time_after_eq(request->emitted_jiffies, recent_enough))
3796 break;
3797
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003798 ring = request->ring;
3799 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003800 }
Daniel Vetterf69061b2012-12-06 09:01:42 +01003801 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson1c255952010-09-26 11:03:27 +01003802 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003803
3804 if (seqno == 0)
3805 return 0;
3806
Chris Wilsonb29c19b2013-09-25 17:34:56 +01003807 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003808 if (ret == 0)
3809 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003810
Eric Anholt673a3942008-07-30 12:06:12 -07003811 return ret;
3812}
3813
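/*
 * Pinning sketch (illustrative): PIN_MAPPABLE and PIN_GLOBAL only make
 * sense for the global GTT, hence the WARN below.  A GGTT pin, e.g. via
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
 *
 * reuses an existing vma when its placement still satisfies the alignment
 * and mappability request, and otherwise unbinds it and binds the object
 * afresh before bumping vma->pin_count.
 */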
Eric Anholt673a3942008-07-30 12:06:12 -07003814int
Chris Wilson05394f32010-11-08 19:18:58 +00003815i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07003816 struct i915_address_space *vm,
Chris Wilson05394f32010-11-08 19:18:58 +00003817 uint32_t alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003818 unsigned flags)
Eric Anholt673a3942008-07-30 12:06:12 -07003819{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003820 struct i915_vma *vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003821 int ret;
3822
Daniel Vetterbf3d1492014-02-14 14:01:12 +01003823 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003824 return -EINVAL;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003825
3826 vma = i915_gem_obj_to_vma(obj, vm);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003827 if (vma) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003828 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3829 return -EBUSY;
3830
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003831 if ((alignment &&
3832 vma->node.start & (alignment - 1)) ||
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003833 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003834 WARN(vma->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003835 "bo is already pinned with incorrect alignment:"
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003836 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003837 " obj->map_and_fenceable=%d\n",
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003838 i915_gem_obj_offset(obj, vm), alignment,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003839 flags & PIN_MAPPABLE,
Chris Wilson05394f32010-11-08 19:18:58 +00003840 obj->map_and_fenceable);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003841 ret = i915_vma_unbind(vma);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003842 if (ret)
3843 return ret;
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003844
3845 vma = NULL;
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003846 }
3847 }
3848
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003849 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
Daniel Vetter262de142014-02-14 14:01:20 +01003850 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
3851 if (IS_ERR(vma))
3852 return PTR_ERR(vma);
Chris Wilson22c344e2009-02-11 14:26:45 +00003853 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003854
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003855 if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
3856 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
Daniel Vetter74898d72012-02-15 23:50:22 +01003857
Daniel Vetter8ea99c92014-02-14 14:01:21 +01003858 vma->pin_count++;
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003859 if (flags & PIN_MAPPABLE)
3860		obj->pin_mappable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07003861
3862 return 0;
3863}
3864
3865void
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003866i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003867{
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003868 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003869
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003870 BUG_ON(!vma);
3871 BUG_ON(vma->pin_count == 0);
3872 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3873
3874 if (--vma->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003875 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003876}
3877
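/*
 * The pin/unpin ioctls below are a legacy (pre-KMS) interface: they are
 * rejected with -ENODEV on gen6+ and otherwise wrap the GGTT pin/unpin
 * helpers above, with per-file ownership tracked in obj->pin_filp and
 * obj->user_pin_count.
 */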
3878int
3879i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003880 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003881{
3882 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003883 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003884 int ret;
3885
Daniel Vetter02f6bcc2013-12-18 16:30:22 +01003886 if (INTEL_INFO(dev)->gen >= 6)
3887 return -ENODEV;
3888
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003889 ret = i915_mutex_lock_interruptible(dev);
3890 if (ret)
3891 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003892
Chris Wilson05394f32010-11-08 19:18:58 +00003893 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003894 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003895 ret = -ENOENT;
3896 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003897 }
Eric Anholt673a3942008-07-30 12:06:12 -07003898
Chris Wilson05394f32010-11-08 19:18:58 +00003899 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003900 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
Chris Wilson8c99e572014-01-31 11:34:58 +00003901 ret = -EFAULT;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003902 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003903 }
3904
Chris Wilson05394f32010-11-08 19:18:58 +00003905 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003906 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
Jesse Barnes79e53942008-11-07 14:24:08 -08003907 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003908 ret = -EINVAL;
3909 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003910 }
3911
Daniel Vetteraa5f8022013-10-10 14:46:37 +02003912 if (obj->user_pin_count == ULONG_MAX) {
3913 ret = -EBUSY;
3914 goto out;
3915 }
3916
Chris Wilson93be8782013-01-02 10:31:22 +00003917 if (obj->user_pin_count == 0) {
Daniel Vetter1ec9e262014-02-14 14:01:11 +01003918 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003919 if (ret)
3920 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003921 }
3922
Chris Wilson93be8782013-01-02 10:31:22 +00003923 obj->user_pin_count++;
3924 obj->pin_filp = file;
3925
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003926 args->offset = i915_gem_obj_ggtt_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003927out:
Chris Wilson05394f32010-11-08 19:18:58 +00003928 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003929unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003930 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003931 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003932}
3933
3934int
3935i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003936 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003937{
3938 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003939 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003940 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003941
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003942 ret = i915_mutex_lock_interruptible(dev);
3943 if (ret)
3944 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003945
Chris Wilson05394f32010-11-08 19:18:58 +00003946 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003947 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003948 ret = -ENOENT;
3949 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003950 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003951
Chris Wilson05394f32010-11-08 19:18:58 +00003952 if (obj->pin_filp != file) {
Chris Wilsonbd9b6a42014-02-10 09:03:50 +00003953 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
Jesse Barnes79e53942008-11-07 14:24:08 -08003954 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003955 ret = -EINVAL;
3956 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003957 }
Chris Wilson05394f32010-11-08 19:18:58 +00003958 obj->user_pin_count--;
3959 if (obj->user_pin_count == 0) {
3960 obj->pin_filp = NULL;
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08003961 i915_gem_object_ggtt_unpin(obj);
Jesse Barnes79e53942008-11-07 14:24:08 -08003962 }
Eric Anholt673a3942008-07-30 12:06:12 -07003963
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003964out:
Chris Wilson05394f32010-11-08 19:18:58 +00003965 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003966unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003967 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003968 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003969}
3970
3971int
3972i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003973 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003974{
3975 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003976 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003977 int ret;
3978
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003979 ret = i915_mutex_lock_interruptible(dev);
3980 if (ret)
3981 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003982
Chris Wilson05394f32010-11-08 19:18:58 +00003983 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003984 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003985 ret = -ENOENT;
3986 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003987 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003988
Chris Wilson0be555b2010-08-04 15:36:30 +01003989 /* Count all active objects as busy, even if they are currently not used
3990 * by the gpu. Users of this interface expect objects to eventually
3991 * become non-busy without any further actions, therefore emit any
3992 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08003993 */
Daniel Vetter30dfebf2012-06-01 15:21:23 +02003994 ret = i915_gem_object_flush_active(obj);
3995
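	/*
	 * Resulting encoding (sketch): bit 0 reports obj->active, and the
	 * upper 16 bits carry intel_ring_flag() of the last ring to touch
	 * the object, letting userspace see which engine it is busy on.
	 */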
Chris Wilson05394f32010-11-08 19:18:58 +00003996 args->busy = obj->active;
Chris Wilsone9808ed2012-07-04 12:25:08 +01003997 if (obj->ring) {
3998 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3999 args->busy |= intel_ring_flag(obj->ring) << 16;
4000 }
Eric Anholt673a3942008-07-30 12:06:12 -07004001
Chris Wilson05394f32010-11-08 19:18:58 +00004002 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004003unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07004004 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004005 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004006}
4007
4008int
4009i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4010 struct drm_file *file_priv)
4011{
Akshay Joshi0206e352011-08-16 15:34:10 -04004012 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004013}
4014
Chris Wilson3ef94da2009-09-14 16:50:29 +01004015int
4016i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4017 struct drm_file *file_priv)
4018{
4019 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00004020 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004021 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004022
4023 switch (args->madv) {
4024 case I915_MADV_DONTNEED:
4025 case I915_MADV_WILLNEED:
4026 break;
4027 default:
4028 return -EINVAL;
4029 }
4030
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004031 ret = i915_mutex_lock_interruptible(dev);
4032 if (ret)
4033 return ret;
4034
Chris Wilson05394f32010-11-08 19:18:58 +00004035 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00004036 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004037 ret = -ENOENT;
4038 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004039 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01004040
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004041 if (i915_gem_obj_is_pinned(obj)) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004042 ret = -EINVAL;
4043 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004044 }
4045
Chris Wilson05394f32010-11-08 19:18:58 +00004046 if (obj->madv != __I915_MADV_PURGED)
4047 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004048
Chris Wilson6c085a72012-08-20 11:40:46 +02004049 /* if the object is no longer attached, discard its backing storage */
4050 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01004051 i915_gem_object_truncate(obj);
4052
Chris Wilson05394f32010-11-08 19:18:58 +00004053 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004054
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004055out:
Chris Wilson05394f32010-11-08 19:18:58 +00004056 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004057unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01004058 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01004059 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004060}
4061
Chris Wilson37e680a2012-06-07 15:38:42 +01004062void i915_gem_object_init(struct drm_i915_gem_object *obj,
4063 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01004064{
Ben Widawsky35c20a62013-05-31 11:28:48 -07004065 INIT_LIST_HEAD(&obj->global_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004066 INIT_LIST_HEAD(&obj->ring_list);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02004067 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004068 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01004069
Chris Wilson37e680a2012-06-07 15:38:42 +01004070 obj->ops = ops;
4071
Chris Wilson0327d6b2012-08-11 15:41:06 +01004072 obj->fence_reg = I915_FENCE_REG_NONE;
4073 obj->madv = I915_MADV_WILLNEED;
4074 /* Avoid an unnecessary call to unbind on the first bind. */
4075 obj->map_and_fenceable = true;
4076
4077 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4078}
4079
Chris Wilson37e680a2012-06-07 15:38:42 +01004080static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4081 .get_pages = i915_gem_object_get_pages_gtt,
4082 .put_pages = i915_gem_object_put_pages_gtt,
4083};
4084
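/*
 * Allocation sketch: a new shmem-backed object starts in the CPU read/write
 * domain, defaults to I915_CACHE_LLC on HAS_LLC() parts (uncached
 * otherwise), and on Broadwater/Crestline has __GFP_DMA32 applied because
 * those chips cannot relocate objects above 4GiB.  Illustrative call:
 *
 *	obj = i915_gem_alloc_object(dev, PAGE_ALIGN(size));
 *	if (obj == NULL)
 *		return -ENOMEM;
 */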
Chris Wilson05394f32010-11-08 19:18:58 +00004085struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4086 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004087{
Daniel Vetterc397b902010-04-09 19:05:07 +00004088 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004089 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004090 gfp_t mask;
Daniel Vetterc397b902010-04-09 19:05:07 +00004091
Chris Wilson42dcedd2012-11-15 11:32:30 +00004092 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004093 if (obj == NULL)
4094 return NULL;
4095
4096 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
Chris Wilson42dcedd2012-11-15 11:32:30 +00004097 i915_gem_object_free(obj);
Daniel Vetterc397b902010-04-09 19:05:07 +00004098 return NULL;
4099 }
4100
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004101 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4102 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4103 /* 965gm cannot relocate objects above 4GiB. */
4104 mask &= ~__GFP_HIGHMEM;
4105 mask |= __GFP_DMA32;
4106 }
4107
Al Viro496ad9a2013-01-23 17:07:38 -05004108 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004109 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004110
Chris Wilson37e680a2012-06-07 15:38:42 +01004111 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004112
Daniel Vetterc397b902010-04-09 19:05:07 +00004113 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4114 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4115
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004116 if (HAS_LLC(dev)) {
4117 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004118 * cache) for about a 10% performance improvement
4119 * compared to uncached. Graphics requests other than
4120 * display scanout are coherent with the CPU in
4121 * accessing this cache. This means in this mode we
4122 * don't need to clflush on the CPU side, and on the
4123 * GPU side we only need to flush internal caches to
4124 * get data visible to the CPU.
4125 *
4126 * However, we maintain the display planes as UC, and so
4127 * need to rebind when first used as such.
4128 */
4129 obj->cache_level = I915_CACHE_LLC;
4130 } else
4131 obj->cache_level = I915_CACHE_NONE;
4132
Daniel Vetterd861e332013-07-24 23:25:03 +02004133 trace_i915_gem_object_create(obj);
4134
Chris Wilson05394f32010-11-08 19:18:58 +00004135 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004136}
4137
Chris Wilson1488fc02012-04-24 15:47:31 +01004138void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004139{
Chris Wilson1488fc02012-04-24 15:47:31 +01004140 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004141 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01004142 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004143 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004144
Paulo Zanonif65c9162013-11-27 18:20:34 -02004145 intel_runtime_pm_get(dev_priv);
4146
Chris Wilson26e12f82011-03-20 11:20:19 +00004147 trace_i915_gem_object_destroy(obj);
4148
Chris Wilson1488fc02012-04-24 15:47:31 +01004149 if (obj->phys_obj)
4150 i915_gem_detach_phys_object(dev, obj);
4151
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004152 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
Ben Widawskyd7f46fc2013-12-06 14:10:55 -08004153 int ret;
4154
4155 vma->pin_count = 0;
4156 ret = i915_vma_unbind(vma);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004157 if (WARN_ON(ret == -ERESTARTSYS)) {
4158 bool was_interruptible;
Chris Wilson1488fc02012-04-24 15:47:31 +01004159
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004160 was_interruptible = dev_priv->mm.interruptible;
4161 dev_priv->mm.interruptible = false;
Chris Wilson1488fc02012-04-24 15:47:31 +01004162
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004163 WARN_ON(i915_vma_unbind(vma));
Chris Wilson1488fc02012-04-24 15:47:31 +01004164
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004165 dev_priv->mm.interruptible = was_interruptible;
4166 }
Chris Wilson1488fc02012-04-24 15:47:31 +01004167 }
4168
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004169	/* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4170 * before progressing. */
4171 if (obj->stolen)
4172 i915_gem_object_unpin_pages(obj);
4173
Ben Widawsky401c29f2013-05-31 11:28:47 -07004174 if (WARN_ON(obj->pages_pin_count))
4175 obj->pages_pin_count = 0;
Chris Wilson37e680a2012-06-07 15:38:42 +01004176 i915_gem_object_put_pages(obj);
Chris Wilsond8cb5082012-08-11 15:41:03 +01004177 i915_gem_object_free_mmap_offset(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +00004178 i915_gem_object_release_stolen(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004179
Chris Wilson9da3da62012-06-01 15:20:22 +01004180 BUG_ON(obj->pages);
4181
Chris Wilson2f745ad2012-09-04 21:02:58 +01004182 if (obj->base.import_attach)
4183 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004184
Chris Wilson05394f32010-11-08 19:18:58 +00004185 drm_gem_object_release(&obj->base);
4186 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004187
Chris Wilson05394f32010-11-08 19:18:58 +00004188 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004189 i915_gem_object_free(obj);
Paulo Zanonif65c9162013-11-27 18:20:34 -02004190
4191 intel_runtime_pm_put(dev_priv);
Chris Wilsonbe726152010-07-23 23:18:50 +01004192}
4193
Daniel Vettere656a6c2013-08-14 14:14:04 +02004194struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
Ben Widawsky2f633152013-07-17 12:19:03 -07004195 struct i915_address_space *vm)
4196{
Daniel Vettere656a6c2013-08-14 14:14:04 +02004197 struct i915_vma *vma;
4198 list_for_each_entry(vma, &obj->vma_list, vma_link)
4199 if (vma->vm == vm)
4200 return vma;
4201
4202 return NULL;
4203}
4204
Ben Widawsky2f633152013-07-17 12:19:03 -07004205void i915_gem_vma_destroy(struct i915_vma *vma)
4206{
4207 WARN_ON(vma->node.allocated);
Chris Wilsonaaa05662013-08-20 12:56:40 +01004208
4209 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4210 if (!list_empty(&vma->exec_list))
4211 return;
4212
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004213 list_del(&vma->vma_link);
Daniel Vetterb93dab62013-08-26 11:23:47 +02004214
Ben Widawsky2f633152013-07-17 12:19:03 -07004215 kfree(vma);
4216}
4217
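/*
 * Suspend path summary: idle the GPU, retire requests, evict everything
 * when running UMS, then tear down the rings and cancel the hangcheck
 * timer and the retire/idle workers.  struct_mutex is taken and dropped
 * internally, so callers must not already hold it.
 */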
Jesse Barnes5669fca2009-02-17 15:13:31 -08004218int
Chris Wilson45c5f202013-10-16 11:50:01 +01004219i915_gem_suspend(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004220{
4221 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson45c5f202013-10-16 11:50:01 +01004222 int ret = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004223
Chris Wilson45c5f202013-10-16 11:50:01 +01004224 mutex_lock(&dev->struct_mutex);
Chris Wilsonf7403342013-09-13 23:57:04 +01004225 if (dev_priv->ums.mm_suspended)
Chris Wilson45c5f202013-10-16 11:50:01 +01004226 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07004227
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004228 ret = i915_gpu_idle(dev);
Chris Wilsonf7403342013-09-13 23:57:04 +01004229 if (ret)
Chris Wilson45c5f202013-10-16 11:50:01 +01004230 goto err;
Chris Wilsonf7403342013-09-13 23:57:04 +01004231
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004232 i915_gem_retire_requests(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004233
Chris Wilson29105cc2010-01-07 10:39:13 +00004234 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01004235 if (!drm_core_check_feature(dev, DRIVER_MODESET))
Chris Wilson6c085a72012-08-20 11:40:46 +02004236 i915_gem_evict_everything(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004237
Chris Wilson29105cc2010-01-07 10:39:13 +00004238 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004239 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004240
Chris Wilson45c5f202013-10-16 11:50:01 +01004241 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4242 * We need to replace this with a semaphore, or something.
4243 * And not confound ums.mm_suspended!
4244 */
4245 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4246 DRIVER_MODESET);
4247 mutex_unlock(&dev->struct_mutex);
4248
4249 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004250 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
Chris Wilsonb29c19b2013-09-25 17:34:56 +01004251 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
Chris Wilson29105cc2010-01-07 10:39:13 +00004252
Eric Anholt673a3942008-07-30 12:06:12 -07004253 return 0;
Chris Wilson45c5f202013-10-16 11:50:01 +01004254
4255err:
4256 mutex_unlock(&dev->struct_mutex);
4257 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004258}
4259
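/*
 * L3 remap sketch: the per-slice remapping table is rewritten with one
 * MI_LOAD_REGISTER_IMM per dword, roughly
 *
 *	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4)
 *		LRI(GEN7_L3LOG_BASE + slice * 0x200 + i, remap_info[i / 4]);
 *
 * which is why intel_ring_begin() reserves GEN7_L3LOG_SIZE / 4 * 3 dwords
 * (three dwords per register write).
 */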
Ben Widawskyc3787e22013-09-17 21:12:44 -07004260int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
Ben Widawskyb9524a12012-05-25 16:56:24 -07004261{
Ben Widawskyc3787e22013-09-17 21:12:44 -07004262 struct drm_device *dev = ring->dev;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004263 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004264 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4265 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
Ben Widawskyc3787e22013-09-17 21:12:44 -07004266 int i, ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004267
Ben Widawsky040d2ba2013-09-19 11:01:40 -07004268 if (!HAS_L3_DPF(dev) || !remap_info)
Ben Widawskyc3787e22013-09-17 21:12:44 -07004269 return 0;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004270
Ben Widawskyc3787e22013-09-17 21:12:44 -07004271 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4272 if (ret)
4273 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004274
Ben Widawskyc3787e22013-09-17 21:12:44 -07004275 /*
4276 * Note: We do not worry about the concurrent register cacheline hang
4277 * here because no other code should access these registers other than
4278 * at initialization time.
4279 */
Ben Widawskyb9524a12012-05-25 16:56:24 -07004280 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
Ben Widawskyc3787e22013-09-17 21:12:44 -07004281 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4282 intel_ring_emit(ring, reg_base + i);
4283 intel_ring_emit(ring, remap_info[i/4]);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004284 }
4285
Ben Widawskyc3787e22013-09-17 21:12:44 -07004286 intel_ring_advance(ring);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004287
Ben Widawskyc3787e22013-09-17 21:12:44 -07004288 return ret;
Ben Widawskyb9524a12012-05-25 16:56:24 -07004289}
4290
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004291void i915_gem_init_swizzling(struct drm_device *dev)
4292{
4293 drm_i915_private_t *dev_priv = dev->dev_private;
4294
Daniel Vetter11782b02012-01-31 16:47:55 +01004295 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004296 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4297 return;
4298
4299 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4300 DISP_TILE_SURFACE_SWIZZLING);
4301
Daniel Vetter11782b02012-01-31 16:47:55 +01004302 if (IS_GEN5(dev))
4303 return;
4304
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004305 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4306 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004307 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004308 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004309 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky31a53362013-11-02 21:07:04 -07004310 else if (IS_GEN8(dev))
4311 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
Ben Widawsky8782e262012-12-18 10:31:23 -08004312 else
4313 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004314}
Daniel Vettere21af882012-02-09 20:53:27 +01004315
Chris Wilson67b1b572012-07-05 23:49:40 +01004316static bool
4317intel_enable_blt(struct drm_device *dev)
4318{
4319 if (!HAS_BLT(dev))
4320 return false;
4321
4322 /* The blitter was dysfunctional on early prototypes */
4323 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4324 DRM_INFO("BLT not supported on this pre-production hardware;"
4325 " graphics performance will be degraded.\n");
4326 return false;
4327 }
4328
4329 return true;
4330}
4331
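/*
 * Ring bring-up below is ordered render -> BSD -> BLT -> VEBOX (each only
 * where the hardware has it), with error unwinding in reverse; the final
 * i915_gem_set_seqno() seeds the seqno space just below wrap-around so
 * that wrap handling is exercised early after load.
 */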
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004332static int i915_gem_init_rings(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004333{
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004334 struct drm_i915_private *dev_priv = dev->dev_private;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004335 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004336
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004337 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004338 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00004339 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004340
4341 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004342 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004343 if (ret)
4344 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004345 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004346
Chris Wilson67b1b572012-07-05 23:49:40 +01004347 if (intel_enable_blt(dev)) {
Chris Wilson549f7362010-10-19 11:19:32 +01004348 ret = intel_init_blt_ring_buffer(dev);
4349 if (ret)
4350 goto cleanup_bsd_ring;
4351 }
4352
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004353 if (HAS_VEBOX(dev)) {
4354 ret = intel_init_vebox_ring_buffer(dev);
4355 if (ret)
4356 goto cleanup_blt_ring;
4357 }
4358
4359
Mika Kuoppala99433932013-01-22 14:12:17 +02004360 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4361 if (ret)
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004362 goto cleanup_vebox_ring;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004363
4364 return 0;
4365
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004366cleanup_vebox_ring:
4367 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004368cleanup_blt_ring:
4369 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4370cleanup_bsd_ring:
4371 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4372cleanup_render_ring:
4373 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4374
4375 return ret;
4376}
4377
4378int
4379i915_gem_init_hw(struct drm_device *dev)
4380{
4381 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07004382 int ret, i;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004383
4384 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4385 return -EIO;
4386
Ben Widawsky59124502013-07-04 11:02:05 -07004387 if (dev_priv->ellc_size)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004388 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004389
Ville Syrjälä0bf21342013-11-29 14:56:12 +02004390 if (IS_HASWELL(dev))
4391 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4392 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
Rodrigo Vivi94353732013-08-28 16:45:46 -03004393
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004394 if (HAS_PCH_NOP(dev)) {
Daniel Vetter6ba844b2014-01-22 23:39:30 +01004395 if (IS_IVYBRIDGE(dev)) {
4396 u32 temp = I915_READ(GEN7_MSG_CTL);
4397 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4398 I915_WRITE(GEN7_MSG_CTL, temp);
4399 } else if (INTEL_INFO(dev)->gen >= 7) {
4400 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4401 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4402 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4403 }
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004404 }
4405
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004406 i915_gem_init_swizzling(dev);
4407
4408 ret = i915_gem_init_rings(dev);
4409 if (ret)
Mika Kuoppala99433932013-01-22 14:12:17 +02004410 return ret;
4411
Ben Widawskyc3787e22013-09-17 21:12:44 -07004412 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4413 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4414
Ben Widawsky254f9652012-06-04 14:42:42 -07004415 /*
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004416 * XXX: Contexts should only be initialized once. Doing a switch to the
4417	 * default context, however, is something we'd like to do after
4418 * reset or thaw (the latter may not actually be necessary for HW, but
4419 * goes with our code better). Context switching requires rings (for
4420 * the do_switch), but before enabling PPGTT. So don't move this.
Ben Widawsky254f9652012-06-04 14:42:42 -07004421 */
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004422 ret = i915_gem_context_enable(dev_priv);
Ben Widawsky8245be32013-11-06 13:56:29 -02004423 if (ret) {
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004424 DRM_ERROR("Context enable failed %d\n", ret);
4425 goto err_out;
Ben Widawskyb7c36d22013-04-08 18:43:56 -07004426 }
Daniel Vettere21af882012-02-09 20:53:27 +01004427
Chris Wilson68f95ba2010-05-27 13:18:22 +01004428 return 0;
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004429
4430err_out:
4431 i915_gem_cleanup_ringbuffer(dev);
4432 return ret;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004433}
4434
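/*
 * Overall init order below: set up the global GTT, then contexts, then the
 * hardware (swizzling, rings, L3 remap, default context enable); on failure
 * the context state and the GTT mm are torn down again before returning.
 */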
Chris Wilson1070a422012-04-24 15:47:41 +01004435int i915_gem_init(struct drm_device *dev)
4436{
4437 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson1070a422012-04-24 15:47:41 +01004438 int ret;
4439
Chris Wilson1070a422012-04-24 15:47:41 +01004440 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004441
4442 if (IS_VALLEYVIEW(dev)) {
4443 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4444 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4445 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4446 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4447 }
4448
Ben Widawskyd7e50082012-12-18 10:31:25 -08004449 i915_gem_init_global_gtt(dev);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004450
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004451 ret = i915_gem_context_init(dev);
Mika Kuoppalae3848692014-01-31 17:14:02 +02004452 if (ret) {
4453 mutex_unlock(&dev->struct_mutex);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004454 return ret;
Mika Kuoppalae3848692014-01-31 17:14:02 +02004455 }
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004456
Chris Wilson1070a422012-04-24 15:47:41 +01004457 ret = i915_gem_init_hw(dev);
4458 mutex_unlock(&dev->struct_mutex);
4459 if (ret) {
Ben Widawskybdf4fd72013-12-06 14:11:18 -08004460 WARN_ON(dev_priv->mm.aliasing_ppgtt);
Ben Widawsky2fa48d82013-12-06 14:11:04 -08004461 i915_gem_context_fini(dev);
Ben Widawskyc39538a2013-12-06 14:10:50 -08004462 drm_mm_takedown(&dev_priv->gtt.base.mm);
Chris Wilson1070a422012-04-24 15:47:41 +01004463 return ret;
4464 }
4465
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004466 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4467 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4468 dev_priv->dri1.allow_batchbuffer = 1;
Chris Wilson1070a422012-04-24 15:47:41 +01004469 return 0;
4470}
4471
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004472void
4473i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4474{
4475 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004476 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004477 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004478
Chris Wilsonb4519512012-05-11 14:29:30 +01004479 for_each_ring(ring, dev_priv, i)
4480 intel_cleanup_ring_buffer(ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004481}
4482
4483int
Eric Anholt673a3942008-07-30 12:06:12 -07004484i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4485 struct drm_file *file_priv)
4486{
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004487 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004488 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004489
Jesse Barnes79e53942008-11-07 14:24:08 -08004490 if (drm_core_check_feature(dev, DRIVER_MODESET))
4491 return 0;
4492
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004493 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004494 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004495 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004496 }
4497
Eric Anholt673a3942008-07-30 12:06:12 -07004498 mutex_lock(&dev->struct_mutex);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004499 dev_priv->ums.mm_suspended = 0;
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004500
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004501 ret = i915_gem_init_hw(dev);
Wu Fengguangd816f6ac2009-04-18 10:43:32 +08004502 if (ret != 0) {
4503 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004504 return ret;
Wu Fengguangd816f6ac2009-04-18 10:43:32 +08004505 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004506
Ben Widawsky5cef07e2013-07-16 16:50:08 -07004507 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004508 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004509
Chris Wilson5f353082010-06-07 14:03:03 +01004510 ret = drm_irq_install(dev);
4511 if (ret)
4512 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004513
Eric Anholt673a3942008-07-30 12:06:12 -07004514 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01004515
4516cleanup_ringbuffer:
4517 mutex_lock(&dev->struct_mutex);
4518 i915_gem_cleanup_ringbuffer(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004519 dev_priv->ums.mm_suspended = 1;
Chris Wilson5f353082010-06-07 14:03:03 +01004520 mutex_unlock(&dev->struct_mutex);
4521
4522 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004523}
4524
4525int
4526i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4527 struct drm_file *file_priv)
4528{
Jesse Barnes79e53942008-11-07 14:24:08 -08004529 if (drm_core_check_feature(dev, DRIVER_MODESET))
4530 return 0;
4531
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004532 drm_irq_uninstall(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004533
Chris Wilson45c5f202013-10-16 11:50:01 +01004534 return i915_gem_suspend(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004535}
4536
4537void
4538i915_gem_lastclose(struct drm_device *dev)
4539{
4540 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004541
Eric Anholte806b492009-01-22 09:56:58 -08004542 if (drm_core_check_feature(dev, DRIVER_MODESET))
4543 return;
4544
Chris Wilson45c5f202013-10-16 11:50:01 +01004545 ret = i915_gem_suspend(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004546 if (ret)
4547 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004548}
4549
Chris Wilson64193402010-10-24 12:38:05 +01004550static void
4551init_ring_lists(struct intel_ring_buffer *ring)
4552{
4553 INIT_LIST_HEAD(&ring->active_list);
4554 INIT_LIST_HEAD(&ring->request_list);
Chris Wilson64193402010-10-24 12:38:05 +01004555}
4556
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08004557void i915_init_vm(struct drm_i915_private *dev_priv,
4558 struct i915_address_space *vm)
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004559{
Ben Widawsky7e0d96b2013-12-06 14:11:26 -08004560 if (!i915_is_ggtt(vm))
4561 drm_mm_init(&vm->mm, vm->start, vm->total);
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004562 vm->dev = dev_priv->dev;
4563 INIT_LIST_HEAD(&vm->active_list);
4564 INIT_LIST_HEAD(&vm->inactive_list);
4565 INIT_LIST_HEAD(&vm->global_link);
Chris Wilsonf72d21e2014-01-09 22:57:22 +00004566 list_add_tail(&vm->global_link, &dev_priv->vm_list);
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004567}
4568
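/*
 * i915_gem_load() summary: create the object slab, initialise the GGTT VM
 * and per-ring lists, set up the retire and idle delayed works and the
 * shrinker, and size the fence register pool by generation (32 on gen7+
 * except Valleyview, 16 on gen4+/945/G33, 8 otherwise).
 */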
void
i915_gem_load(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        dev_priv->slab =
                kmem_cache_create("i915_gem_object",
                                  sizeof(struct drm_i915_gem_object), 0,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);

        INIT_LIST_HEAD(&dev_priv->vm_list);
        i915_init_vm(dev_priv, &dev_priv->gtt.base);

        INIT_LIST_HEAD(&dev_priv->context_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        for (i = 0; i < I915_NUM_RINGS; i++)
                init_ring_lists(&dev_priv->ring[i]);
        for (i = 0; i < I915_MAX_NUM_FENCES; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
                          i915_gem_idle_work_handler);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
        if (IS_GEN3(dev)) {
                I915_WRITE(MI_ARB_STATE,
                           _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
        }

        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

        /* Old X drivers will take 0-2 for front, back, depth buffers */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev_priv->fence_reg_start = 3;

        if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
                dev_priv->num_fence_regs = 32;
        else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;

        /* Initialize fence registers to zero */
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        i915_gem_restore_fences(dev);

        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);

        dev_priv->mm.interruptible = true;

        dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
        dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
        dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
                                     int id, int size, int align)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;
        int ret;

        if (dev_priv->mm.phys_objs[id - 1] || !size)
                return 0;

        phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
        if (!phys_obj)
                return -ENOMEM;

        phys_obj->id = id;

        phys_obj->handle = drm_pci_alloc(dev, size, align);
        if (!phys_obj->handle) {
                ret = -ENOMEM;
                goto kfree_obj;
        }
#ifdef CONFIG_X86
        set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

        dev_priv->mm.phys_objs[id - 1] = phys_obj;

        return 0;
kfree_obj:
        kfree(phys_obj);
        return ret;
}

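/*
 * Tear down one phys object: detach any GEM object still using it, restore
 * the kernel mapping to write-back and release the coherent allocation.
 */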
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;

        if (!dev_priv->mm.phys_objs[id - 1])
                return;

        phys_obj = dev_priv->mm.phys_objs[id - 1];
        if (phys_obj->cur_obj) {
                i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
        }

#ifdef CONFIG_X86
        set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
        drm_pci_free(dev, phys_obj->handle);
        kfree(phys_obj);
        dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
        int i;

        for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
                i915_gem_free_phys_object(dev, i);
}

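/*
 * Move an object's contents back to its shmem pages: copy each page out of
 * the contiguous buffer, flush it, and drop the phys object association.
 */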
void i915_gem_detach_phys_object(struct drm_device *dev,
                                 struct drm_i915_gem_object *obj)
{
        struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
        char *vaddr;
        int i;
        int page_count;

        if (!obj->phys_obj)
                return;
        vaddr = obj->phys_obj->handle->vaddr;

        page_count = obj->base.size / PAGE_SIZE;
        for (i = 0; i < page_count; i++) {
                struct page *page = shmem_read_mapping_page(mapping, i);
                if (!IS_ERR(page)) {
                        char *dst = kmap_atomic(page);
                        memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
                        kunmap_atomic(dst);

                        drm_clflush_pages(&page, 1);

                        set_page_dirty(page);
                        mark_page_accessed(page);
                        page_cache_release(page);
                }
        }
        i915_gem_chipset_flush(dev);

        obj->phys_obj->cur_obj = NULL;
        obj->phys_obj = NULL;
}

int
i915_gem_attach_phys_object(struct drm_device *dev,
                            struct drm_i915_gem_object *obj,
                            int id,
                            int align)
{
        struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = 0;
        int page_count;
        int i;

        if (id > I915_MAX_PHYS_OBJECT)
                return -EINVAL;

        if (obj->phys_obj) {
                if (obj->phys_obj->id == id)
                        return 0;
                i915_gem_detach_phys_object(dev, obj);
        }

        /* create a new object */
        if (!dev_priv->mm.phys_objs[id - 1]) {
                ret = i915_gem_init_phys_object(dev, id,
                                                obj->base.size, align);
                if (ret) {
                        DRM_ERROR("failed to init phys object %d size: %zu\n",
                                  id, obj->base.size);
                        return ret;
                }
        }

        /* bind to the object */
        obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
        obj->phys_obj->cur_obj = obj;

        page_count = obj->base.size / PAGE_SIZE;

        for (i = 0; i < page_count; i++) {
                struct page *page;
                char *dst, *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
                        return PTR_ERR(page);

                src = kmap_atomic(page);
                dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(src);

                mark_page_accessed(page);
                page_cache_release(page);
        }

        return 0;
}

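/*
 * pwrite path for phys objects: first try a non-temporal copy straight into
 * the contiguous buffer; if that faults, drop struct_mutex and fall back to
 * a plain copy_from_user that is allowed to take page faults.
 */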
static int
i915_gem_phys_pwrite(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file_priv)
{
        void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
        char __user *user_data = to_user_ptr(args->data_ptr);

        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;

                /* The physical object once assigned is fixed for the lifetime
                 * of the obj, so we can safely drop the lock and continue
                 * to access vaddr.
                 */
                mutex_unlock(&dev->struct_mutex);
                unwritten = copy_from_user(vaddr, user_data, args->size);
                mutex_lock(&dev->struct_mutex);
                if (unwritten)
                        return -EFAULT;
        }

        i915_gem_chipset_flush(dev);
        return 0;
}

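/*
 * File-close hook: cancel the per-file idle work and unlink any requests
 * still on the per-file list, so request retirement cannot touch the
 * about-to-be-freed file_priv.
 */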
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        cancel_delayed_work_sync(&file_priv->mm.idle_work);

        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
        spin_lock(&file_priv->mm.lock);
        while (!list_empty(&file_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;

                request = list_first_entry(&file_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           client_list);
                list_del(&request->client_list);
                request->file_priv = NULL;
        }
        spin_unlock(&file_priv->mm.lock);
}

static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
        struct drm_i915_file_private *file_priv =
                container_of(work, typeof(*file_priv), mm.idle_work.work);

        atomic_set(&file_priv->rps_wait_boost, false);
}

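/*
 * Per-client setup on open(): allocate the file_priv, initialise its request
 * list and idle work, then open a hardware context for the client; on
 * failure the file_priv is freed and the error propagated.
 */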
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;
        int ret;

        DRM_DEBUG_DRIVER("\n");

        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                return -ENOMEM;

        file->driver_priv = file_priv;
        file_priv->dev_priv = dev->dev_private;
        file_priv->file = file;

        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);
        INIT_DELAYED_WORK(&file_priv->mm.idle_work,
                          i915_gem_file_idle_work_handler);

        ret = i915_gem_context_open(dev, file);
        if (ret)
                kfree(file_priv);

        return ret;
}

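/*
 * Best-effort check whether @task owns @mutex, used by the shrinker entry
 * points below to detect recursion when they run in a context that already
 * holds struct_mutex. Only reliable where the mutex owner field is tracked.
 */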
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
        if (!mutex_is_locked(mutex))
                return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
        return mutex->owner == task;
#else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
        return false;
#endif
}

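/*
 * Shrinker "count" callback: report how many pages look reclaimable, i.e.
 * pages of unbound objects plus those of bound but idle, unpinned objects.
 * Returns 0 when struct_mutex cannot be taken (or stolen) safely.
 */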
static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker,
                             struct drm_i915_private,
                             mm.inactive_shrinker);
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        bool unlock = true;
        unsigned long count;

        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return 0;

                if (dev_priv->mm.shrinker_no_lock_stealing)
                        return 0;

                unlock = false;
        }

        count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
                        count += obj->base.size >> PAGE_SHIFT;

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (obj->active)
                        continue;

                if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
                        count += obj->base.size >> PAGE_SHIFT;
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return count;
}

/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
                                  struct i915_address_space *vm)
{
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;

        if (!dev_priv->mm.aliasing_ppgtt ||
            vm == &dev_priv->mm.aliasing_ppgtt->base)
                vm = &dev_priv->gtt.base;

        BUG_ON(list_empty(&o->vma_list));
        list_for_each_entry(vma, &o->vma_list, vma_link) {
                if (vma->vm == vm)
                        return vma->node.start;
        }
        return -1;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
                        struct i915_address_space *vm)
{
        struct i915_vma *vma;

        list_for_each_entry(vma, &o->vma_list, vma_link)
                if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
                        return true;

        return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
        struct i915_vma *vma;

        list_for_each_entry(vma, &o->vma_list, vma_link)
                if (drm_mm_node_allocated(&vma->node))
                        return true;

        return false;
}

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
                                struct i915_address_space *vm)
{
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;

        if (!dev_priv->mm.aliasing_ppgtt ||
            vm == &dev_priv->mm.aliasing_ppgtt->base)
                vm = &dev_priv->gtt.base;

        BUG_ON(list_empty(&o->vma_list));

        list_for_each_entry(vma, &o->vma_list, vma_link)
                if (vma->vm == vm)
                        return vma->node.size;

        return 0;
}

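/*
 * Shrinker "scan" callback: try to free sc->nr_to_scan pages by purging
 * purgeable objects first, then unbinding and shrinking further, and finally
 * dropping everything that can be dropped. Returns SHRINK_STOP when the lock
 * cannot be taken without risking deadlock.
 */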
static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker,
                             struct drm_i915_private,
                             mm.inactive_shrinker);
        struct drm_device *dev = dev_priv->dev;
        unsigned long freed;
        bool unlock = true;

        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return SHRINK_STOP;

                if (dev_priv->mm.shrinker_no_lock_stealing)
                        return SHRINK_STOP;

                unlock = false;
        }

        freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
        if (freed < sc->nr_to_scan)
                freed += __i915_gem_shrink(dev_priv,
                                           sc->nr_to_scan - freed,
                                           false);
        if (freed < sc->nr_to_scan)
                freed += i915_gem_shrink_all(dev_priv);

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return freed;
}

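/*
 * Return the VMA binding @obj into the global GTT, if any. This relies on
 * the GGTT VMA being kept first on the object's vma_list; if the first VMA
 * belongs to another address space, the object has no GGTT binding.
 */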
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;

        if (WARN_ON(list_empty(&obj->vma_list)))
                return NULL;

        vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
        if (vma->vm != obj_to_ggtt(obj))
                return NULL;

        return vma;
}