blob: 399e159016e2eb4a62f6848acafbc0332e3d042a [file] [log] [blame]
Eric Anholt673a3942008-07-30 12:06:12 -07001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
David Howells760285e2012-10-02 18:01:07 +010028#include <drm/drmP.h>
David Herrmann0de23972013-07-24 21:07:52 +020029#include <drm/drm_vma_manager.h>
David Howells760285e2012-10-02 18:01:07 +010030#include <drm/i915_drm.h>
Eric Anholt673a3942008-07-30 12:06:12 -070031#include "i915_drv.h"
Chris Wilson1c5d22f2009-08-25 11:15:50 +010032#include "i915_trace.h"
Jesse Barnes652c3932009-08-17 13:31:43 -070033#include "intel_drv.h"
Hugh Dickins5949eac2011-06-27 16:18:18 -070034#include <linux/shmem_fs.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090035#include <linux/slab.h>
Eric Anholt673a3942008-07-30 12:06:12 -070036#include <linux/swap.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080037#include <linux/pci.h>
Daniel Vetter1286ff72012-05-10 15:25:09 +020038#include <linux/dma-buf.h>
Eric Anholt673a3942008-07-30 12:06:12 -070039
Chris Wilson05394f32010-11-08 19:18:58 +000040static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
Chris Wilson2c225692013-08-09 12:26:45 +010041static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
42 bool force);
Ben Widawsky07fe0b12013-07-31 17:00:10 -070043static __must_check int
44i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
45 struct i915_address_space *vm,
46 unsigned alignment,
47 bool map_and_fenceable,
48 bool nonblocking);
Chris Wilson05394f32010-11-08 19:18:58 +000049static int i915_gem_phys_pwrite(struct drm_device *dev,
50 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +100051 struct drm_i915_gem_pwrite *args,
Chris Wilson05394f32010-11-08 19:18:58 +000052 struct drm_file *file);
Eric Anholt673a3942008-07-30 12:06:12 -070053
Chris Wilson61050802012-04-17 15:31:31 +010054static void i915_gem_write_fence(struct drm_device *dev, int reg,
55 struct drm_i915_gem_object *obj);
56static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
57 struct drm_i915_fence_reg *fence,
58 bool enable);
59
Chris Wilson17250b72010-10-28 12:51:39 +010060static int i915_gem_inactive_shrink(struct shrinker *shrinker,
Ying Han1495f232011-05-24 17:12:27 -070061 struct shrink_control *sc);
Chris Wilson6c085a72012-08-20 11:40:46 +020062static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
63static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
Daniel Vetter8c599672011-12-14 13:57:31 +010064static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
Chris Wilson31169712009-09-14 16:50:28 +010065
Chris Wilsonc76ce032013-08-08 14:41:03 +010066static bool cpu_cache_is_coherent(struct drm_device *dev,
67 enum i915_cache_level level)
68{
69 return HAS_LLC(dev) || level != I915_CACHE_NONE;
70}
71
Chris Wilson2c225692013-08-09 12:26:45 +010072static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
73{
74 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
75 return true;
76
77 return obj->pin_display;
78}
79
Chris Wilson61050802012-04-17 15:31:31 +010080static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
81{
82 if (obj->tiling_mode)
83 i915_gem_release_mmap(obj);
84
85 /* As we do not have an associated fence register, we will force
86 * a tiling change if we ever need to acquire one.
87 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +010088 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +010089 obj->fence_reg = I915_FENCE_REG_NONE;
90}
91
Chris Wilson73aa8082010-09-30 11:46:12 +010092/* some bookkeeping */
93static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
94 size_t size)
95{
Daniel Vetterc20e8352013-07-24 22:40:23 +020096 spin_lock(&dev_priv->mm.object_stat_lock);
Chris Wilson73aa8082010-09-30 11:46:12 +010097 dev_priv->mm.object_count++;
98 dev_priv->mm.object_memory += size;
Daniel Vetterc20e8352013-07-24 22:40:23 +020099 spin_unlock(&dev_priv->mm.object_stat_lock);
Chris Wilson73aa8082010-09-30 11:46:12 +0100100}
101
102static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
103 size_t size)
104{
Daniel Vetterc20e8352013-07-24 22:40:23 +0200105 spin_lock(&dev_priv->mm.object_stat_lock);
Chris Wilson73aa8082010-09-30 11:46:12 +0100106 dev_priv->mm.object_count--;
107 dev_priv->mm.object_memory -= size;
Daniel Vetterc20e8352013-07-24 22:40:23 +0200108 spin_unlock(&dev_priv->mm.object_stat_lock);
Chris Wilson73aa8082010-09-30 11:46:12 +0100109}
110
Chris Wilson21dd3732011-01-26 15:55:56 +0000111static int
Daniel Vetter33196de2012-11-14 17:14:05 +0100112i915_gem_wait_for_error(struct i915_gpu_error *error)
Chris Wilson30dbf0c2010-09-25 10:19:17 +0100113{
Chris Wilson30dbf0c2010-09-25 10:19:17 +0100114 int ret;
115
Daniel Vetter7abb6902013-05-24 21:29:32 +0200116#define EXIT_COND (!i915_reset_in_progress(error) || \
117 i915_terminally_wedged(error))
Daniel Vetter1f83fee2012-11-15 17:17:22 +0100118 if (EXIT_COND)
Chris Wilson30dbf0c2010-09-25 10:19:17 +0100119 return 0;
120
Daniel Vetter0a6759c2012-07-04 22:18:41 +0200121 /*
122 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
123 * userspace. If it takes that long something really bad is going on and
124 * we should simply try to bail out and fail as gracefully as possible.
125 */
Daniel Vetter1f83fee2012-11-15 17:17:22 +0100126 ret = wait_event_interruptible_timeout(error->reset_queue,
127 EXIT_COND,
128 10*HZ);
Daniel Vetter0a6759c2012-07-04 22:18:41 +0200129 if (ret == 0) {
130 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
131 return -EIO;
132 } else if (ret < 0) {
Chris Wilson30dbf0c2010-09-25 10:19:17 +0100133 return ret;
Daniel Vetter0a6759c2012-07-04 22:18:41 +0200134 }
Daniel Vetter1f83fee2012-11-15 17:17:22 +0100135#undef EXIT_COND
Chris Wilson30dbf0c2010-09-25 10:19:17 +0100136
Chris Wilson21dd3732011-01-26 15:55:56 +0000137 return 0;
Chris Wilson30dbf0c2010-09-25 10:19:17 +0100138}
139
Chris Wilson54cf91d2010-11-25 18:00:26 +0000140int i915_mutex_lock_interruptible(struct drm_device *dev)
Chris Wilson76c1dec2010-09-25 11:22:51 +0100141{
Daniel Vetter33196de2012-11-14 17:14:05 +0100142 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson76c1dec2010-09-25 11:22:51 +0100143 int ret;
144
Daniel Vetter33196de2012-11-14 17:14:05 +0100145 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
Chris Wilson76c1dec2010-09-25 11:22:51 +0100146 if (ret)
147 return ret;
148
149 ret = mutex_lock_interruptible(&dev->struct_mutex);
150 if (ret)
151 return ret;
152
Chris Wilson23bc5982010-09-29 16:10:57 +0100153 WARN_ON(i915_verify_lists(dev));
Chris Wilson76c1dec2010-09-25 11:22:51 +0100154 return 0;
155}
Chris Wilson30dbf0c2010-09-25 10:19:17 +0100156
Chris Wilson7d1c4802010-08-07 21:45:03 +0100157static inline bool
Chris Wilson05394f32010-11-08 19:18:58 +0000158i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
Chris Wilson7d1c4802010-08-07 21:45:03 +0100159{
Ben Widawsky98438772013-07-31 17:00:12 -0700160 return i915_gem_obj_bound_any(obj) && !obj->active;
Chris Wilson7d1c4802010-08-07 21:45:03 +0100161}
162
Eric Anholt673a3942008-07-30 12:06:12 -0700163int
164i915_gem_init_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +0000165 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700166{
Ben Widawsky93d18792013-01-17 12:45:17 -0800167 struct drm_i915_private *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -0700168 struct drm_i915_gem_init *args = data;
Chris Wilson20217462010-11-23 15:26:33 +0000169
Daniel Vetter7bb6fb82012-04-24 08:22:52 +0200170 if (drm_core_check_feature(dev, DRIVER_MODESET))
171 return -ENODEV;
172
Chris Wilson20217462010-11-23 15:26:33 +0000173 if (args->gtt_start >= args->gtt_end ||
174 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
175 return -EINVAL;
Eric Anholt673a3942008-07-30 12:06:12 -0700176
Daniel Vetterf534bc02012-03-26 22:37:04 +0200177 /* GEM with user mode setting was never supported on ilk and later. */
178 if (INTEL_INFO(dev)->gen >= 5)
179 return -ENODEV;
180
Eric Anholt673a3942008-07-30 12:06:12 -0700181 mutex_lock(&dev->struct_mutex);
Ben Widawskyd7e50082012-12-18 10:31:25 -0800182 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
183 args->gtt_end);
Ben Widawsky93d18792013-01-17 12:45:17 -0800184 dev_priv->gtt.mappable_end = args->gtt_end;
Eric Anholt673a3942008-07-30 12:06:12 -0700185 mutex_unlock(&dev->struct_mutex);
186
Chris Wilson20217462010-11-23 15:26:33 +0000187 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -0700188}
189
Eric Anholt5a125c32008-10-22 21:40:13 -0700190int
191i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +0000192 struct drm_file *file)
Eric Anholt5a125c32008-10-22 21:40:13 -0700193{
Chris Wilson73aa8082010-09-30 11:46:12 +0100194 struct drm_i915_private *dev_priv = dev->dev_private;
Eric Anholt5a125c32008-10-22 21:40:13 -0700195 struct drm_i915_gem_get_aperture *args = data;
Chris Wilson6299f992010-11-24 12:23:44 +0000196 struct drm_i915_gem_object *obj;
197 size_t pinned;
Eric Anholt5a125c32008-10-22 21:40:13 -0700198
Chris Wilson6299f992010-11-24 12:23:44 +0000199 pinned = 0;
Chris Wilson73aa8082010-09-30 11:46:12 +0100200 mutex_lock(&dev->struct_mutex);
Ben Widawsky35c20a62013-05-31 11:28:48 -0700201 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
Chris Wilson1b502472012-04-24 15:47:30 +0100202 if (obj->pin_count)
Ben Widawskyf343c5f2013-07-05 14:41:04 -0700203 pinned += i915_gem_obj_ggtt_size(obj);
Chris Wilson73aa8082010-09-30 11:46:12 +0100204 mutex_unlock(&dev->struct_mutex);
Eric Anholt5a125c32008-10-22 21:40:13 -0700205
Ben Widawsky853ba5d2013-07-16 16:50:05 -0700206 args->aper_size = dev_priv->gtt.base.total;
Akshay Joshi0206e352011-08-16 15:34:10 -0400207 args->aper_available_size = args->aper_size - pinned;
Chris Wilson6299f992010-11-24 12:23:44 +0000208
Eric Anholt5a125c32008-10-22 21:40:13 -0700209 return 0;
210}
211
Chris Wilson42dcedd2012-11-15 11:32:30 +0000212void *i915_gem_object_alloc(struct drm_device *dev)
213{
214 struct drm_i915_private *dev_priv = dev->dev_private;
Joe Perchesfac15c12013-08-29 13:11:07 -0700215 return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
Chris Wilson42dcedd2012-11-15 11:32:30 +0000216}
217
218void i915_gem_object_free(struct drm_i915_gem_object *obj)
219{
220 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
221 kmem_cache_free(dev_priv->slab, obj);
222}
223
Dave Airlieff72145b2011-02-07 12:16:14 +1000224static int
225i915_gem_create(struct drm_file *file,
226 struct drm_device *dev,
227 uint64_t size,
228 uint32_t *handle_p)
Eric Anholt673a3942008-07-30 12:06:12 -0700229{
Chris Wilson05394f32010-11-08 19:18:58 +0000230 struct drm_i915_gem_object *obj;
Pekka Paalanena1a2d1d2009-08-23 12:40:55 +0300231 int ret;
232 u32 handle;
Eric Anholt673a3942008-07-30 12:06:12 -0700233
Dave Airlieff72145b2011-02-07 12:16:14 +1000234 size = roundup(size, PAGE_SIZE);
Chris Wilson8ffc0242011-09-14 14:14:28 +0200235 if (size == 0)
236 return -EINVAL;
Eric Anholt673a3942008-07-30 12:06:12 -0700237
238 /* Allocate the new object */
Dave Airlieff72145b2011-02-07 12:16:14 +1000239 obj = i915_gem_alloc_object(dev, size);
Eric Anholt673a3942008-07-30 12:06:12 -0700240 if (obj == NULL)
241 return -ENOMEM;
242
Chris Wilson05394f32010-11-08 19:18:58 +0000243 ret = drm_gem_handle_create(file, &obj->base, &handle);
Chris Wilson202f2fe2010-10-14 13:20:40 +0100244 /* drop reference from allocate - handle holds it now */
Daniel Vetterd861e332013-07-24 23:25:03 +0200245 drm_gem_object_unreference_unlocked(&obj->base);
246 if (ret)
247 return ret;
Chris Wilson202f2fe2010-10-14 13:20:40 +0100248
Dave Airlieff72145b2011-02-07 12:16:14 +1000249 *handle_p = handle;
Eric Anholt673a3942008-07-30 12:06:12 -0700250 return 0;
251}
252
Dave Airlieff72145b2011-02-07 12:16:14 +1000253int
254i915_gem_dumb_create(struct drm_file *file,
255 struct drm_device *dev,
256 struct drm_mode_create_dumb *args)
257{
258 /* have to work out size/pitch and return them */
Chris Wilsoned0291f2011-03-19 08:21:45 +0000259 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
Dave Airlieff72145b2011-02-07 12:16:14 +1000260 args->size = args->pitch * args->height;
261 return i915_gem_create(file, dev,
262 args->size, &args->handle);
263}
264
Dave Airlieff72145b2011-02-07 12:16:14 +1000265/**
266 * Creates a new mm object and returns a handle to it.
267 */
268int
269i915_gem_create_ioctl(struct drm_device *dev, void *data,
270 struct drm_file *file)
271{
272 struct drm_i915_gem_create *args = data;
Daniel Vetter63ed2cb2012-04-23 16:50:50 +0200273
Dave Airlieff72145b2011-02-07 12:16:14 +1000274 return i915_gem_create(file, dev,
275 args->size, &args->handle);
276}
277
Daniel Vetter8c599672011-12-14 13:57:31 +0100278static inline int
Daniel Vetter8461d222011-12-14 13:57:32 +0100279__copy_to_user_swizzled(char __user *cpu_vaddr,
280 const char *gpu_vaddr, int gpu_offset,
281 int length)
282{
283 int ret, cpu_offset = 0;
284
285 while (length > 0) {
286 int cacheline_end = ALIGN(gpu_offset + 1, 64);
287 int this_length = min(cacheline_end - gpu_offset, length);
288 int swizzled_gpu_offset = gpu_offset ^ 64;
289
290 ret = __copy_to_user(cpu_vaddr + cpu_offset,
291 gpu_vaddr + swizzled_gpu_offset,
292 this_length);
293 if (ret)
294 return ret + length;
295
296 cpu_offset += this_length;
297 gpu_offset += this_length;
298 length -= this_length;
299 }
300
301 return 0;
302}
303
304static inline int
Ben Widawsky4f0c7cf2012-04-16 14:07:47 -0700305__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
306 const char __user *cpu_vaddr,
Daniel Vetter8c599672011-12-14 13:57:31 +0100307 int length)
308{
309 int ret, cpu_offset = 0;
310
311 while (length > 0) {
312 int cacheline_end = ALIGN(gpu_offset + 1, 64);
313 int this_length = min(cacheline_end - gpu_offset, length);
314 int swizzled_gpu_offset = gpu_offset ^ 64;
315
316 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
317 cpu_vaddr + cpu_offset,
318 this_length);
319 if (ret)
320 return ret + length;
321
322 cpu_offset += this_length;
323 gpu_offset += this_length;
324 length -= this_length;
325 }
326
327 return 0;
328}
329
Daniel Vetterd174bd62012-03-25 19:47:40 +0200330/* Per-page copy function for the shmem pread fastpath.
331 * Flushes invalid cachelines before reading the target if
332 * needs_clflush is set. */
Eric Anholteb014592009-03-10 11:44:52 -0700333static int
Daniel Vetterd174bd62012-03-25 19:47:40 +0200334shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
335 char __user *user_data,
336 bool page_do_bit17_swizzling, bool needs_clflush)
337{
338 char *vaddr;
339 int ret;
340
Daniel Vettere7e58eb2012-03-25 19:47:43 +0200341 if (unlikely(page_do_bit17_swizzling))
Daniel Vetterd174bd62012-03-25 19:47:40 +0200342 return -EINVAL;
343
344 vaddr = kmap_atomic(page);
345 if (needs_clflush)
346 drm_clflush_virt_range(vaddr + shmem_page_offset,
347 page_length);
348 ret = __copy_to_user_inatomic(user_data,
349 vaddr + shmem_page_offset,
350 page_length);
351 kunmap_atomic(vaddr);
352
Chris Wilsonf60d7f02012-09-04 21:02:56 +0100353 return ret ? -EFAULT : 0;
Daniel Vetterd174bd62012-03-25 19:47:40 +0200354}
355
Daniel Vetter23c18c72012-03-25 19:47:42 +0200356static void
357shmem_clflush_swizzled_range(char *addr, unsigned long length,
358 bool swizzled)
359{
Daniel Vettere7e58eb2012-03-25 19:47:43 +0200360 if (unlikely(swizzled)) {
Daniel Vetter23c18c72012-03-25 19:47:42 +0200361 unsigned long start = (unsigned long) addr;
362 unsigned long end = (unsigned long) addr + length;
363
364 /* For swizzling simply ensure that we always flush both
365 * channels. Lame, but simple and it works. Swizzled
366 * pwrite/pread is far from a hotpath - current userspace
367 * doesn't use it at all. */
368 start = round_down(start, 128);
369 end = round_up(end, 128);
370
371 drm_clflush_virt_range((void *)start, end - start);
372 } else {
373 drm_clflush_virt_range(addr, length);
374 }
375
376}
377
Daniel Vetterd174bd62012-03-25 19:47:40 +0200378/* Only difference to the fast-path function is that this can handle bit17
379 * and uses non-atomic copy and kmap functions. */
380static int
381shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
382 char __user *user_data,
383 bool page_do_bit17_swizzling, bool needs_clflush)
384{
385 char *vaddr;
386 int ret;
387
388 vaddr = kmap(page);
389 if (needs_clflush)
Daniel Vetter23c18c72012-03-25 19:47:42 +0200390 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
391 page_length,
392 page_do_bit17_swizzling);
Daniel Vetterd174bd62012-03-25 19:47:40 +0200393
394 if (page_do_bit17_swizzling)
395 ret = __copy_to_user_swizzled(user_data,
396 vaddr, shmem_page_offset,
397 page_length);
398 else
399 ret = __copy_to_user(user_data,
400 vaddr + shmem_page_offset,
401 page_length);
402 kunmap(page);
403
Chris Wilsonf60d7f02012-09-04 21:02:56 +0100404 return ret ? - EFAULT : 0;
Daniel Vetterd174bd62012-03-25 19:47:40 +0200405}
406
Eric Anholteb014592009-03-10 11:44:52 -0700407static int
Daniel Vetterdbf7bff2012-03-25 19:47:29 +0200408i915_gem_shmem_pread(struct drm_device *dev,
409 struct drm_i915_gem_object *obj,
410 struct drm_i915_gem_pread *args,
411 struct drm_file *file)
Eric Anholteb014592009-03-10 11:44:52 -0700412{
Daniel Vetter8461d222011-12-14 13:57:32 +0100413 char __user *user_data;
Eric Anholteb014592009-03-10 11:44:52 -0700414 ssize_t remain;
Daniel Vetter8461d222011-12-14 13:57:32 +0100415 loff_t offset;
Ben Widawskyeb2c0c82012-02-15 14:42:43 +0100416 int shmem_page_offset, page_length, ret = 0;
Daniel Vetter8461d222011-12-14 13:57:32 +0100417 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
Daniel Vetter96d79b52012-03-25 19:47:36 +0200418 int prefaulted = 0;
Daniel Vetter84897312012-03-25 19:47:31 +0200419 int needs_clflush = 0;
Imre Deak67d5a502013-02-18 19:28:02 +0200420 struct sg_page_iter sg_iter;
Eric Anholteb014592009-03-10 11:44:52 -0700421
Ville Syrjälä2bb46292013-02-22 16:12:51 +0200422 user_data = to_user_ptr(args->data_ptr);
Eric Anholteb014592009-03-10 11:44:52 -0700423 remain = args->size;
424
Daniel Vetter8461d222011-12-14 13:57:32 +0100425 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700426
Daniel Vetter84897312012-03-25 19:47:31 +0200427 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
428 /* If we're not in the cpu read domain, set ourself into the gtt
429 * read domain and manually flush cachelines (if required). This
430 * optimizes for the case when the gpu will dirty the data
431 * anyway again before the next pread happens. */
Chris Wilsonc76ce032013-08-08 14:41:03 +0100432 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
Ben Widawsky98438772013-07-31 17:00:12 -0700433 if (i915_gem_obj_bound_any(obj)) {
Chris Wilson6c085a72012-08-20 11:40:46 +0200434 ret = i915_gem_object_set_to_gtt_domain(obj, false);
435 if (ret)
436 return ret;
437 }
Daniel Vetter84897312012-03-25 19:47:31 +0200438 }
Eric Anholteb014592009-03-10 11:44:52 -0700439
Chris Wilsonf60d7f02012-09-04 21:02:56 +0100440 ret = i915_gem_object_get_pages(obj);
441 if (ret)
442 return ret;
443
444 i915_gem_object_pin_pages(obj);
445
Eric Anholteb014592009-03-10 11:44:52 -0700446 offset = args->offset;
Daniel Vetter8461d222011-12-14 13:57:32 +0100447
Imre Deak67d5a502013-02-18 19:28:02 +0200448 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
449 offset >> PAGE_SHIFT) {
Imre Deak2db76d72013-03-26 15:14:18 +0200450 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +0100451
452 if (remain <= 0)
453 break;
454
Eric Anholteb014592009-03-10 11:44:52 -0700455 /* Operation in this page
456 *
Eric Anholteb014592009-03-10 11:44:52 -0700457 * shmem_page_offset = offset within page in shmem file
Eric Anholteb014592009-03-10 11:44:52 -0700458 * page_length = bytes to copy for this page
459 */
Chris Wilsonc8cbbb82011-05-12 22:17:11 +0100460 shmem_page_offset = offset_in_page(offset);
Eric Anholteb014592009-03-10 11:44:52 -0700461 page_length = remain;
462 if ((shmem_page_offset + page_length) > PAGE_SIZE)
463 page_length = PAGE_SIZE - shmem_page_offset;
Eric Anholteb014592009-03-10 11:44:52 -0700464
Daniel Vetter8461d222011-12-14 13:57:32 +0100465 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
466 (page_to_phys(page) & (1 << 17)) != 0;
467
Daniel Vetterd174bd62012-03-25 19:47:40 +0200468 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
469 user_data, page_do_bit17_swizzling,
470 needs_clflush);
471 if (ret == 0)
472 goto next_page;
Eric Anholteb014592009-03-10 11:44:52 -0700473
Daniel Vetterdbf7bff2012-03-25 19:47:29 +0200474 mutex_unlock(&dev->struct_mutex);
475
Xiong Zhang0b74b502013-07-19 13:51:24 +0800476 if (likely(!i915_prefault_disable) && !prefaulted) {
Daniel Vetterf56f8212012-03-25 19:47:41 +0200477 ret = fault_in_multipages_writeable(user_data, remain);
Daniel Vetter96d79b52012-03-25 19:47:36 +0200478 /* Userspace is tricking us, but we've already clobbered
479 * its pages with the prefault and promised to write the
480 * data up to the first fault. Hence ignore any errors
481 * and just continue. */
482 (void)ret;
483 prefaulted = 1;
484 }
485
Daniel Vetterd174bd62012-03-25 19:47:40 +0200486 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
487 user_data, page_do_bit17_swizzling,
488 needs_clflush);
Eric Anholteb014592009-03-10 11:44:52 -0700489
Daniel Vetterdbf7bff2012-03-25 19:47:29 +0200490 mutex_lock(&dev->struct_mutex);
Chris Wilsonf60d7f02012-09-04 21:02:56 +0100491
Daniel Vetterdbf7bff2012-03-25 19:47:29 +0200492next_page:
Chris Wilsone5281cc2010-10-28 13:45:36 +0100493 mark_page_accessed(page);
Chris Wilsone5281cc2010-10-28 13:45:36 +0100494
Chris Wilsonf60d7f02012-09-04 21:02:56 +0100495 if (ret)
Daniel Vetter8461d222011-12-14 13:57:32 +0100496 goto out;
Daniel Vetter8461d222011-12-14 13:57:32 +0100497
Eric Anholteb014592009-03-10 11:44:52 -0700498 remain -= page_length;
Daniel Vetter8461d222011-12-14 13:57:32 +0100499 user_data += page_length;
Eric Anholteb014592009-03-10 11:44:52 -0700500 offset += page_length;
501 }
502
Chris Wilson4f27b752010-10-14 15:26:45 +0100503out:
Chris Wilsonf60d7f02012-09-04 21:02:56 +0100504 i915_gem_object_unpin_pages(obj);
505
Eric Anholteb014592009-03-10 11:44:52 -0700506 return ret;
507}
508
Eric Anholt673a3942008-07-30 12:06:12 -0700509/**
510 * Reads data from the object referenced by handle.
511 *
512 * On error, the contents of *data are undefined.
513 */
514int
515i915_gem_pread_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +0000516 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700517{
518 struct drm_i915_gem_pread *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +0000519 struct drm_i915_gem_object *obj;
Chris Wilson35b62a82010-09-26 20:23:38 +0100520 int ret = 0;
Eric Anholt673a3942008-07-30 12:06:12 -0700521
Chris Wilson51311d02010-11-17 09:10:42 +0000522 if (args->size == 0)
523 return 0;
524
525 if (!access_ok(VERIFY_WRITE,
Ville Syrjälä2bb46292013-02-22 16:12:51 +0200526 to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +0000527 args->size))
528 return -EFAULT;
529
Chris Wilson4f27b752010-10-14 15:26:45 +0100530 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100531 if (ret)
Chris Wilson4f27b752010-10-14 15:26:45 +0100532 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700533
Chris Wilson05394f32010-11-08 19:18:58 +0000534 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +0000535 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100536 ret = -ENOENT;
537 goto unlock;
Chris Wilson4f27b752010-10-14 15:26:45 +0100538 }
Eric Anholt673a3942008-07-30 12:06:12 -0700539
Chris Wilson7dcd2492010-09-26 20:21:44 +0100540 /* Bounds check source. */
Chris Wilson05394f32010-11-08 19:18:58 +0000541 if (args->offset > obj->base.size ||
542 args->size > obj->base.size - args->offset) {
Chris Wilsonce9d4192010-09-26 20:50:05 +0100543 ret = -EINVAL;
Chris Wilson35b62a82010-09-26 20:23:38 +0100544 goto out;
Chris Wilsonce9d4192010-09-26 20:50:05 +0100545 }
546
Daniel Vetter1286ff72012-05-10 15:25:09 +0200547 /* prime objects have no backing filp to GEM pread/pwrite
548 * pages from.
549 */
550 if (!obj->base.filp) {
551 ret = -EINVAL;
552 goto out;
553 }
554
Chris Wilsondb53a302011-02-03 11:57:46 +0000555 trace_i915_gem_object_pread(obj, args->offset, args->size);
556
Daniel Vetterdbf7bff2012-03-25 19:47:29 +0200557 ret = i915_gem_shmem_pread(dev, obj, args, file);
Eric Anholt673a3942008-07-30 12:06:12 -0700558
Chris Wilson35b62a82010-09-26 20:23:38 +0100559out:
Chris Wilson05394f32010-11-08 19:18:58 +0000560 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100561unlock:
Chris Wilson4f27b752010-10-14 15:26:45 +0100562 mutex_unlock(&dev->struct_mutex);
Eric Anholteb014592009-03-10 11:44:52 -0700563 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700564}
565
Keith Packard0839ccb2008-10-30 19:38:48 -0700566/* This is the fast write path which cannot handle
567 * page faults in the source data
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700568 */
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700569
Keith Packard0839ccb2008-10-30 19:38:48 -0700570static inline int
571fast_user_write(struct io_mapping *mapping,
572 loff_t page_base, int page_offset,
573 char __user *user_data,
574 int length)
575{
Ben Widawsky4f0c7cf2012-04-16 14:07:47 -0700576 void __iomem *vaddr_atomic;
577 void *vaddr;
Keith Packard0839ccb2008-10-30 19:38:48 -0700578 unsigned long unwritten;
579
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700580 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
Ben Widawsky4f0c7cf2012-04-16 14:07:47 -0700581 /* We can use the cpu mem copy function because this is X86. */
582 vaddr = (void __force*)vaddr_atomic + page_offset;
583 unwritten = __copy_from_user_inatomic_nocache(vaddr,
Keith Packard0839ccb2008-10-30 19:38:48 -0700584 user_data, length);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700585 io_mapping_unmap_atomic(vaddr_atomic);
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100586 return unwritten;
Keith Packard0839ccb2008-10-30 19:38:48 -0700587}
588
Eric Anholt3de09aa2009-03-09 09:42:23 -0700589/**
590 * This is the fast pwrite path, where we copy the data directly from the
591 * user into the GTT, uncached.
592 */
Eric Anholt673a3942008-07-30 12:06:12 -0700593static int
Chris Wilson05394f32010-11-08 19:18:58 +0000594i915_gem_gtt_pwrite_fast(struct drm_device *dev,
595 struct drm_i915_gem_object *obj,
Eric Anholt3de09aa2009-03-09 09:42:23 -0700596 struct drm_i915_gem_pwrite *args,
Chris Wilson05394f32010-11-08 19:18:58 +0000597 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700598{
Keith Packard0839ccb2008-10-30 19:38:48 -0700599 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -0700600 ssize_t remain;
Keith Packard0839ccb2008-10-30 19:38:48 -0700601 loff_t offset, page_base;
Eric Anholt673a3942008-07-30 12:06:12 -0700602 char __user *user_data;
Daniel Vetter935aaa62012-03-25 19:47:35 +0200603 int page_offset, page_length, ret;
604
Ben Widawskyc37e2202013-07-31 16:59:58 -0700605 ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
Daniel Vetter935aaa62012-03-25 19:47:35 +0200606 if (ret)
607 goto out;
608
609 ret = i915_gem_object_set_to_gtt_domain(obj, true);
610 if (ret)
611 goto out_unpin;
612
613 ret = i915_gem_object_put_fence(obj);
614 if (ret)
615 goto out_unpin;
Eric Anholt673a3942008-07-30 12:06:12 -0700616
Ville Syrjälä2bb46292013-02-22 16:12:51 +0200617 user_data = to_user_ptr(args->data_ptr);
Eric Anholt673a3942008-07-30 12:06:12 -0700618 remain = args->size;
Eric Anholt673a3942008-07-30 12:06:12 -0700619
Ben Widawskyf343c5f2013-07-05 14:41:04 -0700620 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700621
622 while (remain > 0) {
623 /* Operation in this page
624 *
Keith Packard0839ccb2008-10-30 19:38:48 -0700625 * page_base = page offset within aperture
626 * page_offset = offset within page
627 * page_length = bytes to copy for this page
Eric Anholt673a3942008-07-30 12:06:12 -0700628 */
Chris Wilsonc8cbbb82011-05-12 22:17:11 +0100629 page_base = offset & PAGE_MASK;
630 page_offset = offset_in_page(offset);
Keith Packard0839ccb2008-10-30 19:38:48 -0700631 page_length = remain;
632 if ((page_offset + remain) > PAGE_SIZE)
633 page_length = PAGE_SIZE - page_offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700634
Keith Packard0839ccb2008-10-30 19:38:48 -0700635 /* If we get a fault while copying data, then (presumably) our
Eric Anholt3de09aa2009-03-09 09:42:23 -0700636 * source page isn't available. Return the error and we'll
637 * retry in the slow path.
Keith Packard0839ccb2008-10-30 19:38:48 -0700638 */
Ben Widawsky5d4545a2013-01-17 12:45:15 -0800639 if (fast_user_write(dev_priv->gtt.mappable, page_base,
Daniel Vetter935aaa62012-03-25 19:47:35 +0200640 page_offset, user_data, page_length)) {
641 ret = -EFAULT;
642 goto out_unpin;
643 }
Eric Anholt673a3942008-07-30 12:06:12 -0700644
Keith Packard0839ccb2008-10-30 19:38:48 -0700645 remain -= page_length;
646 user_data += page_length;
647 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700648 }
Eric Anholt673a3942008-07-30 12:06:12 -0700649
Daniel Vetter935aaa62012-03-25 19:47:35 +0200650out_unpin:
651 i915_gem_object_unpin(obj);
652out:
Eric Anholt3de09aa2009-03-09 09:42:23 -0700653 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700654}
655
Daniel Vetterd174bd62012-03-25 19:47:40 +0200656/* Per-page copy function for the shmem pwrite fastpath.
657 * Flushes invalid cachelines before writing to the target if
658 * needs_clflush_before is set and flushes out any written cachelines after
659 * writing if needs_clflush is set. */
Eric Anholt673a3942008-07-30 12:06:12 -0700660static int
Daniel Vetterd174bd62012-03-25 19:47:40 +0200661shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
662 char __user *user_data,
663 bool page_do_bit17_swizzling,
664 bool needs_clflush_before,
665 bool needs_clflush_after)
Eric Anholt673a3942008-07-30 12:06:12 -0700666{
Daniel Vetterd174bd62012-03-25 19:47:40 +0200667 char *vaddr;
Eric Anholt3de09aa2009-03-09 09:42:23 -0700668 int ret;
Eric Anholt3de09aa2009-03-09 09:42:23 -0700669
Daniel Vettere7e58eb2012-03-25 19:47:43 +0200670 if (unlikely(page_do_bit17_swizzling))
Daniel Vetterd174bd62012-03-25 19:47:40 +0200671 return -EINVAL;
Eric Anholt3de09aa2009-03-09 09:42:23 -0700672
Daniel Vetterd174bd62012-03-25 19:47:40 +0200673 vaddr = kmap_atomic(page);
674 if (needs_clflush_before)
675 drm_clflush_virt_range(vaddr + shmem_page_offset,
676 page_length);
677 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
678 user_data,
679 page_length);
680 if (needs_clflush_after)
681 drm_clflush_virt_range(vaddr + shmem_page_offset,
682 page_length);
683 kunmap_atomic(vaddr);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700684
Chris Wilson755d2212012-09-04 21:02:55 +0100685 return ret ? -EFAULT : 0;
Eric Anholt3de09aa2009-03-09 09:42:23 -0700686}
687
Daniel Vetterd174bd62012-03-25 19:47:40 +0200688/* Only difference to the fast-path function is that this can handle bit17
689 * and uses non-atomic copy and kmap functions. */
Eric Anholt3043c602008-10-02 12:24:47 -0700690static int
Daniel Vetterd174bd62012-03-25 19:47:40 +0200691shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
692 char __user *user_data,
693 bool page_do_bit17_swizzling,
694 bool needs_clflush_before,
695 bool needs_clflush_after)
Eric Anholt673a3942008-07-30 12:06:12 -0700696{
Daniel Vetterd174bd62012-03-25 19:47:40 +0200697 char *vaddr;
698 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -0700699
Daniel Vetterd174bd62012-03-25 19:47:40 +0200700 vaddr = kmap(page);
Daniel Vettere7e58eb2012-03-25 19:47:43 +0200701 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
Daniel Vetter23c18c72012-03-25 19:47:42 +0200702 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
703 page_length,
704 page_do_bit17_swizzling);
Daniel Vetterd174bd62012-03-25 19:47:40 +0200705 if (page_do_bit17_swizzling)
706 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
Chris Wilsone5281cc2010-10-28 13:45:36 +0100707 user_data,
708 page_length);
Daniel Vetterd174bd62012-03-25 19:47:40 +0200709 else
710 ret = __copy_from_user(vaddr + shmem_page_offset,
711 user_data,
712 page_length);
713 if (needs_clflush_after)
Daniel Vetter23c18c72012-03-25 19:47:42 +0200714 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
715 page_length,
716 page_do_bit17_swizzling);
Daniel Vetterd174bd62012-03-25 19:47:40 +0200717 kunmap(page);
Chris Wilsone5281cc2010-10-28 13:45:36 +0100718
Chris Wilson755d2212012-09-04 21:02:55 +0100719 return ret ? -EFAULT : 0;
Eric Anholt40123c12009-03-09 13:42:30 -0700720}
721
Eric Anholt40123c12009-03-09 13:42:30 -0700722static int
Daniel Vettere244a442012-03-25 19:47:28 +0200723i915_gem_shmem_pwrite(struct drm_device *dev,
724 struct drm_i915_gem_object *obj,
725 struct drm_i915_gem_pwrite *args,
726 struct drm_file *file)
Eric Anholt40123c12009-03-09 13:42:30 -0700727{
Eric Anholt40123c12009-03-09 13:42:30 -0700728 ssize_t remain;
Daniel Vetter8c599672011-12-14 13:57:31 +0100729 loff_t offset;
730 char __user *user_data;
Ben Widawskyeb2c0c82012-02-15 14:42:43 +0100731 int shmem_page_offset, page_length, ret = 0;
Daniel Vetter8c599672011-12-14 13:57:31 +0100732 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
Daniel Vettere244a442012-03-25 19:47:28 +0200733 int hit_slowpath = 0;
Daniel Vetter58642882012-03-25 19:47:37 +0200734 int needs_clflush_after = 0;
735 int needs_clflush_before = 0;
Imre Deak67d5a502013-02-18 19:28:02 +0200736 struct sg_page_iter sg_iter;
Eric Anholt40123c12009-03-09 13:42:30 -0700737
Ville Syrjälä2bb46292013-02-22 16:12:51 +0200738 user_data = to_user_ptr(args->data_ptr);
Eric Anholt40123c12009-03-09 13:42:30 -0700739 remain = args->size;
740
Daniel Vetter8c599672011-12-14 13:57:31 +0100741 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
Eric Anholt40123c12009-03-09 13:42:30 -0700742
Daniel Vetter58642882012-03-25 19:47:37 +0200743 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
744 /* If we're not in the cpu write domain, set ourself into the gtt
745 * write domain and manually flush cachelines (if required). This
746 * optimizes for the case when the gpu will use the data
747 * right away and we therefore have to clflush anyway. */
Chris Wilson2c225692013-08-09 12:26:45 +0100748 needs_clflush_after = cpu_write_needs_clflush(obj);
Ben Widawsky98438772013-07-31 17:00:12 -0700749 if (i915_gem_obj_bound_any(obj)) {
Chris Wilson6c085a72012-08-20 11:40:46 +0200750 ret = i915_gem_object_set_to_gtt_domain(obj, true);
751 if (ret)
752 return ret;
753 }
Daniel Vetter58642882012-03-25 19:47:37 +0200754 }
Chris Wilsonc76ce032013-08-08 14:41:03 +0100755 /* Same trick applies to invalidate partially written cachelines read
756 * before writing. */
757 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
758 needs_clflush_before =
759 !cpu_cache_is_coherent(dev, obj->cache_level);
Daniel Vetter58642882012-03-25 19:47:37 +0200760
Chris Wilson755d2212012-09-04 21:02:55 +0100761 ret = i915_gem_object_get_pages(obj);
762 if (ret)
763 return ret;
764
765 i915_gem_object_pin_pages(obj);
766
Eric Anholt40123c12009-03-09 13:42:30 -0700767 offset = args->offset;
Chris Wilson05394f32010-11-08 19:18:58 +0000768 obj->dirty = 1;
Eric Anholt40123c12009-03-09 13:42:30 -0700769
Imre Deak67d5a502013-02-18 19:28:02 +0200770 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
771 offset >> PAGE_SHIFT) {
Imre Deak2db76d72013-03-26 15:14:18 +0200772 struct page *page = sg_page_iter_page(&sg_iter);
Daniel Vetter58642882012-03-25 19:47:37 +0200773 int partial_cacheline_write;
Chris Wilsone5281cc2010-10-28 13:45:36 +0100774
Chris Wilson9da3da62012-06-01 15:20:22 +0100775 if (remain <= 0)
776 break;
777
Eric Anholt40123c12009-03-09 13:42:30 -0700778 /* Operation in this page
779 *
Eric Anholt40123c12009-03-09 13:42:30 -0700780 * shmem_page_offset = offset within page in shmem file
Eric Anholt40123c12009-03-09 13:42:30 -0700781 * page_length = bytes to copy for this page
782 */
Chris Wilsonc8cbbb82011-05-12 22:17:11 +0100783 shmem_page_offset = offset_in_page(offset);
Eric Anholt40123c12009-03-09 13:42:30 -0700784
785 page_length = remain;
786 if ((shmem_page_offset + page_length) > PAGE_SIZE)
787 page_length = PAGE_SIZE - shmem_page_offset;
Eric Anholt40123c12009-03-09 13:42:30 -0700788
Daniel Vetter58642882012-03-25 19:47:37 +0200789 /* If we don't overwrite a cacheline completely we need to be
790 * careful to have up-to-date data by first clflushing. Don't
791 * overcomplicate things and flush the entire patch. */
792 partial_cacheline_write = needs_clflush_before &&
793 ((shmem_page_offset | page_length)
794 & (boot_cpu_data.x86_clflush_size - 1));
795
Daniel Vetter8c599672011-12-14 13:57:31 +0100796 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
797 (page_to_phys(page) & (1 << 17)) != 0;
798
Daniel Vetterd174bd62012-03-25 19:47:40 +0200799 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
800 user_data, page_do_bit17_swizzling,
801 partial_cacheline_write,
802 needs_clflush_after);
803 if (ret == 0)
804 goto next_page;
Eric Anholt40123c12009-03-09 13:42:30 -0700805
Daniel Vettere244a442012-03-25 19:47:28 +0200806 hit_slowpath = 1;
Daniel Vettere244a442012-03-25 19:47:28 +0200807 mutex_unlock(&dev->struct_mutex);
Daniel Vetterd174bd62012-03-25 19:47:40 +0200808 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
809 user_data, page_do_bit17_swizzling,
810 partial_cacheline_write,
811 needs_clflush_after);
Eric Anholt40123c12009-03-09 13:42:30 -0700812
Daniel Vettere244a442012-03-25 19:47:28 +0200813 mutex_lock(&dev->struct_mutex);
Chris Wilson755d2212012-09-04 21:02:55 +0100814
Daniel Vettere244a442012-03-25 19:47:28 +0200815next_page:
Chris Wilsone5281cc2010-10-28 13:45:36 +0100816 set_page_dirty(page);
817 mark_page_accessed(page);
Chris Wilsone5281cc2010-10-28 13:45:36 +0100818
Chris Wilson755d2212012-09-04 21:02:55 +0100819 if (ret)
Daniel Vetter8c599672011-12-14 13:57:31 +0100820 goto out;
Daniel Vetter8c599672011-12-14 13:57:31 +0100821
Eric Anholt40123c12009-03-09 13:42:30 -0700822 remain -= page_length;
Daniel Vetter8c599672011-12-14 13:57:31 +0100823 user_data += page_length;
Eric Anholt40123c12009-03-09 13:42:30 -0700824 offset += page_length;
825 }
826
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100827out:
Chris Wilson755d2212012-09-04 21:02:55 +0100828 i915_gem_object_unpin_pages(obj);
829
Daniel Vettere244a442012-03-25 19:47:28 +0200830 if (hit_slowpath) {
Daniel Vetter8dcf0152012-11-15 16:53:58 +0100831 /*
832 * Fixup: Flush cpu caches in case we didn't flush the dirty
833 * cachelines in-line while writing and the object moved
834 * out of the cpu write domain while we've dropped the lock.
835 */
836 if (!needs_clflush_after &&
837 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
Chris Wilson000433b2013-08-08 14:41:09 +0100838 if (i915_gem_clflush_object(obj, obj->pin_display))
839 i915_gem_chipset_flush(dev);
Daniel Vettere244a442012-03-25 19:47:28 +0200840 }
Daniel Vetter8c599672011-12-14 13:57:31 +0100841 }
Eric Anholt40123c12009-03-09 13:42:30 -0700842
Daniel Vetter58642882012-03-25 19:47:37 +0200843 if (needs_clflush_after)
Ben Widawskye76e9ae2012-11-04 09:21:27 -0800844 i915_gem_chipset_flush(dev);
Daniel Vetter58642882012-03-25 19:47:37 +0200845
Eric Anholt40123c12009-03-09 13:42:30 -0700846 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700847}
848
849/**
850 * Writes data to the object referenced by handle.
851 *
852 * On error, the contents of the buffer that were to be modified are undefined.
853 */
854int
855i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100856 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700857{
858 struct drm_i915_gem_pwrite *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +0000859 struct drm_i915_gem_object *obj;
Chris Wilson51311d02010-11-17 09:10:42 +0000860 int ret;
861
862 if (args->size == 0)
863 return 0;
864
865 if (!access_ok(VERIFY_READ,
Ville Syrjälä2bb46292013-02-22 16:12:51 +0200866 to_user_ptr(args->data_ptr),
Chris Wilson51311d02010-11-17 09:10:42 +0000867 args->size))
868 return -EFAULT;
869
Xiong Zhang0b74b502013-07-19 13:51:24 +0800870 if (likely(!i915_prefault_disable)) {
871 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
872 args->size);
873 if (ret)
874 return -EFAULT;
875 }
Eric Anholt673a3942008-07-30 12:06:12 -0700876
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100877 ret = i915_mutex_lock_interruptible(dev);
878 if (ret)
879 return ret;
880
Chris Wilson05394f32010-11-08 19:18:58 +0000881 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +0000882 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100883 ret = -ENOENT;
884 goto unlock;
885 }
Eric Anholt673a3942008-07-30 12:06:12 -0700886
Chris Wilson7dcd2492010-09-26 20:21:44 +0100887 /* Bounds check destination. */
Chris Wilson05394f32010-11-08 19:18:58 +0000888 if (args->offset > obj->base.size ||
889 args->size > obj->base.size - args->offset) {
Chris Wilsonce9d4192010-09-26 20:50:05 +0100890 ret = -EINVAL;
Chris Wilson35b62a82010-09-26 20:23:38 +0100891 goto out;
Chris Wilsonce9d4192010-09-26 20:50:05 +0100892 }
893
Daniel Vetter1286ff72012-05-10 15:25:09 +0200894 /* prime objects have no backing filp to GEM pread/pwrite
895 * pages from.
896 */
897 if (!obj->base.filp) {
898 ret = -EINVAL;
899 goto out;
900 }
901
Chris Wilsondb53a302011-02-03 11:57:46 +0000902 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
903
Daniel Vetter935aaa62012-03-25 19:47:35 +0200904 ret = -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -0700905 /* We can only do the GTT pwrite on untiled buffers, as otherwise
906 * it would end up going through the fenced access, and we'll get
907 * different detiling behavior between reading and writing.
908 * pread/pwrite currently are reading and writing from the CPU
909 * perspective, requiring manual detiling by the client.
910 */
Daniel Vetter5c0480f2011-12-14 13:57:30 +0100911 if (obj->phys_obj) {
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100912 ret = i915_gem_phys_pwrite(dev, obj, args, file);
Daniel Vetter5c0480f2011-12-14 13:57:30 +0100913 goto out;
914 }
915
Chris Wilson2c225692013-08-09 12:26:45 +0100916 if (obj->tiling_mode == I915_TILING_NONE &&
917 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
918 cpu_write_needs_clflush(obj)) {
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100919 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
Daniel Vetter935aaa62012-03-25 19:47:35 +0200920 /* Note that the gtt paths might fail with non-page-backed user
921 * pointers (e.g. gtt mappings when moving data between
922 * textures). Fallback to the shmem path in that case. */
Eric Anholt40123c12009-03-09 13:42:30 -0700923 }
Eric Anholt673a3942008-07-30 12:06:12 -0700924
Chris Wilson86a1ee22012-08-11 15:41:04 +0100925 if (ret == -EFAULT || ret == -ENOSPC)
Daniel Vetter935aaa62012-03-25 19:47:35 +0200926 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
Daniel Vetter5c0480f2011-12-14 13:57:30 +0100927
Chris Wilson35b62a82010-09-26 20:23:38 +0100928out:
Chris Wilson05394f32010-11-08 19:18:58 +0000929 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100930unlock:
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100931 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -0700932 return ret;
933}
934
Chris Wilsonb3612372012-08-24 09:35:08 +0100935int
Daniel Vetter33196de2012-11-14 17:14:05 +0100936i915_gem_check_wedge(struct i915_gpu_error *error,
Chris Wilsonb3612372012-08-24 09:35:08 +0100937 bool interruptible)
938{
Daniel Vetter1f83fee2012-11-15 17:17:22 +0100939 if (i915_reset_in_progress(error)) {
Chris Wilsonb3612372012-08-24 09:35:08 +0100940 /* Non-interruptible callers can't handle -EAGAIN, hence return
941 * -EIO unconditionally for these. */
942 if (!interruptible)
943 return -EIO;
944
Daniel Vetter1f83fee2012-11-15 17:17:22 +0100945 /* Recovery complete, but the reset failed ... */
946 if (i915_terminally_wedged(error))
Chris Wilsonb3612372012-08-24 09:35:08 +0100947 return -EIO;
948
949 return -EAGAIN;
950 }
951
952 return 0;
953}
954
955/*
956 * Compare seqno against outstanding lazy request. Emit a request if they are
957 * equal.
958 */
959static int
960i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
961{
962 int ret;
963
964 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
965
966 ret = 0;
Chris Wilson18235212013-09-04 10:45:51 +0100967 if (seqno == ring->outstanding_lazy_seqno)
Mika Kuoppala0025c072013-06-12 12:35:30 +0300968 ret = i915_add_request(ring, NULL);
Chris Wilsonb3612372012-08-24 09:35:08 +0100969
970 return ret;
971}
972
973/**
974 * __wait_seqno - wait until execution of seqno has finished
975 * @ring: the ring expected to report seqno
976 * @seqno: duh!
Daniel Vetterf69061b2012-12-06 09:01:42 +0100977 * @reset_counter: reset sequence associated with the given seqno
Chris Wilsonb3612372012-08-24 09:35:08 +0100978 * @interruptible: do an interruptible wait (normally yes)
979 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
980 *
Daniel Vetterf69061b2012-12-06 09:01:42 +0100981 * Note: It is of utmost importance that the passed in seqno and reset_counter
982 * values have been read by the caller in an smp safe manner. Where read-side
983 * locks are involved, it is sufficient to read the reset_counter before
984 * unlocking the lock that protects the seqno. For lockless tricks, the
985 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
986 * inserted.
987 *
Chris Wilsonb3612372012-08-24 09:35:08 +0100988 * Returns 0 if the seqno was found within the alloted time. Else returns the
989 * errno with remaining time filled in timeout argument.
990 */
991static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
Daniel Vetterf69061b2012-12-06 09:01:42 +0100992 unsigned reset_counter,
Chris Wilsonb3612372012-08-24 09:35:08 +0100993 bool interruptible, struct timespec *timeout)
994{
995 drm_i915_private_t *dev_priv = ring->dev->dev_private;
996 struct timespec before, now, wait_time={1,0};
997 unsigned long timeout_jiffies;
998 long end;
999 bool wait_forever = true;
1000 int ret;
1001
Paulo Zanonic67a4702013-08-19 13:18:09 -03001002 WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1003
Chris Wilsonb3612372012-08-24 09:35:08 +01001004 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1005 return 0;
1006
1007 trace_i915_gem_request_wait_begin(ring, seqno);
1008
1009 if (timeout != NULL) {
1010 wait_time = *timeout;
1011 wait_forever = false;
1012 }
1013
Imre Deake054cc32013-05-21 20:03:19 +03001014 timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
Chris Wilsonb3612372012-08-24 09:35:08 +01001015
1016 if (WARN_ON(!ring->irq_get(ring)))
1017 return -ENODEV;
1018
1019 /* Record current time in case interrupted by signal, or wedged * */
1020 getrawmonotonic(&before);
1021
1022#define EXIT_COND \
1023 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
Daniel Vetterf69061b2012-12-06 09:01:42 +01001024 i915_reset_in_progress(&dev_priv->gpu_error) || \
1025 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
Chris Wilsonb3612372012-08-24 09:35:08 +01001026 do {
1027 if (interruptible)
1028 end = wait_event_interruptible_timeout(ring->irq_queue,
1029 EXIT_COND,
1030 timeout_jiffies);
1031 else
1032 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1033 timeout_jiffies);
1034
Daniel Vetterf69061b2012-12-06 09:01:42 +01001035 /* We need to check whether any gpu reset happened in between
1036 * the caller grabbing the seqno and now ... */
1037 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1038 end = -EAGAIN;
1039
1040 /* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
1041 * gone. */
Daniel Vetter33196de2012-11-14 17:14:05 +01001042 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
Chris Wilsonb3612372012-08-24 09:35:08 +01001043 if (ret)
1044 end = ret;
1045 } while (end == 0 && wait_forever);
1046
1047 getrawmonotonic(&now);
1048
1049 ring->irq_put(ring);
1050 trace_i915_gem_request_wait_end(ring, seqno);
1051#undef EXIT_COND
1052
1053 if (timeout) {
1054 struct timespec sleep_time = timespec_sub(now, before);
1055 *timeout = timespec_sub(*timeout, sleep_time);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03001056 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1057 set_normalized_timespec(timeout, 0, 0);
Chris Wilsonb3612372012-08-24 09:35:08 +01001058 }
1059
1060 switch (end) {
1061 case -EIO:
1062 case -EAGAIN: /* Wedged */
1063 case -ERESTARTSYS: /* Signal */
1064 return (int)end;
1065 case 0: /* Timeout */
Chris Wilsonb3612372012-08-24 09:35:08 +01001066 return -ETIME;
1067 default: /* Completed */
1068 WARN_ON(end < 0); /* We're not aware of other errors */
1069 return 0;
1070 }
1071}
1072
1073/**
1074 * Waits for a sequence number to be signaled, and cleans up the
1075 * request and object lists appropriately for that event.
1076 */
1077int
1078i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1079{
1080 struct drm_device *dev = ring->dev;
1081 struct drm_i915_private *dev_priv = dev->dev_private;
1082 bool interruptible = dev_priv->mm.interruptible;
1083 int ret;
1084
1085 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1086 BUG_ON(seqno == 0);
1087
Daniel Vetter33196de2012-11-14 17:14:05 +01001088 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
Chris Wilsonb3612372012-08-24 09:35:08 +01001089 if (ret)
1090 return ret;
1091
1092 ret = i915_gem_check_olr(ring, seqno);
1093 if (ret)
1094 return ret;
1095
Daniel Vetterf69061b2012-12-06 09:01:42 +01001096 return __wait_seqno(ring, seqno,
1097 atomic_read(&dev_priv->gpu_error.reset_counter),
1098 interruptible, NULL);
Chris Wilsonb3612372012-08-24 09:35:08 +01001099}
1100
Chris Wilsond26e3af2013-06-29 22:05:26 +01001101static int
1102i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1103 struct intel_ring_buffer *ring)
1104{
1105 i915_gem_retire_requests_ring(ring);
1106
1107 /* Manually manage the write flush as we may have not yet
1108 * retired the buffer.
1109 *
1110 * Note that the last_write_seqno is always the earlier of
1111 * the two (read/write) seqno, so if we haved successfully waited,
1112 * we know we have passed the last write.
1113 */
1114 obj->last_write_seqno = 0;
1115 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1116
1117 return 0;
1118}
1119
Chris Wilsonb3612372012-08-24 09:35:08 +01001120/**
1121 * Ensures that all rendering to the object has completed and the object is
1122 * safe to unbind from the GTT or access from the CPU.
1123 */
1124static __must_check int
1125i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1126 bool readonly)
1127{
1128 struct intel_ring_buffer *ring = obj->ring;
1129 u32 seqno;
1130 int ret;
1131
1132 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1133 if (seqno == 0)
1134 return 0;
1135
1136 ret = i915_wait_seqno(ring, seqno);
1137 if (ret)
1138 return ret;
1139
Chris Wilsond26e3af2013-06-29 22:05:26 +01001140 return i915_gem_object_wait_rendering__tail(obj, ring);
Chris Wilsonb3612372012-08-24 09:35:08 +01001141}
1142
Chris Wilson3236f572012-08-24 09:35:09 +01001143/* A nonblocking variant of the above wait. This is a highly dangerous routine
1144 * as the object state may change during this call.
1145 */
1146static __must_check int
1147i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1148 bool readonly)
1149{
1150 struct drm_device *dev = obj->base.dev;
1151 struct drm_i915_private *dev_priv = dev->dev_private;
1152 struct intel_ring_buffer *ring = obj->ring;
Daniel Vetterf69061b2012-12-06 09:01:42 +01001153 unsigned reset_counter;
Chris Wilson3236f572012-08-24 09:35:09 +01001154 u32 seqno;
1155 int ret;
1156
1157 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1158 BUG_ON(!dev_priv->mm.interruptible);
1159
1160 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1161 if (seqno == 0)
1162 return 0;
1163
Daniel Vetter33196de2012-11-14 17:14:05 +01001164 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
Chris Wilson3236f572012-08-24 09:35:09 +01001165 if (ret)
1166 return ret;
1167
1168 ret = i915_gem_check_olr(ring, seqno);
1169 if (ret)
1170 return ret;
1171
Daniel Vetterf69061b2012-12-06 09:01:42 +01001172 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson3236f572012-08-24 09:35:09 +01001173 mutex_unlock(&dev->struct_mutex);
Daniel Vetterf69061b2012-12-06 09:01:42 +01001174 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
Chris Wilson3236f572012-08-24 09:35:09 +01001175 mutex_lock(&dev->struct_mutex);
Chris Wilsond26e3af2013-06-29 22:05:26 +01001176 if (ret)
1177 return ret;
Chris Wilson3236f572012-08-24 09:35:09 +01001178
Chris Wilsond26e3af2013-06-29 22:05:26 +01001179 return i915_gem_object_wait_rendering__tail(obj, ring);
Chris Wilson3236f572012-08-24 09:35:09 +01001180}
1181
Eric Anholt673a3942008-07-30 12:06:12 -07001182/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001183 * Called when user space prepares to use an object with the CPU, either
1184 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -07001185 */
1186int
1187i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001188 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001189{
1190 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001191 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001192 uint32_t read_domains = args->read_domains;
1193 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001194 int ret;
1195
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001196 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +01001197 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001198 return -EINVAL;
1199
Chris Wilson21d509e2009-06-06 09:46:02 +01001200 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001201 return -EINVAL;
1202
1203 /* Having something in the write domain implies it's in the read
1204 * domain, and only that read domain. Enforce that in the request.
1205 */
1206 if (write_domain != 0 && read_domains != write_domain)
1207 return -EINVAL;
1208
Chris Wilson76c1dec2010-09-25 11:22:51 +01001209 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001210 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001211 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001212
Chris Wilson05394f32010-11-08 19:18:58 +00001213 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001214 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001215 ret = -ENOENT;
1216 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +01001217 }
Jesse Barnes652c3932009-08-17 13:31:43 -07001218
Chris Wilson3236f572012-08-24 09:35:09 +01001219 /* Try to flush the object off the GPU without holding the lock.
1220 * We will repeat the flush holding the lock in the normal manner
1221 * to catch cases where we are gazumped.
1222 */
1223 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1224 if (ret)
1225 goto unref;
1226
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001227 if (read_domains & I915_GEM_DOMAIN_GTT) {
1228 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001229
1230 /* Silently promote "you're not bound, there was nothing to do"
1231 * to success, since the client was just asking us to
1232 * make sure everything was done.
1233 */
1234 if (ret == -EINVAL)
1235 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001236 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001237 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001238 }
1239
Chris Wilson3236f572012-08-24 09:35:09 +01001240unref:
Chris Wilson05394f32010-11-08 19:18:58 +00001241 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001242unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001243 mutex_unlock(&dev->struct_mutex);
1244 return ret;
1245}
1246
1247/**
1248 * Called when user space has done writes to this buffer
1249 */
1250int
1251i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001252 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001253{
1254 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001255 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001256 int ret = 0;
1257
Chris Wilson76c1dec2010-09-25 11:22:51 +01001258 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001259 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001260 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001261
Chris Wilson05394f32010-11-08 19:18:58 +00001262 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001263 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001264 ret = -ENOENT;
1265 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001266 }
1267
Eric Anholt673a3942008-07-30 12:06:12 -07001268 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson2c225692013-08-09 12:26:45 +01001269 if (obj->pin_display)
1270 i915_gem_object_flush_cpu_write_domain(obj, true);
Eric Anholte47c68e2008-11-14 13:35:19 -08001271
Chris Wilson05394f32010-11-08 19:18:58 +00001272 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001273unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001274 mutex_unlock(&dev->struct_mutex);
1275 return ret;
1276}
1277
1278/**
1279 * Maps the contents of an object, returning the address it is mapped
1280 * into.
1281 *
1282 * While the mapping holds a reference on the contents of the object, it doesn't
1283 * imply a ref on the object itself.
1284 */
1285int
1286i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001287 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001288{
1289 struct drm_i915_gem_mmap *args = data;
1290 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001291 unsigned long addr;
1292
Chris Wilson05394f32010-11-08 19:18:58 +00001293 obj = drm_gem_object_lookup(dev, file, args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07001294 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001295 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001296
Daniel Vetter1286ff72012-05-10 15:25:09 +02001297 /* prime objects have no backing filp to GEM mmap
1298 * pages from.
1299 */
1300 if (!obj->filp) {
1301 drm_gem_object_unreference_unlocked(obj);
1302 return -EINVAL;
1303 }
1304
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001305 addr = vm_mmap(obj->filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001306 PROT_READ | PROT_WRITE, MAP_SHARED,
1307 args->offset);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001308 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001309 if (IS_ERR((void *)addr))
1310 return addr;
1311
1312 args->addr_ptr = (uint64_t) addr;
1313
1314 return 0;
1315}
1316
Jesse Barnesde151cf2008-11-12 10:03:55 -08001317/**
1318 * i915_gem_fault - fault a page into the GTT
1319 * vma: VMA in question
1320 * vmf: fault info
1321 *
1322 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1323 * from userspace. The fault handler takes care of binding the object to
1324 * the GTT (if needed), allocating and programming a fence register (again,
1325 * only if needed based on whether the old reg is still valid or the object
1326 * is tiled) and inserting a new PTE into the faulting process.
1327 *
1328 * Note that the faulting process may involve evicting existing objects
1329 * from the GTT and/or fence registers to make room. So performance may
1330 * suffer if the GTT working set is large or there are few fence registers
1331 * left.
1332 */
1333int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1334{
Chris Wilson05394f32010-11-08 19:18:58 +00001335 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1336 struct drm_device *dev = obj->base.dev;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001337 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001338 pgoff_t page_offset;
1339 unsigned long pfn;
1340 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001341 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001342
1343 /* We don't use vmf->pgoff since that has the fake offset */
1344 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1345 PAGE_SHIFT;
1346
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001347 ret = i915_mutex_lock_interruptible(dev);
1348 if (ret)
1349 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001350
Chris Wilsondb53a302011-02-03 11:57:46 +00001351 trace_i915_gem_object_fault(obj, page_offset, true, write);
1352
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001353 /* Access to snoopable pages through the GTT is incoherent. */
1354 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1355 ret = -EINVAL;
1356 goto unlock;
1357 }
1358
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001359 /* Now bind it into the GTT if needed */
Ben Widawskyc37e2202013-07-31 16:59:58 -07001360 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001361 if (ret)
1362 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001363
Chris Wilsonc9839302012-11-20 10:45:17 +00001364 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1365 if (ret)
1366 goto unpin;
1367
1368 ret = i915_gem_object_get_fence(obj);
1369 if (ret)
1370 goto unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001371
Chris Wilson6299f992010-11-24 12:23:44 +00001372 obj->fault_mappable = true;
1373
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001374 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1375 pfn >>= PAGE_SHIFT;
1376 pfn += page_offset;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001377
1378 /* Finally, remap it using the new GTT offset */
1379 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc9839302012-11-20 10:45:17 +00001380unpin:
1381 i915_gem_object_unpin(obj);
Chris Wilsonc7150892009-09-23 00:43:56 +01001382unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001383 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001384out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001385 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001386 case -EIO:
Daniel Vettera9340cc2012-07-04 22:18:42 +02001387 /* If this -EIO is due to a gpu hang, give the reset code a
1388 * chance to clean up the mess. Otherwise return the proper
1389 * SIGBUS. */
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001390 if (i915_terminally_wedged(&dev_priv->gpu_error))
Daniel Vettera9340cc2012-07-04 22:18:42 +02001391 return VM_FAULT_SIGBUS;
Chris Wilson045e7692010-11-07 09:18:22 +00001392 case -EAGAIN:
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001393 /* Give the error handler a chance to run and move the
1394 * objects off the GPU active list. Next time we service the
1395 * fault, we should be able to transition the page into the
1396 * GTT without touching the GPU (and so avoid further
1397 * EIO/EGAIN). If the GPU is wedged, then there is no issue
1398 * with coherency, just lost writes.
1399 */
Chris Wilson045e7692010-11-07 09:18:22 +00001400 set_need_resched();
Chris Wilsonc7150892009-09-23 00:43:56 +01001401 case 0:
1402 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001403 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001404 case -EBUSY:
1405 /*
1406 * EBUSY is ok: this just means that another thread
1407 * already did the job.
1408 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001409 return VM_FAULT_NOPAGE;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001410 case -ENOMEM:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001411 return VM_FAULT_OOM;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001412 case -ENOSPC:
1413 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001414 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001415 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Chris Wilsonc7150892009-09-23 00:43:56 +01001416 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001417 }
1418}
1419
1420/**
Chris Wilson901782b2009-07-10 08:18:50 +01001421 * i915_gem_release_mmap - remove physical page mappings
1422 * @obj: obj in question
1423 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001424 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001425 * relinquish ownership of the pages back to the system.
1426 *
1427 * It is vital that we remove the page mapping if we have mapped a tiled
1428 * object through the GTT and then lose the fence register due to
1429 * resource pressure. Similarly if the object has been moved out of the
1430 * aperture, than pages mapped into userspace must be revoked. Removing the
1431 * mapping will then trigger a page fault on the next user access, allowing
1432 * fixup by i915_gem_fault().
1433 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001434void
Chris Wilson05394f32010-11-08 19:18:58 +00001435i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001436{
Chris Wilson6299f992010-11-24 12:23:44 +00001437 if (!obj->fault_mappable)
1438 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001439
David Herrmann51335df2013-07-24 21:10:03 +02001440 drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
Chris Wilson6299f992010-11-24 12:23:44 +00001441 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001442}
1443
Imre Deak0fa87792013-01-07 21:47:35 +02001444uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001445i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001446{
Chris Wilsone28f8712011-07-18 13:11:49 -07001447 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001448
1449 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001450 tiling_mode == I915_TILING_NONE)
1451 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001452
1453 /* Previous chips need a power-of-two fence region when tiling */
1454 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001455 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001456 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001457 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001458
Chris Wilsone28f8712011-07-18 13:11:49 -07001459 while (gtt_size < size)
1460 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001461
Chris Wilsone28f8712011-07-18 13:11:49 -07001462 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001463}
1464
Jesse Barnesde151cf2008-11-12 10:03:55 -08001465/**
1466 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1467 * @obj: object to check
1468 *
1469 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001470 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001471 */
Imre Deakd865110c2013-01-07 21:47:33 +02001472uint32_t
1473i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1474 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001475{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001476 /*
1477 * Minimum alignment is 4k (GTT page size), but might be greater
1478 * if a fence register is needed for the object.
1479 */
Imre Deakd865110c2013-01-07 21:47:33 +02001480 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001481 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001482 return 4096;
1483
1484 /*
1485 * Previous chips need to be aligned to the size of the smallest
1486 * fence register that can contain the object.
1487 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001488 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001489}
1490
Chris Wilsond8cb5082012-08-11 15:41:03 +01001491static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1492{
1493 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1494 int ret;
1495
David Herrmann0de23972013-07-24 21:07:52 +02001496 if (drm_vma_node_has_offset(&obj->base.vma_node))
Chris Wilsond8cb5082012-08-11 15:41:03 +01001497 return 0;
1498
Daniel Vetterda494d72012-12-20 15:11:16 +01001499 dev_priv->mm.shrinker_no_lock_stealing = true;
1500
Chris Wilsond8cb5082012-08-11 15:41:03 +01001501 ret = drm_gem_create_mmap_offset(&obj->base);
1502 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001503 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001504
1505 /* Badly fragmented mmap space? The only way we can recover
1506 * space is by destroying unwanted objects. We can't randomly release
1507 * mmap_offsets as userspace expects them to be persistent for the
1508 * lifetime of the objects. The closest we can is to release the
1509 * offsets on purgeable objects by truncating it and marking it purged,
1510 * which prevents userspace from ever using that object again.
1511 */
1512 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1513 ret = drm_gem_create_mmap_offset(&obj->base);
1514 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001515 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001516
1517 i915_gem_shrink_all(dev_priv);
Daniel Vetterda494d72012-12-20 15:11:16 +01001518 ret = drm_gem_create_mmap_offset(&obj->base);
1519out:
1520 dev_priv->mm.shrinker_no_lock_stealing = false;
1521
1522 return ret;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001523}
1524
1525static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1526{
Chris Wilsond8cb5082012-08-11 15:41:03 +01001527 drm_gem_free_mmap_offset(&obj->base);
1528}
1529
Jesse Barnesde151cf2008-11-12 10:03:55 -08001530int
Dave Airlieff72145b2011-02-07 12:16:14 +10001531i915_gem_mmap_gtt(struct drm_file *file,
1532 struct drm_device *dev,
1533 uint32_t handle,
1534 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001535{
Chris Wilsonda761a62010-10-27 17:37:08 +01001536 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001537 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001538 int ret;
1539
Chris Wilson76c1dec2010-09-25 11:22:51 +01001540 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001541 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001542 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001543
Dave Airlieff72145b2011-02-07 12:16:14 +10001544 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001545 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001546 ret = -ENOENT;
1547 goto unlock;
1548 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001549
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001550 if (obj->base.size > dev_priv->gtt.mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001551 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001552 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001553 }
1554
Chris Wilson05394f32010-11-08 19:18:58 +00001555 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001556 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001557 ret = -EINVAL;
1558 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001559 }
1560
Chris Wilsond8cb5082012-08-11 15:41:03 +01001561 ret = i915_gem_object_create_mmap_offset(obj);
1562 if (ret)
1563 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001564
David Herrmann0de23972013-07-24 21:07:52 +02001565 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001566
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001567out:
Chris Wilson05394f32010-11-08 19:18:58 +00001568 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001569unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001570 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001571 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001572}
1573
Dave Airlieff72145b2011-02-07 12:16:14 +10001574/**
1575 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1576 * @dev: DRM device
1577 * @data: GTT mapping ioctl data
1578 * @file: GEM object info
1579 *
1580 * Simply returns the fake offset to userspace so it can mmap it.
1581 * The mmap call will end up in drm_gem_mmap(), which will set things
1582 * up so we can get faults in the handler above.
1583 *
1584 * The fault handler will take care of binding the object into the GTT
1585 * (since it may have been evicted to make room for something), allocating
1586 * a fence register, and mapping the appropriate aperture address into
1587 * userspace.
1588 */
1589int
1590i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1591 struct drm_file *file)
1592{
1593 struct drm_i915_gem_mmap_gtt *args = data;
1594
Dave Airlieff72145b2011-02-07 12:16:14 +10001595 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1596}
1597
Daniel Vetter225067e2012-08-20 10:23:20 +02001598/* Immediately discard the backing storage */
1599static void
1600i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001601{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001602 struct inode *inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001603
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001604 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001605
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001606 if (obj->base.filp == NULL)
1607 return;
1608
Daniel Vetter225067e2012-08-20 10:23:20 +02001609 /* Our goal here is to return as much of the memory as
1610 * is possible back to the system as we are called from OOM.
1611 * To do this we must instruct the shmfs to drop all of its
1612 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01001613 */
Al Viro496ad9a2013-01-23 17:07:38 -05001614 inode = file_inode(obj->base.filp);
Daniel Vetter225067e2012-08-20 10:23:20 +02001615 shmem_truncate_range(inode, 0, (loff_t)-1);
Hugh Dickins5949eac2011-06-27 16:18:18 -07001616
Daniel Vetter225067e2012-08-20 10:23:20 +02001617 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001618}
Chris Wilsone5281cc2010-10-28 13:45:36 +01001619
Daniel Vetter225067e2012-08-20 10:23:20 +02001620static inline int
1621i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1622{
1623 return obj->madv == I915_MADV_DONTNEED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001624}
1625
Chris Wilson5cdf5882010-09-27 15:51:07 +01001626static void
Chris Wilson05394f32010-11-08 19:18:58 +00001627i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001628{
Imre Deak90797e62013-02-18 19:28:03 +02001629 struct sg_page_iter sg_iter;
1630 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02001631
Chris Wilson05394f32010-11-08 19:18:58 +00001632 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001633
Chris Wilson6c085a72012-08-20 11:40:46 +02001634 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1635 if (ret) {
1636 /* In the event of a disaster, abandon all caches and
1637 * hope for the best.
1638 */
1639 WARN_ON(ret != -EIO);
Chris Wilson2c225692013-08-09 12:26:45 +01001640 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02001641 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1642 }
1643
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001644 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001645 i915_gem_object_save_bit_17_swizzle(obj);
1646
Chris Wilson05394f32010-11-08 19:18:58 +00001647 if (obj->madv == I915_MADV_DONTNEED)
1648 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001649
Imre Deak90797e62013-02-18 19:28:03 +02001650 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
Imre Deak2db76d72013-03-26 15:14:18 +02001651 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +01001652
Chris Wilson05394f32010-11-08 19:18:58 +00001653 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01001654 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001655
Chris Wilson05394f32010-11-08 19:18:58 +00001656 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01001657 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001658
Chris Wilson9da3da62012-06-01 15:20:22 +01001659 page_cache_release(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001660 }
Chris Wilson05394f32010-11-08 19:18:58 +00001661 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001662
Chris Wilson9da3da62012-06-01 15:20:22 +01001663 sg_free_table(obj->pages);
1664 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01001665}
1666
Chris Wilsondd624af2013-01-15 12:39:35 +00001667int
Chris Wilson37e680a2012-06-07 15:38:42 +01001668i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1669{
1670 const struct drm_i915_gem_object_ops *ops = obj->ops;
1671
Chris Wilson2f745ad2012-09-04 21:02:58 +01001672 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01001673 return 0;
1674
Chris Wilsona5570172012-09-04 21:02:54 +01001675 if (obj->pages_pin_count)
1676 return -EBUSY;
1677
Ben Widawsky98438772013-07-31 17:00:12 -07001678 BUG_ON(i915_gem_obj_bound_any(obj));
Ben Widawsky3e123022013-07-31 17:00:04 -07001679
Chris Wilsona2165e32012-12-03 11:49:00 +00001680 /* ->put_pages might need to allocate memory for the bit17 swizzle
1681 * array, hence protect them from being reaped by removing them from gtt
1682 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07001683 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00001684
Chris Wilson37e680a2012-06-07 15:38:42 +01001685 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001686 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02001687
Chris Wilson6c085a72012-08-20 11:40:46 +02001688 if (i915_gem_object_is_purgeable(obj))
1689 i915_gem_object_truncate(obj);
1690
1691 return 0;
1692}
1693
1694static long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001695__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1696 bool purgeable_only)
Chris Wilson6c085a72012-08-20 11:40:46 +02001697{
1698 struct drm_i915_gem_object *obj, *next;
1699 long count = 0;
1700
1701 list_for_each_entry_safe(obj, next,
1702 &dev_priv->mm.unbound_list,
Ben Widawsky35c20a62013-05-31 11:28:48 -07001703 global_list) {
Daniel Vetter93927ca2013-01-10 18:03:00 +01001704 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
Chris Wilson37e680a2012-06-07 15:38:42 +01001705 i915_gem_object_put_pages(obj) == 0) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001706 count += obj->base.size >> PAGE_SHIFT;
1707 if (count >= target)
1708 return count;
1709 }
1710 }
1711
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001712 list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
1713 global_list) {
1714 struct i915_vma *vma, *v;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001715
1716 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1717 continue;
1718
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001719 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1720 if (i915_vma_unbind(vma))
1721 break;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001722
1723 if (!i915_gem_object_put_pages(obj)) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001724 count += obj->base.size >> PAGE_SHIFT;
1725 if (count >= target)
1726 return count;
1727 }
1728 }
1729
1730 return count;
1731}
1732
Daniel Vetter93927ca2013-01-10 18:03:00 +01001733static long
1734i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1735{
1736 return __i915_gem_shrink(dev_priv, target, true);
1737}
1738
Chris Wilson6c085a72012-08-20 11:40:46 +02001739static void
1740i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1741{
1742 struct drm_i915_gem_object *obj, *next;
1743
1744 i915_gem_evict_everything(dev_priv->dev);
1745
Ben Widawsky35c20a62013-05-31 11:28:48 -07001746 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1747 global_list)
Chris Wilson37e680a2012-06-07 15:38:42 +01001748 i915_gem_object_put_pages(obj);
Daniel Vetter225067e2012-08-20 10:23:20 +02001749}
1750
Chris Wilson37e680a2012-06-07 15:38:42 +01001751static int
Chris Wilson6c085a72012-08-20 11:40:46 +02001752i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001753{
Chris Wilson6c085a72012-08-20 11:40:46 +02001754 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001755 int page_count, i;
1756 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01001757 struct sg_table *st;
1758 struct scatterlist *sg;
Imre Deak90797e62013-02-18 19:28:03 +02001759 struct sg_page_iter sg_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07001760 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02001761 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson6c085a72012-08-20 11:40:46 +02001762 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07001763
Chris Wilson6c085a72012-08-20 11:40:46 +02001764 /* Assert that the object is not currently in any GPU domain. As it
1765 * wasn't in the GTT, there shouldn't be any way it could have been in
1766 * a GPU cache
1767 */
1768 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1769 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1770
Chris Wilson9da3da62012-06-01 15:20:22 +01001771 st = kmalloc(sizeof(*st), GFP_KERNEL);
1772 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07001773 return -ENOMEM;
1774
Chris Wilson9da3da62012-06-01 15:20:22 +01001775 page_count = obj->base.size / PAGE_SIZE;
1776 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
Chris Wilson9da3da62012-06-01 15:20:22 +01001777 kfree(st);
1778 return -ENOMEM;
1779 }
1780
1781 /* Get the list of pages out of our struct file. They'll be pinned
1782 * at this point until we release them.
1783 *
1784 * Fail silently without starting the shrinker
1785 */
Al Viro496ad9a2013-01-23 17:07:38 -05001786 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilson6c085a72012-08-20 11:40:46 +02001787 gfp = mapping_gfp_mask(mapping);
Linus Torvaldscaf49192012-12-10 10:51:16 -08001788 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001789 gfp &= ~(__GFP_IO | __GFP_WAIT);
Imre Deak90797e62013-02-18 19:28:03 +02001790 sg = st->sgl;
1791 st->nents = 0;
1792 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001793 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1794 if (IS_ERR(page)) {
1795 i915_gem_purge(dev_priv, page_count);
1796 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1797 }
1798 if (IS_ERR(page)) {
1799 /* We've tried hard to allocate the memory by reaping
1800 * our own buffer, now let the real VM do its job and
1801 * go down in flames if truly OOM.
1802 */
Linus Torvaldscaf49192012-12-10 10:51:16 -08001803 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
Chris Wilson6c085a72012-08-20 11:40:46 +02001804 gfp |= __GFP_IO | __GFP_WAIT;
1805
1806 i915_gem_shrink_all(dev_priv);
1807 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1808 if (IS_ERR(page))
1809 goto err_pages;
1810
Linus Torvaldscaf49192012-12-10 10:51:16 -08001811 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001812 gfp &= ~(__GFP_IO | __GFP_WAIT);
1813 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001814#ifdef CONFIG_SWIOTLB
1815 if (swiotlb_nr_tbl()) {
1816 st->nents++;
1817 sg_set_page(sg, page, PAGE_SIZE, 0);
1818 sg = sg_next(sg);
1819 continue;
1820 }
1821#endif
Imre Deak90797e62013-02-18 19:28:03 +02001822 if (!i || page_to_pfn(page) != last_pfn + 1) {
1823 if (i)
1824 sg = sg_next(sg);
1825 st->nents++;
1826 sg_set_page(sg, page, PAGE_SIZE, 0);
1827 } else {
1828 sg->length += PAGE_SIZE;
1829 }
1830 last_pfn = page_to_pfn(page);
Eric Anholt673a3942008-07-30 12:06:12 -07001831 }
Konrad Rzeszutek Wilk426729d2013-06-24 11:47:48 -04001832#ifdef CONFIG_SWIOTLB
1833 if (!swiotlb_nr_tbl())
1834#endif
1835 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01001836 obj->pages = st;
1837
Eric Anholt673a3942008-07-30 12:06:12 -07001838 if (i915_gem_object_needs_bit17_swizzle(obj))
1839 i915_gem_object_do_bit_17_swizzle(obj);
1840
1841 return 0;
1842
1843err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02001844 sg_mark_end(sg);
1845 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
Imre Deak2db76d72013-03-26 15:14:18 +02001846 page_cache_release(sg_page_iter_page(&sg_iter));
Chris Wilson9da3da62012-06-01 15:20:22 +01001847 sg_free_table(st);
1848 kfree(st);
Eric Anholt673a3942008-07-30 12:06:12 -07001849 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07001850}
1851
Chris Wilson37e680a2012-06-07 15:38:42 +01001852/* Ensure that the associated pages are gathered from the backing storage
1853 * and pinned into our object. i915_gem_object_get_pages() may be called
1854 * multiple times before they are released by a single call to
1855 * i915_gem_object_put_pages() - once the pages are no longer referenced
1856 * either as a result of memory pressure (reaping pages under the shrinker)
1857 * or as the object is itself released.
1858 */
1859int
1860i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1861{
1862 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1863 const struct drm_i915_gem_object_ops *ops = obj->ops;
1864 int ret;
1865
Chris Wilson2f745ad2012-09-04 21:02:58 +01001866 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01001867 return 0;
1868
Chris Wilson43e28f02013-01-08 10:53:09 +00001869 if (obj->madv != I915_MADV_WILLNEED) {
1870 DRM_ERROR("Attempting to obtain a purgeable object\n");
1871 return -EINVAL;
1872 }
1873
Chris Wilsona5570172012-09-04 21:02:54 +01001874 BUG_ON(obj->pages_pin_count);
1875
Chris Wilson37e680a2012-06-07 15:38:42 +01001876 ret = ops->get_pages(obj);
1877 if (ret)
1878 return ret;
1879
Ben Widawsky35c20a62013-05-31 11:28:48 -07001880 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilson37e680a2012-06-07 15:38:42 +01001881 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001882}
1883
Chris Wilson54cf91d2010-11-25 18:00:26 +00001884void
Chris Wilson05394f32010-11-08 19:18:58 +00001885i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson9d7730912012-11-27 16:22:52 +00001886 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001887{
Chris Wilson05394f32010-11-08 19:18:58 +00001888 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001889 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9d7730912012-11-27 16:22:52 +00001890 u32 seqno = intel_ring_get_seqno(ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001891
Zou Nan hai852835f2010-05-21 09:08:56 +08001892 BUG_ON(ring == NULL);
Chris Wilson02978ff2013-07-09 09:22:39 +01001893 if (obj->ring != ring && obj->last_write_seqno) {
1894 /* Keep the seqno relative to the current ring */
1895 obj->last_write_seqno = seqno;
1896 }
Chris Wilson05394f32010-11-08 19:18:58 +00001897 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001898
1899 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001900 if (!obj->active) {
1901 drm_gem_object_reference(&obj->base);
1902 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001903 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001904
Chris Wilson05394f32010-11-08 19:18:58 +00001905 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001906
Chris Wilson0201f1e2012-07-20 12:41:01 +01001907 obj->last_read_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00001908
Chris Wilsoncaea7472010-11-12 13:53:37 +00001909 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00001910 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001911
Chris Wilson7dd49062012-03-21 10:48:18 +00001912 /* Bump MRU to take account of the delayed flush */
1913 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1914 struct drm_i915_fence_reg *reg;
1915
1916 reg = &dev_priv->fence_regs[obj->fence_reg];
1917 list_move_tail(&reg->lru_list,
1918 &dev_priv->mm.fence_list);
1919 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00001920 }
1921}
1922
1923static void
Chris Wilsoncaea7472010-11-12 13:53:37 +00001924i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1925{
Ben Widawskyca191b12013-07-31 17:00:14 -07001926 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1927 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
1928 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001929
Chris Wilson65ce3022012-07-20 12:41:02 +01001930 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001931 BUG_ON(!obj->active);
Chris Wilson65ce3022012-07-20 12:41:02 +01001932
Ben Widawskyca191b12013-07-31 17:00:14 -07001933 list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001934
Chris Wilson65ce3022012-07-20 12:41:02 +01001935 list_del_init(&obj->ring_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001936 obj->ring = NULL;
1937
Chris Wilson65ce3022012-07-20 12:41:02 +01001938 obj->last_read_seqno = 0;
1939 obj->last_write_seqno = 0;
1940 obj->base.write_domain = 0;
1941
1942 obj->last_fenced_seqno = 0;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001943 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001944
1945 obj->active = 0;
1946 drm_gem_object_unreference(&obj->base);
1947
1948 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08001949}
Eric Anholt673a3942008-07-30 12:06:12 -07001950
Chris Wilson9d7730912012-11-27 16:22:52 +00001951static int
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02001952i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01001953{
Chris Wilson9d7730912012-11-27 16:22:52 +00001954 struct drm_i915_private *dev_priv = dev->dev_private;
1955 struct intel_ring_buffer *ring;
1956 int ret, i, j;
Daniel Vetter53d227f2012-01-25 16:32:49 +01001957
Chris Wilson107f27a52012-12-10 13:56:17 +02001958 /* Carefully retire all requests without writing to the rings */
Chris Wilson9d7730912012-11-27 16:22:52 +00001959 for_each_ring(ring, dev_priv, i) {
Chris Wilson107f27a52012-12-10 13:56:17 +02001960 ret = intel_ring_idle(ring);
1961 if (ret)
1962 return ret;
Chris Wilson9d7730912012-11-27 16:22:52 +00001963 }
Chris Wilson9d7730912012-11-27 16:22:52 +00001964 i915_gem_retire_requests(dev);
Chris Wilson107f27a52012-12-10 13:56:17 +02001965
1966 /* Finally reset hw state */
Chris Wilson9d7730912012-11-27 16:22:52 +00001967 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02001968 intel_ring_init_seqno(ring, seqno);
Mika Kuoppala498d2ac2012-12-04 15:12:04 +02001969
Chris Wilson9d7730912012-11-27 16:22:52 +00001970 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1971 ring->sync_seqno[j] = 0;
1972 }
1973
1974 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01001975}
1976
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02001977int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1978{
1979 struct drm_i915_private *dev_priv = dev->dev_private;
1980 int ret;
1981
1982 if (seqno == 0)
1983 return -EINVAL;
1984
1985 /* HWS page needs to be set less than what we
1986 * will inject to ring
1987 */
1988 ret = i915_gem_init_seqno(dev, seqno - 1);
1989 if (ret)
1990 return ret;
1991
1992 /* Carefully set the last_seqno value so that wrap
1993 * detection still works
1994 */
1995 dev_priv->next_seqno = seqno;
1996 dev_priv->last_seqno = seqno - 1;
1997 if (dev_priv->last_seqno == 0)
1998 dev_priv->last_seqno--;
1999
2000 return 0;
2001}
2002
Chris Wilson9d7730912012-11-27 16:22:52 +00002003int
2004i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002005{
Chris Wilson9d7730912012-11-27 16:22:52 +00002006 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002007
Chris Wilson9d7730912012-11-27 16:22:52 +00002008 /* reserve 0 for non-seqno */
2009 if (dev_priv->next_seqno == 0) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002010 int ret = i915_gem_init_seqno(dev, 0);
Chris Wilson9d7730912012-11-27 16:22:52 +00002011 if (ret)
2012 return ret;
2013
2014 dev_priv->next_seqno = 1;
2015 }
2016
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02002017 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
Chris Wilson9d7730912012-11-27 16:22:52 +00002018 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002019}
2020
Mika Kuoppala0025c072013-06-12 12:35:30 +03002021int __i915_add_request(struct intel_ring_buffer *ring,
2022 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002023 struct drm_i915_gem_object *obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002024 u32 *out_seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002025{
Chris Wilsondb53a302011-02-03 11:57:46 +00002026 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Chris Wilsonacb868d2012-09-26 13:47:30 +01002027 struct drm_i915_gem_request *request;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002028 u32 request_ring_position, request_start;
Eric Anholt673a3942008-07-30 12:06:12 -07002029 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01002030 int ret;
2031
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002032 request_start = intel_ring_get_tail(ring);
Daniel Vettercc889e02012-06-13 20:45:19 +02002033 /*
2034 * Emit any outstanding flushes - execbuf can fail to emit the flush
2035 * after having emitted the batchbuffer command. Hence we need to fix
2036 * things up similar to emitting the lazy request. The difference here
2037 * is that the flush _must_ happen before the next request, no matter
2038 * what.
2039 */
Chris Wilsona7b97612012-07-20 12:41:08 +01002040 ret = intel_ring_flush_all_caches(ring);
2041 if (ret)
2042 return ret;
Daniel Vettercc889e02012-06-13 20:45:19 +02002043
Chris Wilson3c0e2342013-09-04 10:45:52 +01002044 request = ring->preallocated_lazy_request;
2045 if (WARN_ON(request == NULL))
Chris Wilsonacb868d2012-09-26 13:47:30 +01002046 return -ENOMEM;
Daniel Vettercc889e02012-06-13 20:45:19 +02002047
Chris Wilsona71d8d92012-02-15 11:25:36 +00002048 /* Record the position of the start of the request so that
2049 * should we detect the updated seqno part-way through the
2050 * GPU processing the request, we never over-estimate the
2051 * position of the head.
2052 */
2053 request_ring_position = intel_ring_get_tail(ring);
2054
Chris Wilson9d7730912012-11-27 16:22:52 +00002055 ret = ring->add_request(ring);
Chris Wilson3c0e2342013-09-04 10:45:52 +01002056 if (ret)
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002057 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002058
Chris Wilson9d7730912012-11-27 16:22:52 +00002059 request->seqno = intel_ring_get_seqno(ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08002060 request->ring = ring;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002061 request->head = request_start;
Chris Wilsona71d8d92012-02-15 11:25:36 +00002062 request->tail = request_ring_position;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002063
2064 /* Whilst this request exists, batch_obj will be on the
2065 * active_list, and so will hold the active reference. Only when this
2066 * request is retired will the the batch_obj be moved onto the
2067 * inactive_list and lose its active reference. Hence we do not need
2068 * to explicitly hold another reference here.
2069 */
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002070 request->batch_obj = obj;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002071
Chris Wilson9a7e0c22013-08-26 19:50:54 -03002072 /* Hold a reference to the current context so that we can inspect
2073 * it later in case a hangcheck error event fires.
2074 */
2075 request->ctx = ring->last_context;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002076 if (request->ctx)
2077 i915_gem_context_reference(request->ctx);
2078
Eric Anholt673a3942008-07-30 12:06:12 -07002079 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08002080 was_empty = list_empty(&ring->request_list);
2081 list_add_tail(&request->list, &ring->request_list);
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002082 request->file_priv = NULL;
Zou Nan hai852835f2010-05-21 09:08:56 +08002083
Chris Wilsondb53a302011-02-03 11:57:46 +00002084 if (file) {
2085 struct drm_i915_file_private *file_priv = file->driver_priv;
2086
Chris Wilson1c255952010-09-26 11:03:27 +01002087 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002088 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002089 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002090 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01002091 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00002092 }
Eric Anholt673a3942008-07-30 12:06:12 -07002093
Chris Wilson9d7730912012-11-27 16:22:52 +00002094 trace_i915_gem_request_add(ring, request->seqno);
Chris Wilson18235212013-09-04 10:45:51 +01002095 ring->outstanding_lazy_seqno = 0;
Chris Wilson3c0e2342013-09-04 10:45:52 +01002096 ring->preallocated_lazy_request = NULL;
Chris Wilsondb53a302011-02-03 11:57:46 +00002097
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002098 if (!dev_priv->ums.mm_suspended) {
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002099 i915_queue_hangcheck(ring->dev);
2100
Chris Wilsonf047e392012-07-21 12:31:41 +01002101 if (was_empty) {
Chris Wilsonb3b079d2010-09-13 23:44:34 +01002102 queue_delayed_work(dev_priv->wq,
Chris Wilsonbcb45082012-10-05 17:02:57 +01002103 &dev_priv->mm.retire_work,
2104 round_jiffies_up_relative(HZ));
Chris Wilsonf047e392012-07-21 12:31:41 +01002105 intel_mark_busy(dev_priv->dev);
2106 }
Ben Gamarif65d9422009-09-14 17:48:44 -04002107 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002108
Chris Wilsonacb868d2012-09-26 13:47:30 +01002109 if (out_seqno)
Chris Wilson9d7730912012-11-27 16:22:52 +00002110 *out_seqno = request->seqno;
Chris Wilson3cce4692010-10-27 16:11:02 +01002111 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002112}
2113
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002114static inline void
2115i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07002116{
Chris Wilson1c255952010-09-26 11:03:27 +01002117 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002118
Chris Wilson1c255952010-09-26 11:03:27 +01002119 if (!file_priv)
2120 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002121
Chris Wilson1c255952010-09-26 11:03:27 +01002122 spin_lock(&file_priv->mm.lock);
Herton Ronaldo Krzesinski09bfa512011-03-17 13:45:12 +00002123 if (request->file_priv) {
2124 list_del(&request->client_list);
2125 request->file_priv = NULL;
2126 }
Chris Wilson1c255952010-09-26 11:03:27 +01002127 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002128}
2129
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002130static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2131 struct i915_address_space *vm)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002132{
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002133 if (acthd >= i915_gem_obj_offset(obj, vm) &&
2134 acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002135 return true;
2136
2137 return false;
2138}
2139
2140static bool i915_head_inside_request(const u32 acthd_unmasked,
2141 const u32 request_start,
2142 const u32 request_end)
2143{
2144 const u32 acthd = acthd_unmasked & HEAD_ADDR;
2145
2146 if (request_start < request_end) {
2147 if (acthd >= request_start && acthd < request_end)
2148 return true;
2149 } else if (request_start > request_end) {
2150 if (acthd >= request_start || acthd < request_end)
2151 return true;
2152 }
2153
2154 return false;
2155}
2156
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002157static struct i915_address_space *
2158request_to_vm(struct drm_i915_gem_request *request)
2159{
2160 struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2161 struct i915_address_space *vm;
2162
2163 vm = &dev_priv->gtt.base;
2164
2165 return vm;
2166}
2167
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002168static bool i915_request_guilty(struct drm_i915_gem_request *request,
2169 const u32 acthd, bool *inside)
2170{
2171 /* There is a possibility that unmasked head address
2172 * pointing inside the ring, matches the batch_obj address range.
2173 * However this is extremely unlikely.
2174 */
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002175 if (request->batch_obj) {
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002176 if (i915_head_inside_object(acthd, request->batch_obj,
2177 request_to_vm(request))) {
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002178 *inside = true;
2179 return true;
2180 }
2181 }
2182
2183 if (i915_head_inside_request(acthd, request->head, request->tail)) {
2184 *inside = false;
2185 return true;
2186 }
2187
2188 return false;
2189}
2190
2191static void i915_set_reset_status(struct intel_ring_buffer *ring,
2192 struct drm_i915_gem_request *request,
2193 u32 acthd)
2194{
2195 struct i915_ctx_hang_stats *hs = NULL;
2196 bool inside, guilty;
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002197 unsigned long offset = 0;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002198
2199 /* Innocent until proven guilty */
2200 guilty = false;
2201
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002202 if (request->batch_obj)
2203 offset = i915_gem_obj_offset(request->batch_obj,
2204 request_to_vm(request));
2205
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002206 if (ring->hangcheck.action != HANGCHECK_WAIT &&
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002207 i915_request_guilty(request, acthd, &inside)) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002208 DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002209 ring->name,
2210 inside ? "inside" : "flushing",
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002211 offset,
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002212 request->ctx ? request->ctx->id : 0,
2213 acthd);
2214
2215 guilty = true;
2216 }
2217
2218 /* If contexts are disabled or this is the default context, use
2219 * file_priv->reset_state
2220 */
2221 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2222 hs = &request->ctx->hang_stats;
2223 else if (request->file_priv)
2224 hs = &request->file_priv->hang_stats;
2225
2226 if (hs) {
2227 if (guilty)
2228 hs->batch_active++;
2229 else
2230 hs->batch_pending++;
2231 }
2232}
2233
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002234static void i915_gem_free_request(struct drm_i915_gem_request *request)
2235{
2236 list_del(&request->list);
2237 i915_gem_request_remove_from_client(request);
2238
2239 if (request->ctx)
2240 i915_gem_context_unreference(request->ctx);
2241
2242 kfree(request);
2243}
2244
Chris Wilsondfaae392010-09-22 10:31:52 +01002245static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2246 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01002247{
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002248 u32 completed_seqno;
2249 u32 acthd;
2250
2251 acthd = intel_ring_get_active_head(ring);
2252 completed_seqno = ring->get_seqno(ring, false);
2253
Chris Wilsondfaae392010-09-22 10:31:52 +01002254 while (!list_empty(&ring->request_list)) {
2255 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01002256
Chris Wilsondfaae392010-09-22 10:31:52 +01002257 request = list_first_entry(&ring->request_list,
2258 struct drm_i915_gem_request,
2259 list);
2260
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002261 if (request->seqno > completed_seqno)
2262 i915_set_reset_status(ring, request, acthd);
2263
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002264 i915_gem_free_request(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01002265 }
2266
2267 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002268 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07002269
Chris Wilson05394f32010-11-08 19:18:58 +00002270 obj = list_first_entry(&ring->active_list,
2271 struct drm_i915_gem_object,
2272 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002273
Chris Wilson05394f32010-11-08 19:18:58 +00002274 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002275 }
Eric Anholt673a3942008-07-30 12:06:12 -07002276}
2277
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002278void i915_gem_restore_fences(struct drm_device *dev)
Chris Wilson312817a2010-11-22 11:50:11 +00002279{
2280 struct drm_i915_private *dev_priv = dev->dev_private;
2281 int i;
2282
Daniel Vetter4b9de732011-10-09 21:52:02 +02002283 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00002284 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00002285
Daniel Vetter94a335d2013-07-17 14:51:28 +02002286 /*
2287 * Commit delayed tiling changes if we have an object still
2288 * attached to the fence, otherwise just clear the fence.
2289 */
2290 if (reg->obj) {
2291 i915_gem_object_update_fence(reg->obj, reg,
2292 reg->obj->tiling_mode);
2293 } else {
2294 i915_gem_write_fence(dev, i, NULL);
2295 }
Chris Wilson312817a2010-11-22 11:50:11 +00002296 }
2297}
2298
Chris Wilson069efc12010-09-30 16:53:18 +01002299void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002300{
Chris Wilsondfaae392010-09-22 10:31:52 +01002301 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002302 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002303 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002304
Chris Wilsonb4519512012-05-11 14:29:30 +01002305 for_each_ring(ring, dev_priv, i)
2306 i915_gem_reset_ring_lists(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01002307
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002308 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002309}
2310
2311/**
2312 * This function clears the request list as sequence numbers are passed.
2313 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00002314void
Chris Wilsondb53a302011-02-03 11:57:46 +00002315i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002316{
Eric Anholt673a3942008-07-30 12:06:12 -07002317 uint32_t seqno;
2318
Chris Wilsondb53a302011-02-03 11:57:46 +00002319 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01002320 return;
2321
Chris Wilsondb53a302011-02-03 11:57:46 +00002322 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002323
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01002324 seqno = ring->get_seqno(ring, true);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002325
Zou Nan hai852835f2010-05-21 09:08:56 +08002326 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002327 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07002328
Zou Nan hai852835f2010-05-21 09:08:56 +08002329 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07002330 struct drm_i915_gem_request,
2331 list);
Eric Anholt673a3942008-07-30 12:06:12 -07002332
Chris Wilsondfaae392010-09-22 10:31:52 +01002333 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07002334 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002335
Chris Wilsondb53a302011-02-03 11:57:46 +00002336 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002337 /* We know the GPU must have read the request to have
2338 * sent us the seqno + interrupt, so use the position
2339 * of tail of the request to update the last known position
2340 * of the GPU head.
2341 */
2342 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002343
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002344 i915_gem_free_request(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002345 }
2346
2347 /* Move any buffers on the active list that are no longer referenced
2348 * by the ringbuffer to the flushing/inactive lists as appropriate.
2349 */
2350 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002351 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002352
Akshay Joshi0206e352011-08-16 15:34:10 -04002353 obj = list_first_entry(&ring->active_list,
Chris Wilson05394f32010-11-08 19:18:58 +00002354 struct drm_i915_gem_object,
2355 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002356
Chris Wilson0201f1e2012-07-20 12:41:01 +01002357 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002358 break;
2359
Chris Wilson65ce3022012-07-20 12:41:02 +01002360 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002361 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002362
Chris Wilsondb53a302011-02-03 11:57:46 +00002363 if (unlikely(ring->trace_irq_seqno &&
2364 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002365 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00002366 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002367 }
Chris Wilson23bc5982010-09-29 16:10:57 +01002368
Chris Wilsondb53a302011-02-03 11:57:46 +00002369 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002370}
2371
2372void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002373i915_gem_retire_requests(struct drm_device *dev)
2374{
2375 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002376 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002377 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002378
Chris Wilsonb4519512012-05-11 14:29:30 +01002379 for_each_ring(ring, dev_priv, i)
2380 i915_gem_retire_requests_ring(ring);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002381}
2382
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002383static void
Eric Anholt673a3942008-07-30 12:06:12 -07002384i915_gem_retire_work_handler(struct work_struct *work)
2385{
2386 drm_i915_private_t *dev_priv;
2387 struct drm_device *dev;
Chris Wilsonb4519512012-05-11 14:29:30 +01002388 struct intel_ring_buffer *ring;
Chris Wilson0a587052011-01-09 21:05:44 +00002389 bool idle;
2390 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002391
2392 dev_priv = container_of(work, drm_i915_private_t,
2393 mm.retire_work.work);
2394 dev = dev_priv->dev;
2395
Chris Wilson891b48c2010-09-29 12:26:37 +01002396 /* Come back later if the device is busy... */
2397 if (!mutex_trylock(&dev->struct_mutex)) {
Chris Wilsonbcb45082012-10-05 17:02:57 +01002398 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2399 round_jiffies_up_relative(HZ));
Chris Wilson891b48c2010-09-29 12:26:37 +01002400 return;
2401 }
2402
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002403 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002404
Chris Wilson0a587052011-01-09 21:05:44 +00002405 /* Send a periodic flush down the ring so we don't hold onto GEM
2406 * objects indefinitely.
2407 */
2408 idle = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01002409 for_each_ring(ring, dev_priv, i) {
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002410 if (ring->gpu_caches_dirty)
Mika Kuoppala0025c072013-06-12 12:35:30 +03002411 i915_add_request(ring, NULL);
Chris Wilson0a587052011-01-09 21:05:44 +00002412
2413 idle &= list_empty(&ring->request_list);
2414 }
2415
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002416 if (!dev_priv->ums.mm_suspended && !idle)
Chris Wilsonbcb45082012-10-05 17:02:57 +01002417 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2418 round_jiffies_up_relative(HZ));
Chris Wilsonf047e392012-07-21 12:31:41 +01002419 if (idle)
2420 intel_mark_idle(dev);
Chris Wilson0a587052011-01-09 21:05:44 +00002421
Eric Anholt673a3942008-07-30 12:06:12 -07002422 mutex_unlock(&dev->struct_mutex);
2423}
2424
Ben Widawsky5816d642012-04-11 11:18:19 -07002425/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002426 * Ensures that an object will eventually get non-busy by flushing any required
2427 * write domains, emitting any outstanding lazy request and retiring and
2428 * completed requests.
2429 */
2430static int
2431i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2432{
2433 int ret;
2434
2435 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002436 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002437 if (ret)
2438 return ret;
2439
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002440 i915_gem_retire_requests_ring(obj->ring);
2441 }
2442
2443 return 0;
2444}
2445
2446/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002447 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2448 * @DRM_IOCTL_ARGS: standard ioctl arguments
2449 *
2450 * Returns 0 if successful, else an error is returned with the remaining time in
2451 * the timeout parameter.
2452 * -ETIME: object is still busy after timeout
2453 * -ERESTARTSYS: signal interrupted the wait
2454 * -ENONENT: object doesn't exist
2455 * Also possible, but rare:
2456 * -EAGAIN: GPU wedged
2457 * -ENOMEM: damn
2458 * -ENODEV: Internal IRQ fail
2459 * -E?: The add request failed
2460 *
2461 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2462 * non-zero timeout parameter the wait ioctl will wait for the given number of
2463 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2464 * without holding struct_mutex the object may become re-busied before this
2465 * function completes. A similar but shorter * race condition exists in the busy
2466 * ioctl
2467 */
2468int
2469i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2470{
Daniel Vetterf69061b2012-12-06 09:01:42 +01002471 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002472 struct drm_i915_gem_wait *args = data;
2473 struct drm_i915_gem_object *obj;
2474 struct intel_ring_buffer *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002475 struct timespec timeout_stack, *timeout = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01002476 unsigned reset_counter;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002477 u32 seqno = 0;
2478 int ret = 0;
2479
Ben Widawskyeac1f142012-06-05 15:24:24 -07002480 if (args->timeout_ns >= 0) {
2481 timeout_stack = ns_to_timespec(args->timeout_ns);
2482 timeout = &timeout_stack;
2483 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002484
2485 ret = i915_mutex_lock_interruptible(dev);
2486 if (ret)
2487 return ret;
2488
2489 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2490 if (&obj->base == NULL) {
2491 mutex_unlock(&dev->struct_mutex);
2492 return -ENOENT;
2493 }
2494
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002495 /* Need to make sure the object gets inactive eventually. */
2496 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002497 if (ret)
2498 goto out;
2499
2500 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002501 seqno = obj->last_read_seqno;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002502 ring = obj->ring;
2503 }
2504
2505 if (seqno == 0)
2506 goto out;
2507
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002508 /* Do this after OLR check to make sure we make forward progress polling
2509 * on this IOCTL with a 0 timeout (like busy ioctl)
2510 */
2511 if (!args->timeout_ns) {
2512 ret = -ETIME;
2513 goto out;
2514 }
2515
2516 drm_gem_object_unreference(&obj->base);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002517 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002518 mutex_unlock(&dev->struct_mutex);
2519
Daniel Vetterf69061b2012-12-06 09:01:42 +01002520 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03002521 if (timeout)
Ben Widawskyeac1f142012-06-05 15:24:24 -07002522 args->timeout_ns = timespec_to_ns(timeout);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002523 return ret;
2524
2525out:
2526 drm_gem_object_unreference(&obj->base);
2527 mutex_unlock(&dev->struct_mutex);
2528 return ret;
2529}
2530
2531/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002532 * i915_gem_object_sync - sync an object to a ring.
2533 *
2534 * @obj: object which may be in use on another ring.
2535 * @to: ring we wish to use the object on. May be NULL.
2536 *
2537 * This code is meant to abstract object synchronization with the GPU.
2538 * Calling with NULL implies synchronizing the object with the CPU
2539 * rather than a particular GPU ring.
2540 *
2541 * Returns 0 if successful, else propagates up the lower layer error.
2542 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002543int
2544i915_gem_object_sync(struct drm_i915_gem_object *obj,
2545 struct intel_ring_buffer *to)
2546{
2547 struct intel_ring_buffer *from = obj->ring;
2548 u32 seqno;
2549 int ret, idx;
2550
2551 if (from == NULL || to == from)
2552 return 0;
2553
Ben Widawsky5816d642012-04-11 11:18:19 -07002554 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Chris Wilson0201f1e2012-07-20 12:41:01 +01002555 return i915_gem_object_wait_rendering(obj, false);
Ben Widawsky2911a352012-04-05 14:47:36 -07002556
2557 idx = intel_ring_sync_index(from, to);
2558
Chris Wilson0201f1e2012-07-20 12:41:01 +01002559 seqno = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002560 if (seqno <= from->sync_seqno[idx])
2561 return 0;
2562
Ben Widawskyb4aca012012-04-25 20:50:12 -07002563 ret = i915_gem_check_olr(obj->ring, seqno);
2564 if (ret)
2565 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002566
Ben Widawsky1500f7e2012-04-11 11:18:21 -07002567 ret = to->sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002568 if (!ret)
Mika Kuoppala7b01e262012-11-28 17:18:45 +02002569 /* We use last_read_seqno because sync_to()
2570 * might have just caused seqno wrap under
2571 * the radar.
2572 */
2573 from->sync_seqno[idx] = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002574
Ben Widawskye3a5a222012-04-11 11:18:20 -07002575 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002576}
2577
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002578static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2579{
2580 u32 old_write_domain, old_read_domains;
2581
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002582 /* Force a pagefault for domain tracking on next user access */
2583 i915_gem_release_mmap(obj);
2584
Keith Packardb97c3d92011-06-24 21:02:59 -07002585 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2586 return;
2587
Chris Wilson97c809fd2012-10-09 19:24:38 +01002588 /* Wait for any direct GTT access to complete */
2589 mb();
2590
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002591 old_read_domains = obj->base.read_domains;
2592 old_write_domain = obj->base.write_domain;
2593
2594 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2595 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2596
2597 trace_i915_gem_object_change_domain(obj,
2598 old_read_domains,
2599 old_write_domain);
2600}
2601
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002602int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002603{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002604 struct drm_i915_gem_object *obj = vma->obj;
Daniel Vetter7bddb012012-02-09 17:15:47 +01002605 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson43e28f02013-01-08 10:53:09 +00002606 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002607
Daniel Vetterb93dab62013-08-26 11:23:47 +02002608 /* For now we only ever use 1 vma per object */
2609 WARN_ON(!list_is_singular(&obj->vma_list));
2610
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002611 if (list_empty(&vma->vma_link))
Eric Anholt673a3942008-07-30 12:06:12 -07002612 return 0;
2613
Daniel Vetter0ff501c2013-08-29 19:50:31 +02002614 if (!drm_mm_node_allocated(&vma->node)) {
2615 i915_gem_vma_destroy(vma);
2616
2617 return 0;
2618 }
Ben Widawsky433544b2013-08-13 18:09:06 -07002619
Chris Wilson31d8d652012-05-24 19:11:20 +01002620 if (obj->pin_count)
2621 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002622
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002623 BUG_ON(obj->pages == NULL);
2624
Chris Wilsona8198ee2011-04-13 22:04:09 +01002625 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002626 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002627 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002628 /* Continue on if we fail due to EIO, the GPU is hung so we
2629 * should be safe and we need to cleanup or else we might
2630 * cause memory corruption through use-after-free.
2631 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002632
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002633 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002634
Daniel Vetter96b47b62009-12-15 17:50:00 +01002635 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002636 ret = i915_gem_object_put_fence(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002637 if (ret)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002638 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002639
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002640 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00002641
Daniel Vetter74898d72012-02-15 23:50:22 +01002642 if (obj->has_global_gtt_mapping)
2643 i915_gem_gtt_unbind_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002644 if (obj->has_aliasing_ppgtt_mapping) {
2645 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2646 obj->has_aliasing_ppgtt_mapping = 0;
2647 }
Daniel Vetter74163902012-02-15 23:50:21 +01002648 i915_gem_gtt_finish_object(obj);
Ben Widawsky401c29f2013-05-31 11:28:47 -07002649 i915_gem_object_unpin_pages(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002650
Ben Widawskyca191b12013-07-31 17:00:14 -07002651 list_del(&vma->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002652 /* Avoid an unnecessary call to unbind on rebind. */
Ben Widawsky5cacaac2013-07-31 17:00:13 -07002653 if (i915_is_ggtt(vma->vm))
2654 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002655
Ben Widawsky2f633152013-07-17 12:19:03 -07002656 drm_mm_remove_node(&vma->node);
Ben Widawsky433544b2013-08-13 18:09:06 -07002657
Ben Widawsky2f633152013-07-17 12:19:03 -07002658 i915_gem_vma_destroy(vma);
2659
2660 /* Since the unbound list is global, only move to that list if
Daniel Vetterb93dab62013-08-26 11:23:47 +02002661 * no more VMAs exist. */
Ben Widawsky2f633152013-07-17 12:19:03 -07002662 if (list_empty(&obj->vma_list))
2663 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002664
Chris Wilson88241782011-01-07 17:09:48 +00002665 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002666}
2667
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002668/**
2669 * Unbinds an object from the global GTT aperture.
2670 */
2671int
2672i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2673{
2674 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2675 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2676
Dan Carpenter58e73e12013-08-09 12:44:11 +03002677 if (!i915_gem_obj_ggtt_bound(obj))
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002678 return 0;
2679
2680 if (obj->pin_count)
2681 return -EBUSY;
2682
2683 BUG_ON(obj->pages == NULL);
2684
2685 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2686}
2687
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002688int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002689{
2690 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002691 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002692 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002693
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002694 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002695 for_each_ring(ring, dev_priv, i) {
Ben Widawskyb6c74882012-08-14 14:35:14 -07002696 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2697 if (ret)
2698 return ret;
2699
Chris Wilson3e960502012-11-27 16:22:54 +00002700 ret = intel_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002701 if (ret)
2702 return ret;
2703 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002704
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002705 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002706}
2707
Chris Wilson9ce079e2012-04-17 15:31:30 +01002708static void i965_write_fence_reg(struct drm_device *dev, int reg,
2709 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002710{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002711 drm_i915_private_t *dev_priv = dev->dev_private;
Imre Deak56c844e2013-01-07 21:47:34 +02002712 int fence_reg;
2713 int fence_pitch_shift;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002714
Imre Deak56c844e2013-01-07 21:47:34 +02002715 if (INTEL_INFO(dev)->gen >= 6) {
2716 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2717 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2718 } else {
2719 fence_reg = FENCE_REG_965_0;
2720 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2721 }
2722
Chris Wilsond18b9612013-07-10 13:36:23 +01002723 fence_reg += reg * 8;
2724
2725 /* To w/a incoherency with non-atomic 64-bit register updates,
2726 * we split the 64-bit update into two 32-bit writes. In order
2727 * for a partial fence not to be evaluated between writes, we
2728 * precede the update with write to turn off the fence register,
2729 * and only enable the fence as the last step.
2730 *
2731 * For extra levels of paranoia, we make sure each step lands
2732 * before applying the next step.
2733 */
2734 I915_WRITE(fence_reg, 0);
2735 POSTING_READ(fence_reg);
2736
Chris Wilson9ce079e2012-04-17 15:31:30 +01002737 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002738 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilsond18b9612013-07-10 13:36:23 +01002739 uint64_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002740
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002741 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
Chris Wilson9ce079e2012-04-17 15:31:30 +01002742 0xfffff000) << 32;
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002743 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
Imre Deak56c844e2013-01-07 21:47:34 +02002744 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002745 if (obj->tiling_mode == I915_TILING_Y)
2746 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2747 val |= I965_FENCE_REG_VALID;
Daniel Vetterc6642782010-11-12 13:46:18 +00002748
Chris Wilsond18b9612013-07-10 13:36:23 +01002749 I915_WRITE(fence_reg + 4, val >> 32);
2750 POSTING_READ(fence_reg + 4);
2751
2752 I915_WRITE(fence_reg + 0, val);
2753 POSTING_READ(fence_reg);
2754 } else {
2755 I915_WRITE(fence_reg + 4, 0);
2756 POSTING_READ(fence_reg + 4);
2757 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002758}
2759
Chris Wilson9ce079e2012-04-17 15:31:30 +01002760static void i915_write_fence_reg(struct drm_device *dev, int reg,
2761 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002762{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002763 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002764 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002765
Chris Wilson9ce079e2012-04-17 15:31:30 +01002766 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002767 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002768 int pitch_val;
2769 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002770
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002771 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002772 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002773 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2774 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2775 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002776
2777 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2778 tile_width = 128;
2779 else
2780 tile_width = 512;
2781
2782 /* Note: pitch better be a power of two tile widths */
2783 pitch_val = obj->stride / tile_width;
2784 pitch_val = ffs(pitch_val) - 1;
2785
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002786 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002787 if (obj->tiling_mode == I915_TILING_Y)
2788 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2789 val |= I915_FENCE_SIZE_BITS(size);
2790 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2791 val |= I830_FENCE_REG_VALID;
2792 } else
2793 val = 0;
2794
2795 if (reg < 8)
2796 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002797 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002798 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002799
Chris Wilson9ce079e2012-04-17 15:31:30 +01002800 I915_WRITE(reg, val);
2801 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002802}
2803
Chris Wilson9ce079e2012-04-17 15:31:30 +01002804static void i830_write_fence_reg(struct drm_device *dev, int reg,
2805 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002806{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002807 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002808 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002809
Chris Wilson9ce079e2012-04-17 15:31:30 +01002810 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002811 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002812 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002813
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002814 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002815 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002816 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2817 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2818 i915_gem_obj_ggtt_offset(obj), size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002819
Chris Wilson9ce079e2012-04-17 15:31:30 +01002820 pitch_val = obj->stride / 128;
2821 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002822
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002823 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002824 if (obj->tiling_mode == I915_TILING_Y)
2825 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2826 val |= I830_FENCE_SIZE_BITS(size);
2827 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2828 val |= I830_FENCE_REG_VALID;
2829 } else
2830 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002831
Chris Wilson9ce079e2012-04-17 15:31:30 +01002832 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2833 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2834}
2835
Chris Wilsond0a57782012-10-09 19:24:37 +01002836inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2837{
2838 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2839}
2840
Chris Wilson9ce079e2012-04-17 15:31:30 +01002841static void i915_gem_write_fence(struct drm_device *dev, int reg,
2842 struct drm_i915_gem_object *obj)
2843{
Chris Wilsond0a57782012-10-09 19:24:37 +01002844 struct drm_i915_private *dev_priv = dev->dev_private;
2845
2846 /* Ensure that all CPU reads are completed before installing a fence
2847 * and all writes before removing the fence.
2848 */
2849 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2850 mb();
2851
Daniel Vetter94a335d2013-07-17 14:51:28 +02002852 WARN(obj && (!obj->stride || !obj->tiling_mode),
2853 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2854 obj->stride, obj->tiling_mode);
2855
Chris Wilson9ce079e2012-04-17 15:31:30 +01002856 switch (INTEL_INFO(dev)->gen) {
2857 case 7:
Imre Deak56c844e2013-01-07 21:47:34 +02002858 case 6:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002859 case 5:
2860 case 4: i965_write_fence_reg(dev, reg, obj); break;
2861 case 3: i915_write_fence_reg(dev, reg, obj); break;
2862 case 2: i830_write_fence_reg(dev, reg, obj); break;
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08002863 default: BUG();
Chris Wilson9ce079e2012-04-17 15:31:30 +01002864 }
Chris Wilsond0a57782012-10-09 19:24:37 +01002865
2866 /* And similarly be paranoid that no direct access to this region
2867 * is reordered to before the fence is installed.
2868 */
2869 if (i915_gem_object_needs_mb(obj))
2870 mb();
Jesse Barnesde151cf2008-11-12 10:03:55 -08002871}
2872
Chris Wilson61050802012-04-17 15:31:31 +01002873static inline int fence_number(struct drm_i915_private *dev_priv,
2874 struct drm_i915_fence_reg *fence)
2875{
2876 return fence - dev_priv->fence_regs;
2877}
2878
2879static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2880 struct drm_i915_fence_reg *fence,
2881 bool enable)
2882{
Chris Wilson2dc8aae2013-05-22 17:08:06 +01002883 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson46a0b632013-07-10 13:36:24 +01002884 int reg = fence_number(dev_priv, fence);
Chris Wilson61050802012-04-17 15:31:31 +01002885
Chris Wilson46a0b632013-07-10 13:36:24 +01002886 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
Chris Wilson61050802012-04-17 15:31:31 +01002887
2888 if (enable) {
Chris Wilson46a0b632013-07-10 13:36:24 +01002889 obj->fence_reg = reg;
Chris Wilson61050802012-04-17 15:31:31 +01002890 fence->obj = obj;
2891 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2892 } else {
2893 obj->fence_reg = I915_FENCE_REG_NONE;
2894 fence->obj = NULL;
2895 list_del_init(&fence->lru_list);
2896 }
Daniel Vetter94a335d2013-07-17 14:51:28 +02002897 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +01002898}
2899
Chris Wilsond9e86c02010-11-10 16:40:20 +00002900static int
Chris Wilsond0a57782012-10-09 19:24:37 +01002901i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002902{
Chris Wilson1c293ea2012-04-17 15:31:27 +01002903 if (obj->last_fenced_seqno) {
Chris Wilson86d5bc32012-07-20 12:41:04 +01002904 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01002905 if (ret)
2906 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002907
2908 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002909 }
2910
Chris Wilson86d5bc32012-07-20 12:41:04 +01002911 obj->fenced_gpu_access = false;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002912 return 0;
2913}
2914
2915int
2916i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2917{
Chris Wilson61050802012-04-17 15:31:31 +01002918 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonf9c513e2013-03-26 11:29:27 +00002919 struct drm_i915_fence_reg *fence;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002920 int ret;
2921
Chris Wilsond0a57782012-10-09 19:24:37 +01002922 ret = i915_gem_object_wait_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002923 if (ret)
2924 return ret;
2925
Chris Wilson61050802012-04-17 15:31:31 +01002926 if (obj->fence_reg == I915_FENCE_REG_NONE)
2927 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002928
Chris Wilsonf9c513e2013-03-26 11:29:27 +00002929 fence = &dev_priv->fence_regs[obj->fence_reg];
2930
Chris Wilson61050802012-04-17 15:31:31 +01002931 i915_gem_object_fence_lost(obj);
Chris Wilsonf9c513e2013-03-26 11:29:27 +00002932 i915_gem_object_update_fence(obj, fence, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002933
2934 return 0;
2935}
2936
2937static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01002938i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01002939{
Daniel Vetterae3db242010-02-19 11:51:58 +01002940 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01002941 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002942 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01002943
2944 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002945 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002946 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2947 reg = &dev_priv->fence_regs[i];
2948 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002949 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002950
Chris Wilson1690e1e2011-12-14 13:57:08 +01002951 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002952 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002953 }
2954
Chris Wilsond9e86c02010-11-10 16:40:20 +00002955 if (avail == NULL)
2956 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002957
2958 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002959 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01002960 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01002961 continue;
2962
Chris Wilson8fe301a2012-04-17 15:31:28 +01002963 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002964 }
2965
Chris Wilson8fe301a2012-04-17 15:31:28 +01002966 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002967}
2968
Jesse Barnesde151cf2008-11-12 10:03:55 -08002969/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002970 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08002971 * @obj: object to map through a fence reg
2972 *
2973 * When mapping objects through the GTT, userspace wants to be able to write
2974 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002975 * This function walks the fence regs looking for a free one for @obj,
2976 * stealing one if it can't find any.
2977 *
2978 * It then sets up the reg based on the object's properties: address, pitch
2979 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002980 *
2981 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002982 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002983int
Chris Wilson06d98132012-04-17 15:31:24 +01002984i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002985{
Chris Wilson05394f32010-11-08 19:18:58 +00002986 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002987 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01002988 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002989 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002990 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002991
Chris Wilson14415742012-04-17 15:31:33 +01002992 /* Have we updated the tiling parameters upon the object and so
2993 * will need to serialise the write to the associated fence register?
2994 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01002995 if (obj->fence_dirty) {
Chris Wilsond0a57782012-10-09 19:24:37 +01002996 ret = i915_gem_object_wait_fence(obj);
Chris Wilson14415742012-04-17 15:31:33 +01002997 if (ret)
2998 return ret;
2999 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003000
Chris Wilsond9e86c02010-11-10 16:40:20 +00003001 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00003002 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3003 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003004 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01003005 list_move_tail(&reg->lru_list,
3006 &dev_priv->mm.fence_list);
3007 return 0;
3008 }
3009 } else if (enable) {
3010 reg = i915_find_fence_reg(dev);
3011 if (reg == NULL)
3012 return -EDEADLK;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003013
Chris Wilson14415742012-04-17 15:31:33 +01003014 if (reg->obj) {
3015 struct drm_i915_gem_object *old = reg->obj;
3016
Chris Wilsond0a57782012-10-09 19:24:37 +01003017 ret = i915_gem_object_wait_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003018 if (ret)
3019 return ret;
3020
Chris Wilson14415742012-04-17 15:31:33 +01003021 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003022 }
Chris Wilson14415742012-04-17 15:31:33 +01003023 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07003024 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07003025
Chris Wilson14415742012-04-17 15:31:33 +01003026 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson14415742012-04-17 15:31:33 +01003027
Chris Wilson9ce079e2012-04-17 15:31:30 +01003028 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003029}
3030
Chris Wilson42d6ab42012-07-26 11:49:32 +01003031static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3032 struct drm_mm_node *gtt_space,
3033 unsigned long cache_level)
3034{
3035 struct drm_mm_node *other;
3036
3037 /* On non-LLC machines we have to be careful when putting differing
3038 * types of snoopable memory together to avoid the prefetcher
Damien Lespiau4239ca72012-12-03 16:26:16 +00003039 * crossing memory domains and dying.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003040 */
3041 if (HAS_LLC(dev))
3042 return true;
3043
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003044 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003045 return true;
3046
3047 if (list_empty(&gtt_space->node_list))
3048 return true;
3049
3050 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3051 if (other->allocated && !other->hole_follows && other->color != cache_level)
3052 return false;
3053
3054 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3055 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3056 return false;
3057
3058 return true;
3059}
3060
3061static void i915_gem_verify_gtt(struct drm_device *dev)
3062{
3063#if WATCH_GTT
3064 struct drm_i915_private *dev_priv = dev->dev_private;
3065 struct drm_i915_gem_object *obj;
3066 int err = 0;
3067
Ben Widawsky35c20a62013-05-31 11:28:48 -07003068 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
Chris Wilson42d6ab42012-07-26 11:49:32 +01003069 if (obj->gtt_space == NULL) {
3070 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3071 err++;
3072 continue;
3073 }
3074
3075 if (obj->cache_level != obj->gtt_space->color) {
3076 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003077 i915_gem_obj_ggtt_offset(obj),
3078 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003079 obj->cache_level,
3080 obj->gtt_space->color);
3081 err++;
3082 continue;
3083 }
3084
3085 if (!i915_gem_valid_gtt_space(dev,
3086 obj->gtt_space,
3087 obj->cache_level)) {
3088 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003089 i915_gem_obj_ggtt_offset(obj),
3090 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003091 obj->cache_level);
3092 err++;
3093 continue;
3094 }
3095 }
3096
3097 WARN_ON(err);
3098#endif
3099}
3100
Jesse Barnesde151cf2008-11-12 10:03:55 -08003101/**
Eric Anholt673a3942008-07-30 12:06:12 -07003102 * Finds free space in the GTT aperture and binds the object there.
3103 */
3104static int
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003105i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3106 struct i915_address_space *vm,
3107 unsigned alignment,
3108 bool map_and_fenceable,
3109 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003110{
Chris Wilson05394f32010-11-08 19:18:58 +00003111 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003112 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter5e783302010-11-14 22:32:36 +01003113 u32 size, fence_size, fence_alignment, unfenced_alignment;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003114 size_t gtt_max =
3115 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
Ben Widawsky2f633152013-07-17 12:19:03 -07003116 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003117 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003118
Chris Wilsone28f8712011-07-18 13:11:49 -07003119 fence_size = i915_gem_get_gtt_size(dev,
3120 obj->base.size,
3121 obj->tiling_mode);
3122 fence_alignment = i915_gem_get_gtt_alignment(dev,
3123 obj->base.size,
Imre Deakd865110c2013-01-07 21:47:33 +02003124 obj->tiling_mode, true);
Chris Wilsone28f8712011-07-18 13:11:49 -07003125 unfenced_alignment =
Imre Deakd865110c2013-01-07 21:47:33 +02003126 i915_gem_get_gtt_alignment(dev,
Chris Wilsone28f8712011-07-18 13:11:49 -07003127 obj->base.size,
Imre Deakd865110c2013-01-07 21:47:33 +02003128 obj->tiling_mode, false);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003129
Eric Anholt673a3942008-07-30 12:06:12 -07003130 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01003131 alignment = map_and_fenceable ? fence_alignment :
3132 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003133 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003134 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3135 return -EINVAL;
3136 }
3137
Chris Wilson05394f32010-11-08 19:18:58 +00003138 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003139
Chris Wilson654fc602010-05-27 13:18:21 +01003140 /* If the object is bigger than the entire aperture, reject it early
3141 * before evicting everything in a vain attempt to find space.
3142 */
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003143 if (obj->base.size > gtt_max) {
Jani Nikula3765f302013-06-07 16:03:50 +03003144 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
Chris Wilsona36689c2013-05-21 16:58:49 +01003145 obj->base.size,
3146 map_and_fenceable ? "mappable" : "total",
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003147 gtt_max);
Chris Wilson654fc602010-05-27 13:18:21 +01003148 return -E2BIG;
3149 }
3150
Chris Wilson37e680a2012-06-07 15:38:42 +01003151 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003152 if (ret)
3153 return ret;
3154
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003155 i915_gem_object_pin_pages(obj);
3156
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003157 BUG_ON(!i915_is_ggtt(vm));
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003158
Ben Widawskyaccfef22013-08-14 11:38:35 +02003159 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
Dan Carpenterdb473b32013-07-19 08:45:46 +03003160 if (IS_ERR(vma)) {
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003161 ret = PTR_ERR(vma);
3162 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003163 }
3164
Ben Widawskyaccfef22013-08-14 11:38:35 +02003165 /* For now we only ever use 1 vma per object */
3166 WARN_ON(!list_is_singular(&obj->vma_list));
3167
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003168search_free:
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003169 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003170 size, alignment,
David Herrmann31e5d7c2013-07-27 13:36:27 +02003171 obj->cache_level, 0, gtt_max,
3172 DRM_MM_SEARCH_DEFAULT);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003173 if (ret) {
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07003174 ret = i915_gem_evict_something(dev, vm, size, alignment,
Chris Wilson42d6ab42012-07-26 11:49:32 +01003175 obj->cache_level,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003176 map_and_fenceable,
3177 nonblocking);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003178 if (ret == 0)
3179 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003180
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003181 goto err_free_vma;
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003182 }
Ben Widawsky2f633152013-07-17 12:19:03 -07003183 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003184 obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003185 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003186 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003187 }
3188
Daniel Vetter74163902012-02-15 23:50:21 +01003189 ret = i915_gem_gtt_prepare_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003190 if (ret)
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003191 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003192
Ben Widawsky35c20a62013-05-31 11:28:48 -07003193 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -07003194 list_add_tail(&vma->mm_list, &vm->inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01003195
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003196 if (i915_is_ggtt(vm)) {
3197 bool mappable, fenceable;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003198
Daniel Vetter49987092013-08-14 10:21:23 +02003199 fenceable = (vma->node.size == fence_size &&
3200 (vma->node.start & (fence_alignment - 1)) == 0);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003201
Daniel Vetter49987092013-08-14 10:21:23 +02003202 mappable = (vma->node.start + obj->base.size <=
3203 dev_priv->gtt.mappable_end);
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003204
Ben Widawsky5cacaac2013-07-31 17:00:13 -07003205 obj->map_and_fenceable = mappable && fenceable;
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003206 }
Daniel Vetter75e9e912010-11-04 17:11:09 +01003207
Ben Widawsky7ace7ef2013-08-09 22:12:12 -07003208 WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003209
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003210 trace_i915_vma_bind(vma, map_and_fenceable);
Chris Wilson42d6ab42012-07-26 11:49:32 +01003211 i915_gem_verify_gtt(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003212 return 0;
Ben Widawsky2f633152013-07-17 12:19:03 -07003213
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003214err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003215 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003216err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003217 i915_gem_vma_destroy(vma);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003218err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003219 i915_gem_object_unpin_pages(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003220 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003221}
3222
Chris Wilson000433b2013-08-08 14:41:09 +01003223bool
Chris Wilson2c225692013-08-09 12:26:45 +01003224i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3225 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003226{
Eric Anholt673a3942008-07-30 12:06:12 -07003227 /* If we don't have a page list set up, then we're not pinned
3228 * to GPU, and we can ignore the cache flush because it'll happen
3229 * again at bind time.
3230 */
Chris Wilson05394f32010-11-08 19:18:58 +00003231 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003232 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003233
Imre Deak769ce462013-02-13 21:56:05 +02003234 /*
3235 * Stolen memory is always coherent with the GPU as it is explicitly
3236 * marked as wc by the system, or the system is cache-coherent.
3237 */
3238 if (obj->stolen)
Chris Wilson000433b2013-08-08 14:41:09 +01003239 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003240
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003241 /* If the GPU is snooping the contents of the CPU cache,
3242 * we do not need to manually clear the CPU cache lines. However,
3243 * the caches are only snooped when the render cache is
3244 * flushed/invalidated. As we always have to emit invalidations
3245 * and flushes when moving into and out of the RENDER domain, correct
3246 * snooping behaviour occurs naturally as the result of our domain
3247 * tracking.
3248 */
Chris Wilson2c225692013-08-09 12:26:45 +01003249 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
Chris Wilson000433b2013-08-08 14:41:09 +01003250 return false;
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003251
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003252 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003253 drm_clflush_sg(obj->pages);
Chris Wilson000433b2013-08-08 14:41:09 +01003254
3255 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003256}
3257
3258/** Flushes the GTT write domain for the object if it's dirty. */
3259static void
Chris Wilson05394f32010-11-08 19:18:58 +00003260i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003261{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003262 uint32_t old_write_domain;
3263
Chris Wilson05394f32010-11-08 19:18:58 +00003264 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003265 return;
3266
Chris Wilson63256ec2011-01-04 18:42:07 +00003267 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003268 * to it immediately go to main memory as far as we know, so there's
3269 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003270 *
3271 * However, we do have to enforce the order so that all writes through
3272 * the GTT land before any writes to the device, such as updates to
3273 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08003274 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003275 wmb();
3276
Chris Wilson05394f32010-11-08 19:18:58 +00003277 old_write_domain = obj->base.write_domain;
3278 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003279
3280 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003281 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003282 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003283}
3284
3285/** Flushes the CPU write domain for the object if it's dirty. */
3286static void
Chris Wilson2c225692013-08-09 12:26:45 +01003287i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3288 bool force)
Eric Anholte47c68e2008-11-14 13:35:19 -08003289{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003290 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08003291
Chris Wilson05394f32010-11-08 19:18:58 +00003292 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003293 return;
3294
Chris Wilson000433b2013-08-08 14:41:09 +01003295 if (i915_gem_clflush_object(obj, force))
3296 i915_gem_chipset_flush(obj->base.dev);
3297
Chris Wilson05394f32010-11-08 19:18:58 +00003298 old_write_domain = obj->base.write_domain;
3299 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003300
3301 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003302 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003303 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003304}
3305
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003306/**
3307 * Moves a single object to the GTT read, and possibly write domain.
3308 *
3309 * This function returns when the move is complete, including waiting on
3310 * flushes to occur.
3311 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003312int
Chris Wilson20217462010-11-23 15:26:33 +00003313i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003314{
Chris Wilson8325a092012-04-24 15:52:35 +01003315 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003316 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003317 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003318
Eric Anholt02354392008-11-26 13:58:13 -08003319 /* Not valid to be called on unbound objects. */
Ben Widawsky98438772013-07-31 17:00:12 -07003320 if (!i915_gem_obj_bound_any(obj))
Eric Anholt02354392008-11-26 13:58:13 -08003321 return -EINVAL;
3322
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003323 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3324 return 0;
3325
Chris Wilson0201f1e2012-07-20 12:41:01 +01003326 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003327 if (ret)
3328 return ret;
3329
Chris Wilson2c225692013-08-09 12:26:45 +01003330 i915_gem_object_flush_cpu_write_domain(obj, false);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003331
Chris Wilsond0a57782012-10-09 19:24:37 +01003332 /* Serialise direct access to this object with the barriers for
3333 * coherent writes from the GPU, by effectively invalidating the
3334 * GTT domain upon first access.
3335 */
3336 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3337 mb();
3338
Chris Wilson05394f32010-11-08 19:18:58 +00003339 old_write_domain = obj->base.write_domain;
3340 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003341
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003342 /* It should now be out of any other write domains, and we can update
3343 * the domain values for our changes.
3344 */
Chris Wilson05394f32010-11-08 19:18:58 +00003345 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3346 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003347 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003348 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3349 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3350 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003351 }
3352
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003353 trace_i915_gem_object_change_domain(obj,
3354 old_read_domains,
3355 old_write_domain);
3356
Chris Wilson8325a092012-04-24 15:52:35 +01003357 /* And bump the LRU for this access */
Ben Widawskyca191b12013-07-31 17:00:14 -07003358 if (i915_gem_object_is_inactive(obj)) {
3359 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
3360 &dev_priv->gtt.base);
3361 if (vma)
3362 list_move_tail(&vma->mm_list,
3363 &dev_priv->gtt.base.inactive_list);
3364
3365 }
Chris Wilson8325a092012-04-24 15:52:35 +01003366
Eric Anholte47c68e2008-11-14 13:35:19 -08003367 return 0;
3368}
3369
Chris Wilsone4ffd172011-04-04 09:44:39 +01003370int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3371 enum i915_cache_level cache_level)
3372{
Daniel Vetter7bddb012012-02-09 17:15:47 +01003373 struct drm_device *dev = obj->base.dev;
3374 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003375 struct i915_vma *vma;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003376 int ret;
3377
3378 if (obj->cache_level == cache_level)
3379 return 0;
3380
3381 if (obj->pin_count) {
3382 DRM_DEBUG("can not change the cache level of pinned objects\n");
3383 return -EBUSY;
3384 }
3385
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003386 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3387 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003388 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003389 if (ret)
3390 return ret;
3391
3392 break;
3393 }
Chris Wilson42d6ab42012-07-26 11:49:32 +01003394 }
3395
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003396 if (i915_gem_obj_bound_any(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003397 ret = i915_gem_object_finish_gpu(obj);
3398 if (ret)
3399 return ret;
3400
3401 i915_gem_object_finish_gtt(obj);
3402
3403 /* Before SandyBridge, you could not use tiling or fence
3404 * registers with snooped memory, so relinquish any fences
3405 * currently pointing to our region in the aperture.
3406 */
Chris Wilson42d6ab42012-07-26 11:49:32 +01003407 if (INTEL_INFO(dev)->gen < 6) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003408 ret = i915_gem_object_put_fence(obj);
3409 if (ret)
3410 return ret;
3411 }
3412
Daniel Vetter74898d72012-02-15 23:50:22 +01003413 if (obj->has_global_gtt_mapping)
3414 i915_gem_gtt_bind_object(obj, cache_level);
Daniel Vetter7bddb012012-02-09 17:15:47 +01003415 if (obj->has_aliasing_ppgtt_mapping)
3416 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3417 obj, cache_level);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003418 }
3419
Chris Wilson2c225692013-08-09 12:26:45 +01003420 list_for_each_entry(vma, &obj->vma_list, vma_link)
3421 vma->node.color = cache_level;
3422 obj->cache_level = cache_level;
3423
3424 if (cpu_write_needs_clflush(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003425 u32 old_read_domains, old_write_domain;
3426
3427 /* If we're coming from LLC cached, then we haven't
3428 * actually been tracking whether the data is in the
3429 * CPU cache or not, since we only allow one bit set
3430 * in obj->write_domain and have been skipping the clflushes.
3431 * Just set it to the CPU cache for now.
3432 */
3433 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003434
3435 old_read_domains = obj->base.read_domains;
3436 old_write_domain = obj->base.write_domain;
3437
3438 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3439 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3440
3441 trace_i915_gem_object_change_domain(obj,
3442 old_read_domains,
3443 old_write_domain);
3444 }
3445
Chris Wilson42d6ab42012-07-26 11:49:32 +01003446 i915_gem_verify_gtt(dev);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003447 return 0;
3448}
3449
Ben Widawsky199adf42012-09-21 17:01:20 -07003450int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3451 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003452{
Ben Widawsky199adf42012-09-21 17:01:20 -07003453 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003454 struct drm_i915_gem_object *obj;
3455 int ret;
3456
3457 ret = i915_mutex_lock_interruptible(dev);
3458 if (ret)
3459 return ret;
3460
3461 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3462 if (&obj->base == NULL) {
3463 ret = -ENOENT;
3464 goto unlock;
3465 }
3466
Chris Wilson651d7942013-08-08 14:41:10 +01003467 switch (obj->cache_level) {
3468 case I915_CACHE_LLC:
3469 case I915_CACHE_L3_LLC:
3470 args->caching = I915_CACHING_CACHED;
3471 break;
3472
Chris Wilson4257d3b2013-08-08 14:41:11 +01003473 case I915_CACHE_WT:
3474 args->caching = I915_CACHING_DISPLAY;
3475 break;
3476
Chris Wilson651d7942013-08-08 14:41:10 +01003477 default:
3478 args->caching = I915_CACHING_NONE;
3479 break;
3480 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003481
3482 drm_gem_object_unreference(&obj->base);
3483unlock:
3484 mutex_unlock(&dev->struct_mutex);
3485 return ret;
3486}
3487
Ben Widawsky199adf42012-09-21 17:01:20 -07003488int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3489 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003490{
Ben Widawsky199adf42012-09-21 17:01:20 -07003491 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003492 struct drm_i915_gem_object *obj;
3493 enum i915_cache_level level;
3494 int ret;
3495
Ben Widawsky199adf42012-09-21 17:01:20 -07003496 switch (args->caching) {
3497 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003498 level = I915_CACHE_NONE;
3499 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003500 case I915_CACHING_CACHED:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003501 level = I915_CACHE_LLC;
3502 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003503 case I915_CACHING_DISPLAY:
3504 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3505 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003506 default:
3507 return -EINVAL;
3508 }
3509
Ben Widawsky3bc29132012-09-26 16:15:20 -07003510 ret = i915_mutex_lock_interruptible(dev);
3511 if (ret)
3512 return ret;
3513
Chris Wilsone6994ae2012-07-10 10:27:08 +01003514 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3515 if (&obj->base == NULL) {
3516 ret = -ENOENT;
3517 goto unlock;
3518 }
3519
3520 ret = i915_gem_object_set_cache_level(obj, level);
3521
3522 drm_gem_object_unreference(&obj->base);
3523unlock:
3524 mutex_unlock(&dev->struct_mutex);
3525 return ret;
3526}
3527
Chris Wilsoncc98b412013-08-09 12:25:09 +01003528static bool is_pin_display(struct drm_i915_gem_object *obj)
3529{
3530 /* There are 3 sources that pin objects:
3531 * 1. The display engine (scanouts, sprites, cursors);
3532 * 2. Reservations for execbuffer;
3533 * 3. The user.
3534 *
3535 * We can ignore reservations as we hold the struct_mutex and
3536 * are only called outside of the reservation path. The user
3537 * can only increment pin_count once, and so if after
3538 * subtracting the potential reference by the user, any pin_count
3539 * remains, it must be due to another use by the display engine.
3540 */
3541 return obj->pin_count - !!obj->user_pin_count;
3542}
3543
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003544/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003545 * Prepare buffer for display plane (scanout, cursors, etc).
3546 * Can be called from an uninterruptible phase (modesetting) and allows
3547 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003548 */
3549int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003550i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3551 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00003552 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003553{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003554 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003555 int ret;
3556
Chris Wilson0be73282010-12-06 14:36:27 +00003557 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07003558 ret = i915_gem_object_sync(obj, pipelined);
3559 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003560 return ret;
3561 }
3562
Chris Wilsoncc98b412013-08-09 12:25:09 +01003563 /* Mark the pin_display early so that we account for the
3564 * display coherency whilst setting up the cache domains.
3565 */
3566 obj->pin_display = true;
3567
Eric Anholta7ef0642011-03-29 16:59:54 -07003568 /* The display engine is not coherent with the LLC cache on gen6. As
3569 * a result, we make sure that the pinning that is about to occur is
3570 * done with uncached PTEs. This is lowest common denominator for all
3571 * chipsets.
3572 *
3573 * However for gen6+, we could do better by using the GFDT bit instead
3574 * of uncaching, which would allow us to flush all the LLC-cached data
3575 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3576 */
Chris Wilson651d7942013-08-08 14:41:10 +01003577 ret = i915_gem_object_set_cache_level(obj,
3578 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Eric Anholta7ef0642011-03-29 16:59:54 -07003579 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003580 goto err_unpin_display;
Eric Anholta7ef0642011-03-29 16:59:54 -07003581
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003582 /* As the user may map the buffer once pinned in the display plane
3583 * (e.g. libkms for the bootup splash), we have to ensure that we
3584 * always use map_and_fenceable for all scanout buffers.
3585 */
Ben Widawskyc37e2202013-07-31 16:59:58 -07003586 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003587 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003588 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003589
Chris Wilson2c225692013-08-09 12:26:45 +01003590 i915_gem_object_flush_cpu_write_domain(obj, true);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003591
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003592 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003593 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003594
3595 /* It should now be out of any other write domains, and we can update
3596 * the domain values for our changes.
3597 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003598 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003599 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003600
3601 trace_i915_gem_object_change_domain(obj,
3602 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003603 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003604
3605 return 0;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003606
3607err_unpin_display:
3608 obj->pin_display = is_pin_display(obj);
3609 return ret;
3610}
3611
3612void
3613i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3614{
3615 i915_gem_object_unpin(obj);
3616 obj->pin_display = is_pin_display(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003617}
3618
Chris Wilson85345512010-11-13 09:49:11 +00003619int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003620i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003621{
Chris Wilson88241782011-01-07 17:09:48 +00003622 int ret;
3623
Chris Wilsona8198ee2011-04-13 22:04:09 +01003624 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003625 return 0;
3626
Chris Wilson0201f1e2012-07-20 12:41:01 +01003627 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsonc501ae72011-12-14 13:57:23 +01003628 if (ret)
3629 return ret;
3630
Chris Wilsona8198ee2011-04-13 22:04:09 +01003631 /* Ensure that we invalidate the GPU's caches and TLBs. */
3632 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003633 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003634}
3635
Eric Anholte47c68e2008-11-14 13:35:19 -08003636/**
3637 * Moves a single object to the CPU read, and possibly write domain.
3638 *
3639 * This function returns when the move is complete, including waiting on
3640 * flushes to occur.
3641 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003642int
Chris Wilson919926a2010-11-12 13:42:53 +00003643i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003644{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003645 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003646 int ret;
3647
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003648 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3649 return 0;
3650
Chris Wilson0201f1e2012-07-20 12:41:01 +01003651 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003652 if (ret)
3653 return ret;
3654
Eric Anholte47c68e2008-11-14 13:35:19 -08003655 i915_gem_object_flush_gtt_write_domain(obj);
3656
Chris Wilson05394f32010-11-08 19:18:58 +00003657 old_write_domain = obj->base.write_domain;
3658 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003659
Eric Anholte47c68e2008-11-14 13:35:19 -08003660 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003661 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003662 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003663
Chris Wilson05394f32010-11-08 19:18:58 +00003664 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003665 }
3666
3667 /* It should now be out of any other write domains, and we can update
3668 * the domain values for our changes.
3669 */
Chris Wilson05394f32010-11-08 19:18:58 +00003670 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003671
3672 /* If we're writing through the CPU, then the GPU read domains will
3673 * need to be invalidated at next use.
3674 */
3675 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003676 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3677 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003678 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003679
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003680 trace_i915_gem_object_change_domain(obj,
3681 old_read_domains,
3682 old_write_domain);
3683
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003684 return 0;
3685}
3686
Eric Anholt673a3942008-07-30 12:06:12 -07003687/* Throttle our rendering by waiting until the ring has completed our requests
3688 * emitted over 20 msec ago.
3689 *
Eric Anholtb9624422009-06-03 07:27:35 +00003690 * Note that if we were to use the current jiffies each time around the loop,
3691 * we wouldn't escape the function with any frames outstanding if the time to
3692 * render a frame was over 20ms.
3693 *
Eric Anholt673a3942008-07-30 12:06:12 -07003694 * This should get us reasonable parallelism between CPU and GPU but also
3695 * relatively low latency when blocking on a particular request to finish.
3696 */
3697static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003698i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003699{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003700 struct drm_i915_private *dev_priv = dev->dev_private;
3701 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003702 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003703 struct drm_i915_gem_request *request;
3704 struct intel_ring_buffer *ring = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01003705 unsigned reset_counter;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003706 u32 seqno = 0;
3707 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003708
Daniel Vetter308887a2012-11-14 17:14:06 +01003709 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3710 if (ret)
3711 return ret;
3712
3713 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3714 if (ret)
3715 return ret;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003716
Chris Wilson1c255952010-09-26 11:03:27 +01003717 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003718 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003719 if (time_after_eq(request->emitted_jiffies, recent_enough))
3720 break;
3721
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003722 ring = request->ring;
3723 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003724 }
Daniel Vetterf69061b2012-12-06 09:01:42 +01003725 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson1c255952010-09-26 11:03:27 +01003726 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003727
3728 if (seqno == 0)
3729 return 0;
3730
Daniel Vetterf69061b2012-12-06 09:01:42 +01003731 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003732 if (ret == 0)
3733 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003734
Eric Anholt673a3942008-07-30 12:06:12 -07003735 return ret;
3736}
3737
Eric Anholt673a3942008-07-30 12:06:12 -07003738int
Chris Wilson05394f32010-11-08 19:18:58 +00003739i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07003740 struct i915_address_space *vm,
Chris Wilson05394f32010-11-08 19:18:58 +00003741 uint32_t alignment,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003742 bool map_and_fenceable,
3743 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003744{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003745 struct i915_vma *vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003746 int ret;
3747
Chris Wilson7e81a422012-09-15 09:41:57 +01003748 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3749 return -EBUSY;
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003750
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003751 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3752
3753 vma = i915_gem_obj_to_vma(obj, vm);
3754
3755 if (vma) {
3756 if ((alignment &&
3757 vma->node.start & (alignment - 1)) ||
Chris Wilson05394f32010-11-08 19:18:58 +00003758 (map_and_fenceable && !obj->map_and_fenceable)) {
3759 WARN(obj->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003760 "bo is already pinned with incorrect alignment:"
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003761 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003762 " obj->map_and_fenceable=%d\n",
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003763 i915_gem_obj_offset(obj, vm), alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003764 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003765 obj->map_and_fenceable);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003766 ret = i915_vma_unbind(vma);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003767 if (ret)
3768 return ret;
3769 }
3770 }
3771
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003772 if (!i915_gem_obj_bound(obj, vm)) {
Chris Wilson87422672012-11-21 13:04:03 +00003773 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3774
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003775 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3776 map_and_fenceable,
3777 nonblocking);
Chris Wilson97311292009-09-21 00:22:34 +01003778 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003779 return ret;
Chris Wilson87422672012-11-21 13:04:03 +00003780
3781 if (!dev_priv->mm.aliasing_ppgtt)
3782 i915_gem_gtt_bind_object(obj, obj->cache_level);
Chris Wilson22c344e2009-02-11 14:26:45 +00003783 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003784
Daniel Vetter74898d72012-02-15 23:50:22 +01003785 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3786 i915_gem_gtt_bind_object(obj, obj->cache_level);
3787
Chris Wilson1b502472012-04-24 15:47:30 +01003788 obj->pin_count++;
Chris Wilson6299f992010-11-24 12:23:44 +00003789 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003790
3791 return 0;
3792}
3793
3794void
Chris Wilson05394f32010-11-08 19:18:58 +00003795i915_gem_object_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003796{
Chris Wilson05394f32010-11-08 19:18:58 +00003797 BUG_ON(obj->pin_count == 0);
Ben Widawsky98438772013-07-31 17:00:12 -07003798 BUG_ON(!i915_gem_obj_bound_any(obj));
Eric Anholt673a3942008-07-30 12:06:12 -07003799
Chris Wilson1b502472012-04-24 15:47:30 +01003800 if (--obj->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003801 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003802}
3803
3804int
3805i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003806 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003807{
3808 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003809 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003810 int ret;
3811
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003812 ret = i915_mutex_lock_interruptible(dev);
3813 if (ret)
3814 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003815
Chris Wilson05394f32010-11-08 19:18:58 +00003816 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003817 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003818 ret = -ENOENT;
3819 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003820 }
Eric Anholt673a3942008-07-30 12:06:12 -07003821
Chris Wilson05394f32010-11-08 19:18:58 +00003822 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003823 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003824 ret = -EINVAL;
3825 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003826 }
3827
Chris Wilson05394f32010-11-08 19:18:58 +00003828 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003829 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3830 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003831 ret = -EINVAL;
3832 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003833 }
3834
Chris Wilson93be8782013-01-02 10:31:22 +00003835 if (obj->user_pin_count == 0) {
Ben Widawskyc37e2202013-07-31 16:59:58 -07003836 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003837 if (ret)
3838 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003839 }
3840
Chris Wilson93be8782013-01-02 10:31:22 +00003841 obj->user_pin_count++;
3842 obj->pin_filp = file;
3843
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003844 args->offset = i915_gem_obj_ggtt_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003845out:
Chris Wilson05394f32010-11-08 19:18:58 +00003846 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003847unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003848 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003849 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003850}
3851
3852int
3853i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003854 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003855{
3856 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003857 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003858 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003859
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003860 ret = i915_mutex_lock_interruptible(dev);
3861 if (ret)
3862 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003863
Chris Wilson05394f32010-11-08 19:18:58 +00003864 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003865 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003866 ret = -ENOENT;
3867 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003868 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003869
Chris Wilson05394f32010-11-08 19:18:58 +00003870 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003871 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3872 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003873 ret = -EINVAL;
3874 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003875 }
Chris Wilson05394f32010-11-08 19:18:58 +00003876 obj->user_pin_count--;
3877 if (obj->user_pin_count == 0) {
3878 obj->pin_filp = NULL;
Jesse Barnes79e53942008-11-07 14:24:08 -08003879 i915_gem_object_unpin(obj);
3880 }
Eric Anholt673a3942008-07-30 12:06:12 -07003881
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003882out:
Chris Wilson05394f32010-11-08 19:18:58 +00003883 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003884unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003885 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003886 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003887}
3888
3889int
3890i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003891 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003892{
3893 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003894 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003895 int ret;
3896
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003897 ret = i915_mutex_lock_interruptible(dev);
3898 if (ret)
3899 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003900
Chris Wilson05394f32010-11-08 19:18:58 +00003901 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003902 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003903 ret = -ENOENT;
3904 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003905 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003906
Chris Wilson0be555b2010-08-04 15:36:30 +01003907 /* Count all active objects as busy, even if they are currently not used
3908 * by the gpu. Users of this interface expect objects to eventually
3909 * become non-busy without any further actions, therefore emit any
3910 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08003911 */
Daniel Vetter30dfebf2012-06-01 15:21:23 +02003912 ret = i915_gem_object_flush_active(obj);
3913
Chris Wilson05394f32010-11-08 19:18:58 +00003914 args->busy = obj->active;
Chris Wilsone9808ed2012-07-04 12:25:08 +01003915 if (obj->ring) {
3916 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3917 args->busy |= intel_ring_flag(obj->ring) << 16;
3918 }
Eric Anholt673a3942008-07-30 12:06:12 -07003919
Chris Wilson05394f32010-11-08 19:18:58 +00003920 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003921unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003922 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003923 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003924}
3925
3926int
3927i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3928 struct drm_file *file_priv)
3929{
Akshay Joshi0206e352011-08-16 15:34:10 -04003930 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003931}
3932
Chris Wilson3ef94da2009-09-14 16:50:29 +01003933int
3934i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3935 struct drm_file *file_priv)
3936{
3937 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003938 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003939 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003940
3941 switch (args->madv) {
3942 case I915_MADV_DONTNEED:
3943 case I915_MADV_WILLNEED:
3944 break;
3945 default:
3946 return -EINVAL;
3947 }
3948
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003949 ret = i915_mutex_lock_interruptible(dev);
3950 if (ret)
3951 return ret;
3952
Chris Wilson05394f32010-11-08 19:18:58 +00003953 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003954 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003955 ret = -ENOENT;
3956 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003957 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01003958
Chris Wilson05394f32010-11-08 19:18:58 +00003959 if (obj->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003960 ret = -EINVAL;
3961 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003962 }
3963
Chris Wilson05394f32010-11-08 19:18:58 +00003964 if (obj->madv != __I915_MADV_PURGED)
3965 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003966
Chris Wilson6c085a72012-08-20 11:40:46 +02003967 /* if the object is no longer attached, discard its backing storage */
3968 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003969 i915_gem_object_truncate(obj);
3970
Chris Wilson05394f32010-11-08 19:18:58 +00003971 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003972
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003973out:
Chris Wilson05394f32010-11-08 19:18:58 +00003974 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003975unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01003976 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003977 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003978}
3979
Chris Wilson37e680a2012-06-07 15:38:42 +01003980void i915_gem_object_init(struct drm_i915_gem_object *obj,
3981 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01003982{
Ben Widawsky35c20a62013-05-31 11:28:48 -07003983 INIT_LIST_HEAD(&obj->global_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01003984 INIT_LIST_HEAD(&obj->ring_list);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02003985 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07003986 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01003987
Chris Wilson37e680a2012-06-07 15:38:42 +01003988 obj->ops = ops;
3989
Chris Wilson0327d6b2012-08-11 15:41:06 +01003990 obj->fence_reg = I915_FENCE_REG_NONE;
3991 obj->madv = I915_MADV_WILLNEED;
3992 /* Avoid an unnecessary call to unbind on the first bind. */
3993 obj->map_and_fenceable = true;
3994
3995 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3996}
3997
Chris Wilson37e680a2012-06-07 15:38:42 +01003998static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3999 .get_pages = i915_gem_object_get_pages_gtt,
4000 .put_pages = i915_gem_object_put_pages_gtt,
4001};
4002
Chris Wilson05394f32010-11-08 19:18:58 +00004003struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4004 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004005{
Daniel Vetterc397b902010-04-09 19:05:07 +00004006 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004007 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004008 gfp_t mask;
Daniel Vetterc397b902010-04-09 19:05:07 +00004009
Chris Wilson42dcedd2012-11-15 11:32:30 +00004010 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004011 if (obj == NULL)
4012 return NULL;
4013
4014 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
Chris Wilson42dcedd2012-11-15 11:32:30 +00004015 i915_gem_object_free(obj);
Daniel Vetterc397b902010-04-09 19:05:07 +00004016 return NULL;
4017 }
4018
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004019 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4020 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4021 /* 965gm cannot relocate objects above 4GiB. */
4022 mask &= ~__GFP_HIGHMEM;
4023 mask |= __GFP_DMA32;
4024 }
4025
Al Viro496ad9a2013-01-23 17:07:38 -05004026 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004027 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004028
Chris Wilson37e680a2012-06-07 15:38:42 +01004029 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004030
Daniel Vetterc397b902010-04-09 19:05:07 +00004031 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4032 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4033
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004034 if (HAS_LLC(dev)) {
4035 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004036 * cache) for about a 10% performance improvement
4037 * compared to uncached. Graphics requests other than
4038 * display scanout are coherent with the CPU in
4039 * accessing this cache. This means in this mode we
4040 * don't need to clflush on the CPU side, and on the
4041 * GPU side we only need to flush internal caches to
4042 * get data visible to the CPU.
4043 *
4044 * However, we maintain the display planes as UC, and so
4045 * need to rebind when first used as such.
4046 */
4047 obj->cache_level = I915_CACHE_LLC;
4048 } else
4049 obj->cache_level = I915_CACHE_NONE;
4050
Daniel Vetterd861e332013-07-24 23:25:03 +02004051 trace_i915_gem_object_create(obj);
4052
Chris Wilson05394f32010-11-08 19:18:58 +00004053 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004054}
4055
Eric Anholt673a3942008-07-30 12:06:12 -07004056int i915_gem_init_object(struct drm_gem_object *obj)
4057{
Daniel Vetterc397b902010-04-09 19:05:07 +00004058 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08004059
Eric Anholt673a3942008-07-30 12:06:12 -07004060 return 0;
4061}
4062
Chris Wilson1488fc02012-04-24 15:47:31 +01004063void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004064{
Chris Wilson1488fc02012-04-24 15:47:31 +01004065 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004066 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01004067 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004068 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004069
Chris Wilson26e12f82011-03-20 11:20:19 +00004070 trace_i915_gem_object_destroy(obj);
4071
Chris Wilson1488fc02012-04-24 15:47:31 +01004072 if (obj->phys_obj)
4073 i915_gem_detach_phys_object(dev, obj);
4074
4075 obj->pin_count = 0;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004076 /* NB: 0 or 1 elements */
4077 WARN_ON(!list_empty(&obj->vma_list) &&
4078 !list_is_singular(&obj->vma_list));
4079 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4080 int ret = i915_vma_unbind(vma);
4081 if (WARN_ON(ret == -ERESTARTSYS)) {
4082 bool was_interruptible;
Chris Wilson1488fc02012-04-24 15:47:31 +01004083
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004084 was_interruptible = dev_priv->mm.interruptible;
4085 dev_priv->mm.interruptible = false;
Chris Wilson1488fc02012-04-24 15:47:31 +01004086
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004087 WARN_ON(i915_vma_unbind(vma));
Chris Wilson1488fc02012-04-24 15:47:31 +01004088
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004089 dev_priv->mm.interruptible = was_interruptible;
4090 }
Chris Wilson1488fc02012-04-24 15:47:31 +01004091 }
4092
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004093 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4094 * before progressing. */
4095 if (obj->stolen)
4096 i915_gem_object_unpin_pages(obj);
4097
Ben Widawsky401c29f2013-05-31 11:28:47 -07004098 if (WARN_ON(obj->pages_pin_count))
4099 obj->pages_pin_count = 0;
Chris Wilson37e680a2012-06-07 15:38:42 +01004100 i915_gem_object_put_pages(obj);
Chris Wilsond8cb5082012-08-11 15:41:03 +01004101 i915_gem_object_free_mmap_offset(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +00004102 i915_gem_object_release_stolen(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004103
Chris Wilson9da3da62012-06-01 15:20:22 +01004104 BUG_ON(obj->pages);
4105
Chris Wilson2f745ad2012-09-04 21:02:58 +01004106 if (obj->base.import_attach)
4107 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004108
Chris Wilson05394f32010-11-08 19:18:58 +00004109 drm_gem_object_release(&obj->base);
4110 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004111
Chris Wilson05394f32010-11-08 19:18:58 +00004112 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004113 i915_gem_object_free(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004114}
4115
Daniel Vettere656a6c2013-08-14 14:14:04 +02004116struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
Ben Widawsky2f633152013-07-17 12:19:03 -07004117 struct i915_address_space *vm)
4118{
Daniel Vettere656a6c2013-08-14 14:14:04 +02004119 struct i915_vma *vma;
4120 list_for_each_entry(vma, &obj->vma_list, vma_link)
4121 if (vma->vm == vm)
4122 return vma;
4123
4124 return NULL;
4125}
4126
4127static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
4128 struct i915_address_space *vm)
4129{
Ben Widawsky2f633152013-07-17 12:19:03 -07004130 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4131 if (vma == NULL)
4132 return ERR_PTR(-ENOMEM);
4133
4134 INIT_LIST_HEAD(&vma->vma_link);
Ben Widawskyca191b12013-07-31 17:00:14 -07004135 INIT_LIST_HEAD(&vma->mm_list);
Ben Widawsky82a55ad2013-08-14 11:38:34 +02004136 INIT_LIST_HEAD(&vma->exec_list);
Ben Widawsky2f633152013-07-17 12:19:03 -07004137 vma->vm = vm;
4138 vma->obj = obj;
4139
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004140 /* Keep GGTT vmas first to make debug easier */
4141 if (i915_is_ggtt(vm))
4142 list_add(&vma->vma_link, &obj->vma_list);
4143 else
4144 list_add_tail(&vma->vma_link, &obj->vma_list);
4145
Ben Widawsky2f633152013-07-17 12:19:03 -07004146 return vma;
4147}
4148
Daniel Vettere656a6c2013-08-14 14:14:04 +02004149struct i915_vma *
4150i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4151 struct i915_address_space *vm)
4152{
4153 struct i915_vma *vma;
4154
4155 vma = i915_gem_obj_to_vma(obj, vm);
4156 if (!vma)
4157 vma = __i915_gem_vma_create(obj, vm);
4158
4159 return vma;
4160}
4161
Ben Widawsky2f633152013-07-17 12:19:03 -07004162void i915_gem_vma_destroy(struct i915_vma *vma)
4163{
4164 WARN_ON(vma->node.allocated);
Chris Wilsonaaa056672013-08-20 12:56:40 +01004165
4166 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4167 if (!list_empty(&vma->exec_list))
4168 return;
4169
Daniel Vetterb93dab62013-08-26 11:23:47 +02004170 list_del(&vma->vma_link);
4171
Ben Widawsky2f633152013-07-17 12:19:03 -07004172 kfree(vma);
4173}
4174
Jesse Barnes5669fca2009-02-17 15:13:31 -08004175int
Eric Anholt673a3942008-07-30 12:06:12 -07004176i915_gem_idle(struct drm_device *dev)
4177{
4178 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00004179 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004180
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004181 if (dev_priv->ums.mm_suspended) {
Keith Packard6dbe2772008-10-14 21:41:13 -07004182 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004183 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07004184 }
Eric Anholt673a3942008-07-30 12:06:12 -07004185
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004186 ret = i915_gpu_idle(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004187 if (ret) {
4188 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004189 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07004190 }
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004191 i915_gem_retire_requests(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004192
Chris Wilson29105cc2010-01-07 10:39:13 +00004193 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01004194 if (!drm_core_check_feature(dev, DRIVER_MODESET))
Chris Wilson6c085a72012-08-20 11:40:46 +02004195 i915_gem_evict_everything(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004196
Daniel Vetter99584db2012-11-14 17:14:04 +01004197 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004198
4199 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004200 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004201
Chris Wilson29105cc2010-01-07 10:39:13 +00004202 /* Cancel the retire work handler, which should be idle now. */
4203 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4204
Eric Anholt673a3942008-07-30 12:06:12 -07004205 return 0;
4206}
4207
Ben Widawskyb9524a12012-05-25 16:56:24 -07004208void i915_gem_l3_remap(struct drm_device *dev)
4209{
4210 drm_i915_private_t *dev_priv = dev->dev_private;
4211 u32 misccpctl;
4212 int i;
4213
Daniel Vettereb32e452013-02-14 19:46:07 +01004214 if (!HAS_L3_GPU_CACHE(dev))
Ben Widawskyb9524a12012-05-25 16:56:24 -07004215 return;
4216
Daniel Vettera4da4fa2012-11-02 19:55:07 +01004217 if (!dev_priv->l3_parity.remap_info)
Ben Widawskyb9524a12012-05-25 16:56:24 -07004218 return;
4219
4220 misccpctl = I915_READ(GEN7_MISCCPCTL);
4221 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4222 POSTING_READ(GEN7_MISCCPCTL);
4223
4224 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4225 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01004226 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
Ben Widawskyb9524a12012-05-25 16:56:24 -07004227 DRM_DEBUG("0x%x was already programmed to %x\n",
4228 GEN7_L3LOG_BASE + i, remap);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01004229 if (remap && !dev_priv->l3_parity.remap_info[i/4])
Ben Widawskyb9524a12012-05-25 16:56:24 -07004230 DRM_DEBUG_DRIVER("Clearing remapped register\n");
Daniel Vettera4da4fa2012-11-02 19:55:07 +01004231 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004232 }
4233
4234 /* Make sure all the writes land before disabling dop clock gating */
4235 POSTING_READ(GEN7_L3LOG_BASE);
4236
4237 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4238}
4239
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004240void i915_gem_init_swizzling(struct drm_device *dev)
4241{
4242 drm_i915_private_t *dev_priv = dev->dev_private;
4243
Daniel Vetter11782b02012-01-31 16:47:55 +01004244 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004245 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4246 return;
4247
4248 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4249 DISP_TILE_SURFACE_SWIZZLING);
4250
Daniel Vetter11782b02012-01-31 16:47:55 +01004251 if (IS_GEN5(dev))
4252 return;
4253
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004254 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4255 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004256 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004257 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004258 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004259 else
4260 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004261}
Daniel Vettere21af882012-02-09 20:53:27 +01004262
Chris Wilson67b1b572012-07-05 23:49:40 +01004263static bool
4264intel_enable_blt(struct drm_device *dev)
4265{
4266 if (!HAS_BLT(dev))
4267 return false;
4268
4269 /* The blitter was dysfunctional on early prototypes */
4270 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4271 DRM_INFO("BLT not supported on this pre-production hardware;"
4272 " graphics performance will be degraded.\n");
4273 return false;
4274 }
4275
4276 return true;
4277}
4278
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004279static int i915_gem_init_rings(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004280{
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004281 struct drm_i915_private *dev_priv = dev->dev_private;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004282 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004283
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004284 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004285 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00004286 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004287
4288 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004289 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004290 if (ret)
4291 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004292 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004293
Chris Wilson67b1b572012-07-05 23:49:40 +01004294 if (intel_enable_blt(dev)) {
Chris Wilson549f7362010-10-19 11:19:32 +01004295 ret = intel_init_blt_ring_buffer(dev);
4296 if (ret)
4297 goto cleanup_bsd_ring;
4298 }
4299
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004300 if (HAS_VEBOX(dev)) {
4301 ret = intel_init_vebox_ring_buffer(dev);
4302 if (ret)
4303 goto cleanup_blt_ring;
4304 }
4305
4306
Mika Kuoppala99433932013-01-22 14:12:17 +02004307 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4308 if (ret)
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004309 goto cleanup_vebox_ring;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004310
4311 return 0;
4312
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004313cleanup_vebox_ring:
4314 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004315cleanup_blt_ring:
4316 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4317cleanup_bsd_ring:
4318 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4319cleanup_render_ring:
4320 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4321
4322 return ret;
4323}
4324
4325int
4326i915_gem_init_hw(struct drm_device *dev)
4327{
4328 drm_i915_private_t *dev_priv = dev->dev_private;
4329 int ret;
4330
4331 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4332 return -EIO;
4333
Ben Widawsky59124502013-07-04 11:02:05 -07004334 if (dev_priv->ellc_size)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004335 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004336
Rodrigo Vivi94353732013-08-28 16:45:46 -03004337 if (IS_HSW_GT3(dev))
4338 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
4339 else
4340 I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
4341
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004342 if (HAS_PCH_NOP(dev)) {
4343 u32 temp = I915_READ(GEN7_MSG_CTL);
4344 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4345 I915_WRITE(GEN7_MSG_CTL, temp);
4346 }
4347
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004348 i915_gem_l3_remap(dev);
4349
4350 i915_gem_init_swizzling(dev);
4351
4352 ret = i915_gem_init_rings(dev);
4353 if (ret)
Mika Kuoppala99433932013-01-22 14:12:17 +02004354 return ret;
4355
Ben Widawsky254f9652012-06-04 14:42:42 -07004356 /*
4357 * XXX: There was some w/a described somewhere suggesting loading
4358 * contexts before PPGTT.
4359 */
4360 i915_gem_context_init(dev);
Ben Widawskyb7c36d22013-04-08 18:43:56 -07004361 if (dev_priv->mm.aliasing_ppgtt) {
4362 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4363 if (ret) {
4364 i915_gem_cleanup_aliasing_ppgtt(dev);
4365 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4366 }
4367 }
Daniel Vettere21af882012-02-09 20:53:27 +01004368
Chris Wilson68f95ba2010-05-27 13:18:22 +01004369 return 0;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004370}
4371
Chris Wilson1070a422012-04-24 15:47:41 +01004372int i915_gem_init(struct drm_device *dev)
4373{
4374 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson1070a422012-04-24 15:47:41 +01004375 int ret;
4376
Chris Wilson1070a422012-04-24 15:47:41 +01004377 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004378
4379 if (IS_VALLEYVIEW(dev)) {
4380 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4381 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4382 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4383 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4384 }
4385
Ben Widawskyd7e50082012-12-18 10:31:25 -08004386 i915_gem_init_global_gtt(dev);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004387
Chris Wilson1070a422012-04-24 15:47:41 +01004388 ret = i915_gem_init_hw(dev);
4389 mutex_unlock(&dev->struct_mutex);
4390 if (ret) {
4391 i915_gem_cleanup_aliasing_ppgtt(dev);
4392 return ret;
4393 }
4394
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004395 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4396 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4397 dev_priv->dri1.allow_batchbuffer = 1;
Chris Wilson1070a422012-04-24 15:47:41 +01004398 return 0;
4399}
4400
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004401void
4402i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4403{
4404 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004405 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004406 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004407
Chris Wilsonb4519512012-05-11 14:29:30 +01004408 for_each_ring(ring, dev_priv, i)
4409 intel_cleanup_ring_buffer(ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004410}
4411
4412int
Eric Anholt673a3942008-07-30 12:06:12 -07004413i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4414 struct drm_file *file_priv)
4415{
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004416 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004417 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004418
Jesse Barnes79e53942008-11-07 14:24:08 -08004419 if (drm_core_check_feature(dev, DRIVER_MODESET))
4420 return 0;
4421
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004422 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004423 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004424 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004425 }
4426
Eric Anholt673a3942008-07-30 12:06:12 -07004427 mutex_lock(&dev->struct_mutex);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004428 dev_priv->ums.mm_suspended = 0;
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004429
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004430 ret = i915_gem_init_hw(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004431 if (ret != 0) {
4432 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004433 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004434 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004435
Ben Widawsky5cef07e2013-07-16 16:50:08 -07004436 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004437 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004438
Chris Wilson5f353082010-06-07 14:03:03 +01004439 ret = drm_irq_install(dev);
4440 if (ret)
4441 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004442
Eric Anholt673a3942008-07-30 12:06:12 -07004443 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01004444
4445cleanup_ringbuffer:
4446 mutex_lock(&dev->struct_mutex);
4447 i915_gem_cleanup_ringbuffer(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004448 dev_priv->ums.mm_suspended = 1;
Chris Wilson5f353082010-06-07 14:03:03 +01004449 mutex_unlock(&dev->struct_mutex);
4450
4451 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004452}
4453
4454int
4455i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4456 struct drm_file *file_priv)
4457{
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004458 struct drm_i915_private *dev_priv = dev->dev_private;
4459 int ret;
4460
Jesse Barnes79e53942008-11-07 14:24:08 -08004461 if (drm_core_check_feature(dev, DRIVER_MODESET))
4462 return 0;
4463
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004464 drm_irq_uninstall(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004465
4466 mutex_lock(&dev->struct_mutex);
4467 ret = i915_gem_idle(dev);
4468
4469 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4470 * We need to replace this with a semaphore, or something.
4471 * And not confound ums.mm_suspended!
4472 */
4473 if (ret != 0)
4474 dev_priv->ums.mm_suspended = 1;
4475 mutex_unlock(&dev->struct_mutex);
4476
4477 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004478}
4479
4480void
4481i915_gem_lastclose(struct drm_device *dev)
4482{
4483 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004484
Eric Anholte806b492009-01-22 09:56:58 -08004485 if (drm_core_check_feature(dev, DRIVER_MODESET))
4486 return;
4487
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004488 mutex_lock(&dev->struct_mutex);
Keith Packard6dbe2772008-10-14 21:41:13 -07004489 ret = i915_gem_idle(dev);
4490 if (ret)
4491 DRM_ERROR("failed to idle hardware: %d\n", ret);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004492 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004493}
4494
Chris Wilson64193402010-10-24 12:38:05 +01004495static void
4496init_ring_lists(struct intel_ring_buffer *ring)
4497{
4498 INIT_LIST_HEAD(&ring->active_list);
4499 INIT_LIST_HEAD(&ring->request_list);
Chris Wilson64193402010-10-24 12:38:05 +01004500}
4501
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004502static void i915_init_vm(struct drm_i915_private *dev_priv,
4503 struct i915_address_space *vm)
4504{
4505 vm->dev = dev_priv->dev;
4506 INIT_LIST_HEAD(&vm->active_list);
4507 INIT_LIST_HEAD(&vm->inactive_list);
4508 INIT_LIST_HEAD(&vm->global_link);
4509 list_add(&vm->global_link, &dev_priv->vm_list);
4510}
4511
Eric Anholt673a3942008-07-30 12:06:12 -07004512void
4513i915_gem_load(struct drm_device *dev)
4514{
4515 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson42dcedd2012-11-15 11:32:30 +00004516 int i;
4517
4518 dev_priv->slab =
4519 kmem_cache_create("i915_gem_object",
4520 sizeof(struct drm_i915_gem_object), 0,
4521 SLAB_HWCACHE_ALIGN,
4522 NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07004523
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004524 INIT_LIST_HEAD(&dev_priv->vm_list);
4525 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4526
Chris Wilson6c085a72012-08-20 11:40:46 +02004527 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4528 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004529 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004530 for (i = 0; i < I915_NUM_RINGS; i++)
4531 init_ring_lists(&dev_priv->ring[i]);
Daniel Vetter4b9de732011-10-09 21:52:02 +02004532 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
Daniel Vetter007cc8a2010-04-28 11:02:31 +02004533 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004534 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4535 i915_gem_retire_work_handler);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004536 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01004537
Dave Airlie94400122010-07-20 13:15:31 +10004538 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4539 if (IS_GEN3(dev)) {
Daniel Vetter50743292012-04-26 22:02:54 +02004540 I915_WRITE(MI_ARB_STATE,
4541 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
Dave Airlie94400122010-07-20 13:15:31 +10004542 }
4543
Chris Wilson72bfa192010-12-19 11:42:05 +00004544 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4545
Jesse Barnesde151cf2008-11-12 10:03:55 -08004546 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08004547 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4548 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004549
Ville Syrjälä42b5aea2013-04-09 13:02:47 +03004550 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4551 dev_priv->num_fence_regs = 32;
4552 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004553 dev_priv->num_fence_regs = 16;
4554 else
4555 dev_priv->num_fence_regs = 8;
4556
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004557 /* Initialize fence registers to zero */
Chris Wilson19b2dbd2013-06-12 10:15:12 +01004558 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4559 i915_gem_restore_fences(dev);
Eric Anholt10ed13e2011-05-06 13:53:49 -07004560
Eric Anholt673a3942008-07-30 12:06:12 -07004561 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004562 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01004563
Chris Wilsonce453d82011-02-21 14:43:56 +00004564 dev_priv->mm.interruptible = true;
4565
Chris Wilson17250b72010-10-28 12:51:39 +01004566 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4567 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4568 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07004569}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004570
4571/*
4572 * Create a physically contiguous memory object for this object
4573 * e.g. for cursor + overlay regs
4574 */
Chris Wilson995b6762010-08-20 13:23:26 +01004575static int i915_gem_init_phys_object(struct drm_device *dev,
4576 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004577{
4578 drm_i915_private_t *dev_priv = dev->dev_private;
4579 struct drm_i915_gem_phys_object *phys_obj;
4580 int ret;
4581
4582 if (dev_priv->mm.phys_objs[id - 1] || !size)
4583 return 0;
4584
Eric Anholt9a298b22009-03-24 12:23:04 -07004585 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004586 if (!phys_obj)
4587 return -ENOMEM;
4588
4589 phys_obj->id = id;
4590
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004591 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004592 if (!phys_obj->handle) {
4593 ret = -ENOMEM;
4594 goto kfree_obj;
4595 }
4596#ifdef CONFIG_X86
4597 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4598#endif
4599
4600 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4601
4602 return 0;
4603kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004604 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004605 return ret;
4606}
4607
Chris Wilson995b6762010-08-20 13:23:26 +01004608static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004609{
4610 drm_i915_private_t *dev_priv = dev->dev_private;
4611 struct drm_i915_gem_phys_object *phys_obj;
4612
4613 if (!dev_priv->mm.phys_objs[id - 1])
4614 return;
4615
4616 phys_obj = dev_priv->mm.phys_objs[id - 1];
4617 if (phys_obj->cur_obj) {
4618 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4619 }
4620
4621#ifdef CONFIG_X86
4622 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4623#endif
4624 drm_pci_free(dev, phys_obj->handle);
4625 kfree(phys_obj);
4626 dev_priv->mm.phys_objs[id - 1] = NULL;
4627}
4628
4629void i915_gem_free_all_phys_object(struct drm_device *dev)
4630{
4631 int i;
4632
Dave Airlie260883c2009-01-22 17:58:49 +10004633 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004634 i915_gem_free_phys_object(dev, i);
4635}
4636
4637void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004638 struct drm_i915_gem_object *obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004639{
Al Viro496ad9a2013-01-23 17:07:38 -05004640 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsone5281cc2010-10-28 13:45:36 +01004641 char *vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004642 int i;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004643 int page_count;
4644
Chris Wilson05394f32010-11-08 19:18:58 +00004645 if (!obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004646 return;
Chris Wilson05394f32010-11-08 19:18:58 +00004647 vaddr = obj->phys_obj->handle->vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004648
Chris Wilson05394f32010-11-08 19:18:58 +00004649 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004650 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07004651 struct page *page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004652 if (!IS_ERR(page)) {
4653 char *dst = kmap_atomic(page);
4654 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4655 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004656
Chris Wilsone5281cc2010-10-28 13:45:36 +01004657 drm_clflush_pages(&page, 1);
4658
4659 set_page_dirty(page);
4660 mark_page_accessed(page);
4661 page_cache_release(page);
4662 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004663 }
Ben Widawskye76e9ae2012-11-04 09:21:27 -08004664 i915_gem_chipset_flush(dev);
Chris Wilsond78b47b2009-06-17 21:52:49 +01004665
Chris Wilson05394f32010-11-08 19:18:58 +00004666 obj->phys_obj->cur_obj = NULL;
4667 obj->phys_obj = NULL;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004668}
4669
4670int
4671i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004672 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004673 int id,
4674 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004675{
Al Viro496ad9a2013-01-23 17:07:38 -05004676 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004677 drm_i915_private_t *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004678 int ret = 0;
4679 int page_count;
4680 int i;
4681
4682 if (id > I915_MAX_PHYS_OBJECT)
4683 return -EINVAL;
4684
Chris Wilson05394f32010-11-08 19:18:58 +00004685 if (obj->phys_obj) {
4686 if (obj->phys_obj->id == id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004687 return 0;
4688 i915_gem_detach_phys_object(dev, obj);
4689 }
4690
Dave Airlie71acb5e2008-12-30 20:31:46 +10004691 /* create a new object */
4692 if (!dev_priv->mm.phys_objs[id - 1]) {
4693 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson05394f32010-11-08 19:18:58 +00004694 obj->base.size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004695 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00004696 DRM_ERROR("failed to init phys object %d size: %zu\n",
4697 id, obj->base.size);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004698 return ret;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004699 }
4700 }
4701
4702 /* bind to the object */
Chris Wilson05394f32010-11-08 19:18:58 +00004703 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4704 obj->phys_obj->cur_obj = obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004705
Chris Wilson05394f32010-11-08 19:18:58 +00004706 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004707
4708 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01004709 struct page *page;
4710 char *dst, *src;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004711
Hugh Dickins5949eac2011-06-27 16:18:18 -07004712 page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004713 if (IS_ERR(page))
4714 return PTR_ERR(page);
4715
Chris Wilsonff75b9b2010-10-30 22:52:31 +01004716 src = kmap_atomic(page);
Chris Wilson05394f32010-11-08 19:18:58 +00004717 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004718 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004719 kunmap_atomic(src);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004720
4721 mark_page_accessed(page);
4722 page_cache_release(page);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004723 }
4724
4725 return 0;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004726}
4727
4728static int
Chris Wilson05394f32010-11-08 19:18:58 +00004729i915_gem_phys_pwrite(struct drm_device *dev,
4730 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +10004731 struct drm_i915_gem_pwrite *args,
4732 struct drm_file *file_priv)
4733{
Chris Wilson05394f32010-11-08 19:18:58 +00004734 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
Ville Syrjälä2bb46292013-02-22 16:12:51 +02004735 char __user *user_data = to_user_ptr(args->data_ptr);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004736
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004737 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4738 unsigned long unwritten;
4739
4740 /* The physical object once assigned is fixed for the lifetime
4741 * of the obj, so we can safely drop the lock and continue
4742 * to access vaddr.
4743 */
4744 mutex_unlock(&dev->struct_mutex);
4745 unwritten = copy_from_user(vaddr, user_data, args->size);
4746 mutex_lock(&dev->struct_mutex);
4747 if (unwritten)
4748 return -EFAULT;
4749 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004750
Ben Widawskye76e9ae2012-11-04 09:21:27 -08004751 i915_gem_chipset_flush(dev);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004752 return 0;
4753}
Eric Anholtb9624422009-06-03 07:27:35 +00004754
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004755void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004756{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004757 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00004758
4759 /* Clean up our request list when the client is going away, so that
4760 * later retire_requests won't dereference our soon-to-be-gone
4761 * file_priv.
4762 */
Chris Wilson1c255952010-09-26 11:03:27 +01004763 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004764 while (!list_empty(&file_priv->mm.request_list)) {
4765 struct drm_i915_gem_request *request;
4766
4767 request = list_first_entry(&file_priv->mm.request_list,
4768 struct drm_i915_gem_request,
4769 client_list);
4770 list_del(&request->client_list);
4771 request->file_priv = NULL;
4772 }
Chris Wilson1c255952010-09-26 11:03:27 +01004773 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00004774}
Chris Wilson31169712009-09-14 16:50:28 +01004775
Chris Wilson57745062012-11-21 13:04:04 +00004776static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4777{
4778 if (!mutex_is_locked(mutex))
4779 return false;
4780
4781#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4782 return mutex->owner == task;
4783#else
4784 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4785 return false;
4786#endif
4787}
4788
Chris Wilson31169712009-09-14 16:50:28 +01004789static int
Ying Han1495f232011-05-24 17:12:27 -07004790i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
Chris Wilson31169712009-09-14 16:50:28 +01004791{
Chris Wilson17250b72010-10-28 12:51:39 +01004792 struct drm_i915_private *dev_priv =
4793 container_of(shrinker,
4794 struct drm_i915_private,
4795 mm.inactive_shrinker);
4796 struct drm_device *dev = dev_priv->dev;
Chris Wilson6c085a72012-08-20 11:40:46 +02004797 struct drm_i915_gem_object *obj;
Ying Han1495f232011-05-24 17:12:27 -07004798 int nr_to_scan = sc->nr_to_scan;
Chris Wilson57745062012-11-21 13:04:04 +00004799 bool unlock = true;
Chris Wilson17250b72010-10-28 12:51:39 +01004800 int cnt;
4801
Chris Wilson57745062012-11-21 13:04:04 +00004802 if (!mutex_trylock(&dev->struct_mutex)) {
4803 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4804 return 0;
4805
Daniel Vetter677feac2012-12-19 14:33:45 +01004806 if (dev_priv->mm.shrinker_no_lock_stealing)
4807 return 0;
4808
Chris Wilson57745062012-11-21 13:04:04 +00004809 unlock = false;
4810 }
Chris Wilson31169712009-09-14 16:50:28 +01004811
Chris Wilson6c085a72012-08-20 11:40:46 +02004812 if (nr_to_scan) {
4813 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4814 if (nr_to_scan > 0)
Daniel Vetter93927ca2013-01-10 18:03:00 +01004815 nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4816 false);
4817 if (nr_to_scan > 0)
Chris Wilson6c085a72012-08-20 11:40:46 +02004818 i915_gem_shrink_all(dev_priv);
Chris Wilson31169712009-09-14 16:50:28 +01004819 }
4820
Chris Wilson17250b72010-10-28 12:51:39 +01004821 cnt = 0;
Ben Widawsky35c20a62013-05-31 11:28:48 -07004822 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
Chris Wilsona5570172012-09-04 21:02:54 +01004823 if (obj->pages_pin_count == 0)
4824 cnt += obj->base.size >> PAGE_SHIFT;
Ben Widawskyfcb4a572013-07-31 16:59:57 -07004825
4826 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4827 if (obj->active)
4828 continue;
4829
Chris Wilsona5570172012-09-04 21:02:54 +01004830 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
Chris Wilson6c085a72012-08-20 11:40:46 +02004831 cnt += obj->base.size >> PAGE_SHIFT;
Ben Widawskyfcb4a572013-07-31 16:59:57 -07004832 }
Chris Wilson31169712009-09-14 16:50:28 +01004833
Chris Wilson57745062012-11-21 13:04:04 +00004834 if (unlock)
4835 mutex_unlock(&dev->struct_mutex);
Chris Wilson6c085a72012-08-20 11:40:46 +02004836 return cnt;
Chris Wilson31169712009-09-14 16:50:28 +01004837}
Ben Widawskya70a3142013-07-31 16:59:56 -07004838
4839/* All the new VM stuff */
4840unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4841 struct i915_address_space *vm)
4842{
4843 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4844 struct i915_vma *vma;
4845
4846 if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4847 vm = &dev_priv->gtt.base;
4848
4849 BUG_ON(list_empty(&o->vma_list));
4850 list_for_each_entry(vma, &o->vma_list, vma_link) {
4851 if (vma->vm == vm)
4852 return vma->node.start;
4853
4854 }
4855 return -1;
4856}
4857
4858bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4859 struct i915_address_space *vm)
4860{
4861 struct i915_vma *vma;
4862
4863 list_for_each_entry(vma, &o->vma_list, vma_link)
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004864 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
Ben Widawskya70a3142013-07-31 16:59:56 -07004865 return true;
4866
4867 return false;
4868}
4869
4870bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4871{
4872 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4873 struct i915_address_space *vm;
4874
4875 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
4876 if (i915_gem_obj_bound(o, vm))
4877 return true;
4878
4879 return false;
4880}
4881
4882unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4883 struct i915_address_space *vm)
4884{
4885 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4886 struct i915_vma *vma;
4887
4888 if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4889 vm = &dev_priv->gtt.base;
4890
4891 BUG_ON(list_empty(&o->vma_list));
4892
4893 list_for_each_entry(vma, &o->vma_list, vma_link)
4894 if (vma->vm == vm)
4895 return vma->node.size;
4896
4897 return 0;
4898}