/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
			   bool map_and_fenceable,
			   bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

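/* CPU access to an object is coherent with the GPU either when the platform
 * shares its last-level cache with the GPU (HAS_LLC) or when the object is
 * mapped cacheable (anything but I915_CACHE_NONE); only in the remaining
 * cases does pread/pwrite have to clflush manually.
 */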
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

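/* CPU writes need a manual flush when the object is not cached coherently,
 * or when it is (or may be) scanned out by the display engine, which always
 * reads straight from memory.
 */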
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

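/* Block until any pending GPU reset has completed so that callers do not
 * race the reset handler. The wait is bounded to ten seconds; running into
 * that timeout is treated as a terminal -EIO.
 */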
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

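/* An object counts as inactive once it is bound into at least one address
 * space but is no longer referenced by any unretired GPU request.
 */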
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

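/* Legacy (user modesetting) initialization of the global GTT range. The
 * ioctl is rejected under kernel modesetting and on gen5+, where the driver
 * sets up the GTT itself.
 */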
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

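/* GEM object structs come from a dedicated slab cache owned by the driver;
 * these helpers wrap allocating and releasing that memory.
 */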
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

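/* Dumb buffers: derive a 64-byte aligned pitch and the resulting size from
 * the requested geometry, then back it with an ordinary GEM object.
 */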
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

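/* Copy helpers for bit-17 swizzled objects: copy in chunks that never cross
 * a 64-byte cacheline and flip bit 6 of the object offset for each chunk to
 * undo the swizzling applied by the memory controller.
 */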
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

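/* Read the object through its shmem backing pages. Each page is first tried
 * with the atomic kmap fastpath; on failure we drop struct_mutex, prefault
 * the user buffer and fall back to the sleeping slowpath before retaking
 * the lock.
 */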
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
		if (i915_gem_obj_bound_any(obj)) {
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
			if (ret)
				return ret;
		}
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915_prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

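/* Write the object through its shmem backing pages, mirroring the pread
 * path above: an atomic per-page fastpath with a slowpath fallback that
 * temporarily drops struct_mutex, plus clflush bookkeeping for partially
 * written cachelines.
 */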
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		if (i915_gem_obj_bound_any(obj)) {
			ret = i915_gem_object_set_to_gtt_domain(obj, true);
			if (ret)
				return ret;
		}
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915_prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

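/* Translate the current GPU reset state into an errno: -EAGAIN while a
 * reset is in progress (interruptible callers are expected to retry), -EIO
 * for callers that cannot cope with -EAGAIN or when the GPU is terminally
 * wedged.
 */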
int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_request)
		ret = i915_add_request(ring, NULL);

	return ret;
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible, struct timespec *timeout)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now, wait_time={1,0};
	unsigned long timeout_jiffies;
	long end;
	bool wait_forever = true;
	int ret;

	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	trace_i915_gem_request_wait_begin(ring, seqno);

	if (timeout != NULL) {
		wait_time = *timeout;
		wait_forever = false;
	}

	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);

	if (WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	getrawmonotonic(&before);

#define EXIT_COND \
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
	 i915_reset_in_progress(&dev_priv->gpu_error) || \
	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
	do {
		if (interruptible)
			end = wait_event_interruptible_timeout(ring->irq_queue,
							       EXIT_COND,
							       timeout_jiffies);
		else
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
			end = -EAGAIN;

		/* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
		 * gone. */
		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
		if (ret)
			end = ret;
	} while (end == 0 && wait_forever);

	getrawmonotonic(&now);

	ring->irq_put(ring);
	trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	switch (end) {
	case -EIO:
	case -EAGAIN: /* Wedged */
	case -ERESTARTSYS: /* Signal */
		return (int)end;
	case 0: /* Timeout */
		return -ETIME;
	default: /* Completed */
		WARN_ON(end < 0); /* We're not aware of other errors */
		return 0;
	}
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL);
}

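/* Common tail shared by the blocking and nonblocking wait paths below. */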
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj, true);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

Daniel Vetter1286ff72012-05-10 15:25:09 +02001303 /* prime objects have no backing filp to GEM mmap
1304 * pages from.
1305 */
1306 if (!obj->filp) {
1307 drm_gem_object_unreference_unlocked(obj);
1308 return -EINVAL;
1309 }
1310
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001311 addr = vm_mmap(obj->filp, 0, args->size,
Eric Anholt673a3942008-07-30 12:06:12 -07001312 PROT_READ | PROT_WRITE, MAP_SHARED,
1313 args->offset);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001314 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001315 if (IS_ERR((void *)addr))
1316 return addr;
1317
1318 args->addr_ptr = (uint64_t) addr;
1319
1320 return 0;
1321}
1322
Jesse Barnesde151cf2008-11-12 10:03:55 -08001323/**
1324 * i915_gem_fault - fault a page into the GTT
	1325 * @vma: VMA in question
	1326 * @vmf: fault info
1327 *
	1328 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1329 * from userspace. The fault handler takes care of binding the object to
1330 * the GTT (if needed), allocating and programming a fence register (again,
1331 * only if needed based on whether the old reg is still valid or the object
1332 * is tiled) and inserting a new PTE into the faulting process.
1333 *
1334 * Note that the faulting process may involve evicting existing objects
1335 * from the GTT and/or fence registers to make room. So performance may
1336 * suffer if the GTT working set is large or there are few fence registers
1337 * left.
1338 */
1339int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1340{
Chris Wilson05394f32010-11-08 19:18:58 +00001341 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1342 struct drm_device *dev = obj->base.dev;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001343 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001344 pgoff_t page_offset;
1345 unsigned long pfn;
1346 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001347 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001348
1349 /* We don't use vmf->pgoff since that has the fake offset */
1350 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1351 PAGE_SHIFT;
1352
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001353 ret = i915_mutex_lock_interruptible(dev);
1354 if (ret)
1355 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001356
Chris Wilsondb53a302011-02-03 11:57:46 +00001357 trace_i915_gem_object_fault(obj, page_offset, true, write);
1358
Chris Wilsoneb119bd2012-12-16 12:43:36 +00001359 /* Access to snoopable pages through the GTT is incoherent. */
1360 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1361 ret = -EINVAL;
1362 goto unlock;
1363 }
1364
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001365 /* Now bind it into the GTT if needed */
Ben Widawskyc37e2202013-07-31 16:59:58 -07001366 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001367 if (ret)
1368 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001369
Chris Wilsonc9839302012-11-20 10:45:17 +00001370 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1371 if (ret)
1372 goto unpin;
1373
1374 ret = i915_gem_object_get_fence(obj);
1375 if (ret)
1376 goto unpin;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001377
Chris Wilson6299f992010-11-24 12:23:44 +00001378 obj->fault_mappable = true;
1379
Ben Widawskyf343c5f2013-07-05 14:41:04 -07001380 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1381 pfn >>= PAGE_SHIFT;
1382 pfn += page_offset;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001383
1384 /* Finally, remap it using the new GTT offset */
1385 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc9839302012-11-20 10:45:17 +00001386unpin:
1387 i915_gem_object_unpin(obj);
Chris Wilsonc7150892009-09-23 00:43:56 +01001388unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001389 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001390out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001391 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001392 case -EIO:
Daniel Vettera9340cc2012-07-04 22:18:42 +02001393 /* If this -EIO is due to a gpu hang, give the reset code a
1394 * chance to clean up the mess. Otherwise return the proper
1395 * SIGBUS. */
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001396 if (i915_terminally_wedged(&dev_priv->gpu_error))
Daniel Vettera9340cc2012-07-04 22:18:42 +02001397 return VM_FAULT_SIGBUS;
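		/* else fall through: treat a non-fatal -EIO like -EAGAIN and
		 * retry the fault once the reset handler has had a chance to
		 * run. */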
Chris Wilson045e7692010-11-07 09:18:22 +00001398 case -EAGAIN:
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001399 /* Give the error handler a chance to run and move the
1400 * objects off the GPU active list. Next time we service the
1401 * fault, we should be able to transition the page into the
1402 * GTT without touching the GPU (and so avoid further
	1403 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1404 * with coherency, just lost writes.
1405 */
Chris Wilson045e7692010-11-07 09:18:22 +00001406 set_need_resched();
Chris Wilsonc7150892009-09-23 00:43:56 +01001407 case 0:
1408 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001409 case -EINTR:
Dmitry Rogozhkine79e0fe2012-10-03 17:15:26 +03001410 case -EBUSY:
1411 /*
1412 * EBUSY is ok: this just means that another thread
1413 * already did the job.
1414 */
Chris Wilsonc7150892009-09-23 00:43:56 +01001415 return VM_FAULT_NOPAGE;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001416 case -ENOMEM:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001417 return VM_FAULT_OOM;
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001418 case -ENOSPC:
1419 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001420 default:
Daniel Vettera7c2e1a2012-10-17 11:17:16 +02001421 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
Chris Wilsonc7150892009-09-23 00:43:56 +01001422 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001423 }
1424}
1425
1426/**
Chris Wilson901782b2009-07-10 08:18:50 +01001427 * i915_gem_release_mmap - remove physical page mappings
1428 * @obj: obj in question
1429 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001430 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001431 * relinquish ownership of the pages back to the system.
1432 *
1433 * It is vital that we remove the page mapping if we have mapped a tiled
1434 * object through the GTT and then lose the fence register due to
1435 * resource pressure. Similarly if the object has been moved out of the
	1436 * aperture, then pages mapped into userspace must be revoked. Removing the
1437 * mapping will then trigger a page fault on the next user access, allowing
1438 * fixup by i915_gem_fault().
1439 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001440void
Chris Wilson05394f32010-11-08 19:18:58 +00001441i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001442{
Chris Wilson6299f992010-11-24 12:23:44 +00001443 if (!obj->fault_mappable)
1444 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001445
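	/* Userspace maps this object at its fake offset
	 * (map_list.hash.key << PAGE_SHIFT) in the device address_space;
	 * zap any PTEs covering that range so the next access refaults.
	 */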
Chris Wilsonf6e47882011-03-20 21:09:12 +00001446 if (obj->base.dev->dev_mapping)
1447 unmap_mapping_range(obj->base.dev->dev_mapping,
1448 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1449 obj->base.size, 1);
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001450
Chris Wilson6299f992010-11-24 12:23:44 +00001451 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001452}
1453
Imre Deak0fa87792013-01-07 21:47:35 +02001454uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001455i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001456{
Chris Wilsone28f8712011-07-18 13:11:49 -07001457 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001458
1459 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001460 tiling_mode == I915_TILING_NONE)
1461 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001462
1463 /* Previous chips need a power-of-two fence region when tiling */
1464 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001465 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001466 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001467 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001468
Chris Wilsone28f8712011-07-18 13:11:49 -07001469 while (gtt_size < size)
1470 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001471
Chris Wilsone28f8712011-07-18 13:11:49 -07001472 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001473}
1474
Jesse Barnesde151cf2008-11-12 10:03:55 -08001475/**
1476 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1477 * @obj: object to check
1478 *
1479 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001480 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001481 */
Imre Deakd8651102013-01-07 21:47:33 +02001482uint32_t
1483i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1484 int tiling_mode, bool fenced)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001485{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001486 /*
1487 * Minimum alignment is 4k (GTT page size), but might be greater
1488 * if a fence register is needed for the object.
1489 */
Imre Deakd8651102013-01-07 21:47:33 +02001490 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001491 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001492 return 4096;
1493
1494 /*
1495 * Previous chips need to be aligned to the size of the smallest
1496 * fence register that can contain the object.
1497 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001498 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001499}
1500
Chris Wilsond8cb5082012-08-11 15:41:03 +01001501static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1502{
1503 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1504 int ret;
1505
1506 if (obj->base.map_list.map)
1507 return 0;
1508
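	/* Keep the shrinker from stealing our struct_mutex while we retry the
	 * allocation below; presumably so it cannot reap the very object
	 * whose mmap offset we are in the middle of creating.
	 */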
Daniel Vetterda494d72012-12-20 15:11:16 +01001509 dev_priv->mm.shrinker_no_lock_stealing = true;
1510
Chris Wilsond8cb5082012-08-11 15:41:03 +01001511 ret = drm_gem_create_mmap_offset(&obj->base);
1512 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001513 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001514
1515 /* Badly fragmented mmap space? The only way we can recover
1516 * space is by destroying unwanted objects. We can't randomly release
1517 * mmap_offsets as userspace expects them to be persistent for the
	1518 * lifetime of the objects. The closest we can do is to release the
1519 * offsets on purgeable objects by truncating it and marking it purged,
1520 * which prevents userspace from ever using that object again.
1521 */
1522 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1523 ret = drm_gem_create_mmap_offset(&obj->base);
1524 if (ret != -ENOSPC)
Daniel Vetterda494d72012-12-20 15:11:16 +01001525 goto out;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001526
1527 i915_gem_shrink_all(dev_priv);
Daniel Vetterda494d72012-12-20 15:11:16 +01001528 ret = drm_gem_create_mmap_offset(&obj->base);
1529out:
1530 dev_priv->mm.shrinker_no_lock_stealing = false;
1531
1532 return ret;
Chris Wilsond8cb5082012-08-11 15:41:03 +01001533}
1534
1535static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1536{
1537 if (!obj->base.map_list.map)
1538 return;
1539
1540 drm_gem_free_mmap_offset(&obj->base);
1541}
1542
Jesse Barnesde151cf2008-11-12 10:03:55 -08001543int
Dave Airlieff72145b2011-02-07 12:16:14 +10001544i915_gem_mmap_gtt(struct drm_file *file,
1545 struct drm_device *dev,
1546 uint32_t handle,
1547 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001548{
Chris Wilsonda761a62010-10-27 17:37:08 +01001549 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001550 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001551 int ret;
1552
Chris Wilson76c1dec2010-09-25 11:22:51 +01001553 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001554 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001555 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001556
Dave Airlieff72145b2011-02-07 12:16:14 +10001557 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001558 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001559 ret = -ENOENT;
1560 goto unlock;
1561 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001562
Ben Widawsky5d4545a2013-01-17 12:45:15 -08001563 if (obj->base.size > dev_priv->gtt.mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001564 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001565 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001566 }
1567
Chris Wilson05394f32010-11-08 19:18:58 +00001568 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001569 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001570 ret = -EINVAL;
1571 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001572 }
1573
Chris Wilsond8cb5082012-08-11 15:41:03 +01001574 ret = i915_gem_object_create_mmap_offset(obj);
1575 if (ret)
1576 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001577
Dave Airlieff72145b2011-02-07 12:16:14 +10001578 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001579
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001580out:
Chris Wilson05394f32010-11-08 19:18:58 +00001581 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001582unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001583 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001584 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001585}
1586
Dave Airlieff72145b2011-02-07 12:16:14 +10001587/**
1588 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1589 * @dev: DRM device
1590 * @data: GTT mapping ioctl data
1591 * @file: GEM object info
1592 *
1593 * Simply returns the fake offset to userspace so it can mmap it.
1594 * The mmap call will end up in drm_gem_mmap(), which will set things
1595 * up so we can get faults in the handler above.
1596 *
1597 * The fault handler will take care of binding the object into the GTT
1598 * (since it may have been evicted to make room for something), allocating
1599 * a fence register, and mapping the appropriate aperture address into
1600 * userspace.
1601 */
1602int
1603i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1604 struct drm_file *file)
1605{
1606 struct drm_i915_gem_mmap_gtt *args = data;
1607
Dave Airlieff72145b2011-02-07 12:16:14 +10001608 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1609}
1610
Daniel Vetter225067e2012-08-20 10:23:20 +02001611/* Immediately discard the backing storage */
1612static void
1613i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001614{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001615 struct inode *inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001616
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001617 i915_gem_object_free_mmap_offset(obj);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001618
Chris Wilson4d6294bf2012-08-11 15:41:05 +01001619 if (obj->base.filp == NULL)
1620 return;
1621
Daniel Vetter225067e2012-08-20 10:23:20 +02001622 /* Our goal here is to return as much of the memory as
	1623 * possible back to the system as we are called from OOM.
1624 * To do this we must instruct the shmfs to drop all of its
1625 * backing pages, *now*.
Chris Wilsone5281cc2010-10-28 13:45:36 +01001626 */
Al Viro496ad9a2013-01-23 17:07:38 -05001627 inode = file_inode(obj->base.filp);
Daniel Vetter225067e2012-08-20 10:23:20 +02001628 shmem_truncate_range(inode, 0, (loff_t)-1);
Hugh Dickins5949eac2011-06-27 16:18:18 -07001629
Daniel Vetter225067e2012-08-20 10:23:20 +02001630 obj->madv = __I915_MADV_PURGED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001631}
Chris Wilsone5281cc2010-10-28 13:45:36 +01001632
Daniel Vetter225067e2012-08-20 10:23:20 +02001633static inline int
1634i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1635{
1636 return obj->madv == I915_MADV_DONTNEED;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001637}
1638
Chris Wilson5cdf5882010-09-27 15:51:07 +01001639static void
Chris Wilson05394f32010-11-08 19:18:58 +00001640i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001641{
Imre Deak90797e62013-02-18 19:28:03 +02001642 struct sg_page_iter sg_iter;
1643 int ret;
Daniel Vetter1286ff72012-05-10 15:25:09 +02001644
Chris Wilson05394f32010-11-08 19:18:58 +00001645 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001646
Chris Wilson6c085a72012-08-20 11:40:46 +02001647 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1648 if (ret) {
1649 /* In the event of a disaster, abandon all caches and
1650 * hope for the best.
1651 */
1652 WARN_ON(ret != -EIO);
Chris Wilson2c225692013-08-09 12:26:45 +01001653 i915_gem_clflush_object(obj, true);
Chris Wilson6c085a72012-08-20 11:40:46 +02001654 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1655 }
1656
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001657 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001658 i915_gem_object_save_bit_17_swizzle(obj);
1659
Chris Wilson05394f32010-11-08 19:18:58 +00001660 if (obj->madv == I915_MADV_DONTNEED)
1661 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001662
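	/* Hand every page back to the shmemfs backing store, preserving the
	 * dirty and referenced state the object still cares about.
	 */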
Imre Deak90797e62013-02-18 19:28:03 +02001663 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
Imre Deak2db76d72013-03-26 15:14:18 +02001664 struct page *page = sg_page_iter_page(&sg_iter);
Chris Wilson9da3da62012-06-01 15:20:22 +01001665
Chris Wilson05394f32010-11-08 19:18:58 +00001666 if (obj->dirty)
Chris Wilson9da3da62012-06-01 15:20:22 +01001667 set_page_dirty(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001668
Chris Wilson05394f32010-11-08 19:18:58 +00001669 if (obj->madv == I915_MADV_WILLNEED)
Chris Wilson9da3da62012-06-01 15:20:22 +01001670 mark_page_accessed(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001671
Chris Wilson9da3da62012-06-01 15:20:22 +01001672 page_cache_release(page);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001673 }
Chris Wilson05394f32010-11-08 19:18:58 +00001674 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001675
Chris Wilson9da3da62012-06-01 15:20:22 +01001676 sg_free_table(obj->pages);
1677 kfree(obj->pages);
Chris Wilson37e680a2012-06-07 15:38:42 +01001678}
1679
Chris Wilsondd624af2013-01-15 12:39:35 +00001680int
Chris Wilson37e680a2012-06-07 15:38:42 +01001681i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1682{
1683 const struct drm_i915_gem_object_ops *ops = obj->ops;
1684
Chris Wilson2f745ad2012-09-04 21:02:58 +01001685 if (obj->pages == NULL)
Chris Wilson37e680a2012-06-07 15:38:42 +01001686 return 0;
1687
Chris Wilsona5570172012-09-04 21:02:54 +01001688 if (obj->pages_pin_count)
1689 return -EBUSY;
1690
Ben Widawsky98438772013-07-31 17:00:12 -07001691 BUG_ON(i915_gem_obj_bound_any(obj));
Ben Widawsky3e123022013-07-31 17:00:04 -07001692
Chris Wilsona2165e32012-12-03 11:49:00 +00001693 /* ->put_pages might need to allocate memory for the bit17 swizzle
1694 * array, hence protect them from being reaped by removing them from gtt
1695 * lists early. */
Ben Widawsky35c20a62013-05-31 11:28:48 -07001696 list_del(&obj->global_list);
Chris Wilsona2165e32012-12-03 11:49:00 +00001697
Chris Wilson37e680a2012-06-07 15:38:42 +01001698 ops->put_pages(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001699 obj->pages = NULL;
Chris Wilson6c085a72012-08-20 11:40:46 +02001700
Chris Wilson6c085a72012-08-20 11:40:46 +02001701 if (i915_gem_object_is_purgeable(obj))
1702 i915_gem_object_truncate(obj);
1703
1704 return 0;
1705}
1706
1707static long
Daniel Vetter93927ca2013-01-10 18:03:00 +01001708__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1709 bool purgeable_only)
Chris Wilson6c085a72012-08-20 11:40:46 +02001710{
1711 struct drm_i915_gem_object *obj, *next;
1712 long count = 0;
1713
1714 list_for_each_entry_safe(obj, next,
1715 &dev_priv->mm.unbound_list,
Ben Widawsky35c20a62013-05-31 11:28:48 -07001716 global_list) {
Daniel Vetter93927ca2013-01-10 18:03:00 +01001717 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
Chris Wilson37e680a2012-06-07 15:38:42 +01001718 i915_gem_object_put_pages(obj) == 0) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001719 count += obj->base.size >> PAGE_SHIFT;
1720 if (count >= target)
1721 return count;
1722 }
1723 }
1724
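	/* Second pass: bound objects must have all their VMAs unbound before
	 * their pages can be released, which is more expensive, so they are
	 * only touched if the unbound list did not satisfy the target.
	 */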
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001725 list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
1726 global_list) {
1727 struct i915_vma *vma, *v;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001728
1729 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1730 continue;
1731
Ben Widawsky07fe0b12013-07-31 17:00:10 -07001732 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1733 if (i915_vma_unbind(vma))
1734 break;
Ben Widawsky80dcfdb2013-07-31 17:00:01 -07001735
1736 if (!i915_gem_object_put_pages(obj)) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001737 count += obj->base.size >> PAGE_SHIFT;
1738 if (count >= target)
1739 return count;
1740 }
1741 }
1742
1743 return count;
1744}
1745
Daniel Vetter93927ca2013-01-10 18:03:00 +01001746static long
1747i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1748{
1749 return __i915_gem_shrink(dev_priv, target, true);
1750}
1751
Chris Wilson6c085a72012-08-20 11:40:46 +02001752static void
1753i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1754{
1755 struct drm_i915_gem_object *obj, *next;
1756
1757 i915_gem_evict_everything(dev_priv->dev);
1758
Ben Widawsky35c20a62013-05-31 11:28:48 -07001759 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1760 global_list)
Chris Wilson37e680a2012-06-07 15:38:42 +01001761 i915_gem_object_put_pages(obj);
Daniel Vetter225067e2012-08-20 10:23:20 +02001762}
1763
Chris Wilson37e680a2012-06-07 15:38:42 +01001764static int
Chris Wilson6c085a72012-08-20 11:40:46 +02001765i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001766{
Chris Wilson6c085a72012-08-20 11:40:46 +02001767 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001768 int page_count, i;
1769 struct address_space *mapping;
Chris Wilson9da3da62012-06-01 15:20:22 +01001770 struct sg_table *st;
1771 struct scatterlist *sg;
Imre Deak90797e62013-02-18 19:28:03 +02001772 struct sg_page_iter sg_iter;
Eric Anholt673a3942008-07-30 12:06:12 -07001773 struct page *page;
Imre Deak90797e62013-02-18 19:28:03 +02001774 unsigned long last_pfn = 0; /* suppress gcc warning */
Chris Wilson6c085a72012-08-20 11:40:46 +02001775 gfp_t gfp;
Eric Anholt673a3942008-07-30 12:06:12 -07001776
Chris Wilson6c085a72012-08-20 11:40:46 +02001777 /* Assert that the object is not currently in any GPU domain. As it
1778 * wasn't in the GTT, there shouldn't be any way it could have been in
1779 * a GPU cache
1780 */
1781 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1782 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1783
Chris Wilson9da3da62012-06-01 15:20:22 +01001784 st = kmalloc(sizeof(*st), GFP_KERNEL);
1785 if (st == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07001786 return -ENOMEM;
1787
Chris Wilson9da3da62012-06-01 15:20:22 +01001788 page_count = obj->base.size / PAGE_SIZE;
1789 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1790 sg_free_table(st);
1791 kfree(st);
1792 return -ENOMEM;
1793 }
1794
1795 /* Get the list of pages out of our struct file. They'll be pinned
1796 * at this point until we release them.
1797 *
1798 * Fail silently without starting the shrinker
1799 */
Al Viro496ad9a2013-01-23 17:07:38 -05001800 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilson6c085a72012-08-20 11:40:46 +02001801 gfp = mapping_gfp_mask(mapping);
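	/* Request cheap, non-blocking allocations first (no I/O, no waiting,
	 * no kswapd); if they fail we purge our own buffers and only as a
	 * last resort re-enable blocking reclaim further down.
	 */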
Linus Torvaldscaf49192012-12-10 10:51:16 -08001802 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001803 gfp &= ~(__GFP_IO | __GFP_WAIT);
Imre Deak90797e62013-02-18 19:28:03 +02001804 sg = st->sgl;
1805 st->nents = 0;
1806 for (i = 0; i < page_count; i++) {
Chris Wilson6c085a72012-08-20 11:40:46 +02001807 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1808 if (IS_ERR(page)) {
1809 i915_gem_purge(dev_priv, page_count);
1810 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1811 }
1812 if (IS_ERR(page)) {
1813 /* We've tried hard to allocate the memory by reaping
1814 * our own buffer, now let the real VM do its job and
1815 * go down in flames if truly OOM.
1816 */
Linus Torvaldscaf49192012-12-10 10:51:16 -08001817 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
Chris Wilson6c085a72012-08-20 11:40:46 +02001818 gfp |= __GFP_IO | __GFP_WAIT;
1819
1820 i915_gem_shrink_all(dev_priv);
1821 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1822 if (IS_ERR(page))
1823 goto err_pages;
1824
Linus Torvaldscaf49192012-12-10 10:51:16 -08001825 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
Chris Wilson6c085a72012-08-20 11:40:46 +02001826 gfp &= ~(__GFP_IO | __GFP_WAIT);
1827 }
Konrad Rzeszutek Wilk1625e7e2013-06-24 11:47:48 -04001828#ifdef CONFIG_SWIOTLB
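		/* With swiotlb in use, keep one page per sg entry rather than
		 * coalescing contiguous pages; presumably this keeps each
		 * segment within what the bounce buffering can handle.
		 */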
1829 if (swiotlb_nr_tbl()) {
1830 st->nents++;
1831 sg_set_page(sg, page, PAGE_SIZE, 0);
1832 sg = sg_next(sg);
1833 continue;
1834 }
1835#endif
Imre Deak90797e62013-02-18 19:28:03 +02001836 if (!i || page_to_pfn(page) != last_pfn + 1) {
1837 if (i)
1838 sg = sg_next(sg);
1839 st->nents++;
1840 sg_set_page(sg, page, PAGE_SIZE, 0);
1841 } else {
1842 sg->length += PAGE_SIZE;
1843 }
1844 last_pfn = page_to_pfn(page);
Eric Anholt673a3942008-07-30 12:06:12 -07001845 }
Konrad Rzeszutek Wilk1625e7e2013-06-24 11:47:48 -04001846#ifdef CONFIG_SWIOTLB
1847 if (!swiotlb_nr_tbl())
1848#endif
1849 sg_mark_end(sg);
Chris Wilson74ce6b62012-10-19 15:51:06 +01001850 obj->pages = st;
1851
Eric Anholt673a3942008-07-30 12:06:12 -07001852 if (i915_gem_object_needs_bit17_swizzle(obj))
1853 i915_gem_object_do_bit_17_swizzle(obj);
1854
1855 return 0;
1856
1857err_pages:
Imre Deak90797e62013-02-18 19:28:03 +02001858 sg_mark_end(sg);
1859 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
Imre Deak2db76d72013-03-26 15:14:18 +02001860 page_cache_release(sg_page_iter_page(&sg_iter));
Chris Wilson9da3da62012-06-01 15:20:22 +01001861 sg_free_table(st);
1862 kfree(st);
Eric Anholt673a3942008-07-30 12:06:12 -07001863 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07001864}
1865
Chris Wilson37e680a2012-06-07 15:38:42 +01001866/* Ensure that the associated pages are gathered from the backing storage
1867 * and pinned into our object. i915_gem_object_get_pages() may be called
1868 * multiple times before they are released by a single call to
1869 * i915_gem_object_put_pages() - once the pages are no longer referenced
1870 * either as a result of memory pressure (reaping pages under the shrinker)
1871 * or as the object is itself released.
1872 */
1873int
1874i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1875{
1876 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1877 const struct drm_i915_gem_object_ops *ops = obj->ops;
1878 int ret;
1879
Chris Wilson2f745ad2012-09-04 21:02:58 +01001880 if (obj->pages)
Chris Wilson37e680a2012-06-07 15:38:42 +01001881 return 0;
1882
Chris Wilson43e28f02013-01-08 10:53:09 +00001883 if (obj->madv != I915_MADV_WILLNEED) {
1884 DRM_ERROR("Attempting to obtain a purgeable object\n");
1885 return -EINVAL;
1886 }
1887
Chris Wilsona5570172012-09-04 21:02:54 +01001888 BUG_ON(obj->pages_pin_count);
1889
Chris Wilson37e680a2012-06-07 15:38:42 +01001890 ret = ops->get_pages(obj);
1891 if (ret)
1892 return ret;
1893
Ben Widawsky35c20a62013-05-31 11:28:48 -07001894 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Chris Wilson37e680a2012-06-07 15:38:42 +01001895 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001896}
1897
Chris Wilson54cf91d2010-11-25 18:00:26 +00001898void
Chris Wilson05394f32010-11-08 19:18:58 +00001899i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson9d7730912012-11-27 16:22:52 +00001900 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001901{
Chris Wilson05394f32010-11-08 19:18:58 +00001902 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001903 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson9d7730912012-11-27 16:22:52 +00001904 u32 seqno = intel_ring_get_seqno(ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001905
Zou Nan hai852835f2010-05-21 09:08:56 +08001906 BUG_ON(ring == NULL);
Chris Wilson02978ff2013-07-09 09:22:39 +01001907 if (obj->ring != ring && obj->last_write_seqno) {
1908 /* Keep the seqno relative to the current ring */
1909 obj->last_write_seqno = seqno;
1910 }
Chris Wilson05394f32010-11-08 19:18:58 +00001911 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001912
1913 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001914 if (!obj->active) {
1915 drm_gem_object_reference(&obj->base);
1916 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001917 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001918
Chris Wilson05394f32010-11-08 19:18:58 +00001919 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001920
Chris Wilson0201f1e2012-07-20 12:41:01 +01001921 obj->last_read_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00001922
Chris Wilsoncaea7472010-11-12 13:53:37 +00001923 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00001924 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001925
Chris Wilson7dd49062012-03-21 10:48:18 +00001926 /* Bump MRU to take account of the delayed flush */
1927 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1928 struct drm_i915_fence_reg *reg;
1929
1930 reg = &dev_priv->fence_regs[obj->fence_reg];
1931 list_move_tail(&reg->lru_list,
1932 &dev_priv->mm.fence_list);
1933 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00001934 }
1935}
1936
1937static void
Chris Wilsoncaea7472010-11-12 13:53:37 +00001938i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1939{
Ben Widawskyca191b12013-07-31 17:00:14 -07001940 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1941 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
1942 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001943
Chris Wilson65ce3022012-07-20 12:41:02 +01001944 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001945 BUG_ON(!obj->active);
Chris Wilson65ce3022012-07-20 12:41:02 +01001946
Ben Widawskyca191b12013-07-31 17:00:14 -07001947 list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001948
Chris Wilson65ce3022012-07-20 12:41:02 +01001949 list_del_init(&obj->ring_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001950 obj->ring = NULL;
1951
Chris Wilson65ce3022012-07-20 12:41:02 +01001952 obj->last_read_seqno = 0;
1953 obj->last_write_seqno = 0;
1954 obj->base.write_domain = 0;
1955
1956 obj->last_fenced_seqno = 0;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001957 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001958
1959 obj->active = 0;
1960 drm_gem_object_unreference(&obj->base);
1961
1962 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08001963}
Eric Anholt673a3942008-07-30 12:06:12 -07001964
Chris Wilson9d7730912012-11-27 16:22:52 +00001965static int
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02001966i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01001967{
Chris Wilson9d7730912012-11-27 16:22:52 +00001968 struct drm_i915_private *dev_priv = dev->dev_private;
1969 struct intel_ring_buffer *ring;
1970 int ret, i, j;
Daniel Vetter53d227f2012-01-25 16:32:49 +01001971
Chris Wilson107f27a52012-12-10 13:56:17 +02001972 /* Carefully retire all requests without writing to the rings */
Chris Wilson9d7730912012-11-27 16:22:52 +00001973 for_each_ring(ring, dev_priv, i) {
Chris Wilson107f27a52012-12-10 13:56:17 +02001974 ret = intel_ring_idle(ring);
1975 if (ret)
1976 return ret;
Chris Wilson9d7730912012-11-27 16:22:52 +00001977 }
Chris Wilson9d7730912012-11-27 16:22:52 +00001978 i915_gem_retire_requests(dev);
Chris Wilson107f27a52012-12-10 13:56:17 +02001979
1980 /* Finally reset hw state */
Chris Wilson9d7730912012-11-27 16:22:52 +00001981 for_each_ring(ring, dev_priv, i) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02001982 intel_ring_init_seqno(ring, seqno);
Mika Kuoppala498d2ac2012-12-04 15:12:04 +02001983
Chris Wilson9d7730912012-11-27 16:22:52 +00001984 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1985 ring->sync_seqno[j] = 0;
1986 }
1987
1988 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01001989}
1990
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02001991int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1992{
1993 struct drm_i915_private *dev_priv = dev->dev_private;
1994 int ret;
1995
1996 if (seqno == 0)
1997 return -EINVAL;
1998
1999 /* HWS page needs to be set less than what we
2000 * will inject to ring
2001 */
2002 ret = i915_gem_init_seqno(dev, seqno - 1);
2003 if (ret)
2004 return ret;
2005
2006 /* Carefully set the last_seqno value so that wrap
2007 * detection still works
2008 */
2009 dev_priv->next_seqno = seqno;
2010 dev_priv->last_seqno = seqno - 1;
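	/* Seqno 0 is reserved to mean "no seqno" (see i915_gem_get_seqno),
	 * so if the wrap lands last_seqno on 0, step back once more.
	 */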
2011 if (dev_priv->last_seqno == 0)
2012 dev_priv->last_seqno--;
2013
2014 return 0;
2015}
2016
Chris Wilson9d7730912012-11-27 16:22:52 +00002017int
2018i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
Daniel Vetter53d227f2012-01-25 16:32:49 +01002019{
Chris Wilson9d7730912012-11-27 16:22:52 +00002020 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002021
Chris Wilson9d7730912012-11-27 16:22:52 +00002022 /* reserve 0 for non-seqno */
2023 if (dev_priv->next_seqno == 0) {
Mika Kuoppalafca26bb2012-12-19 11:13:08 +02002024 int ret = i915_gem_init_seqno(dev, 0);
Chris Wilson9d7730912012-11-27 16:22:52 +00002025 if (ret)
2026 return ret;
2027
2028 dev_priv->next_seqno = 1;
2029 }
2030
Mika Kuoppalaf72b3432012-12-10 15:41:48 +02002031 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
Chris Wilson9d7730912012-11-27 16:22:52 +00002032 return 0;
Daniel Vetter53d227f2012-01-25 16:32:49 +01002033}
2034
Mika Kuoppala0025c072013-06-12 12:35:30 +03002035int __i915_add_request(struct intel_ring_buffer *ring,
2036 struct drm_file *file,
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002037 struct drm_i915_gem_object *obj,
Mika Kuoppala0025c072013-06-12 12:35:30 +03002038 u32 *out_seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002039{
Chris Wilsondb53a302011-02-03 11:57:46 +00002040 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Chris Wilsonacb868d2012-09-26 13:47:30 +01002041 struct drm_i915_gem_request *request;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002042 u32 request_ring_position, request_start;
Eric Anholt673a3942008-07-30 12:06:12 -07002043 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01002044 int ret;
2045
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002046 request_start = intel_ring_get_tail(ring);
Daniel Vettercc889e02012-06-13 20:45:19 +02002047 /*
2048 * Emit any outstanding flushes - execbuf can fail to emit the flush
2049 * after having emitted the batchbuffer command. Hence we need to fix
2050 * things up similar to emitting the lazy request. The difference here
2051 * is that the flush _must_ happen before the next request, no matter
2052 * what.
2053 */
Chris Wilsona7b97612012-07-20 12:41:08 +01002054 ret = intel_ring_flush_all_caches(ring);
2055 if (ret)
2056 return ret;
Daniel Vettercc889e02012-06-13 20:45:19 +02002057
Chris Wilsonacb868d2012-09-26 13:47:30 +01002058 request = kmalloc(sizeof(*request), GFP_KERNEL);
2059 if (request == NULL)
2060 return -ENOMEM;
Daniel Vettercc889e02012-06-13 20:45:19 +02002061
Eric Anholt673a3942008-07-30 12:06:12 -07002062
Chris Wilsona71d8d92012-02-15 11:25:36 +00002063 /* Record the position of the start of the request so that
2064 * should we detect the updated seqno part-way through the
2065 * GPU processing the request, we never over-estimate the
2066 * position of the head.
2067 */
2068 request_ring_position = intel_ring_get_tail(ring);
2069
Chris Wilson9d7730912012-11-27 16:22:52 +00002070 ret = ring->add_request(ring);
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002071 if (ret) {
2072 kfree(request);
2073 return ret;
2074 }
Eric Anholt673a3942008-07-30 12:06:12 -07002075
Chris Wilson9d7730912012-11-27 16:22:52 +00002076 request->seqno = intel_ring_get_seqno(ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08002077 request->ring = ring;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002078 request->head = request_start;
Chris Wilsona71d8d92012-02-15 11:25:36 +00002079 request->tail = request_ring_position;
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002080 request->ctx = ring->last_context;
Mika Kuoppala7d736f42013-06-12 15:01:39 +03002081 request->batch_obj = obj;
2082
2083 /* Whilst this request exists, batch_obj will be on the
2084 * active_list, and so will hold the active reference. Only when this
	2085 * request is retired will the batch_obj be moved onto the
2086 * inactive_list and lose its active reference. Hence we do not need
2087 * to explicitly hold another reference here.
2088 */
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002089
2090 if (request->ctx)
2091 i915_gem_context_reference(request->ctx);
2092
Eric Anholt673a3942008-07-30 12:06:12 -07002093 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08002094 was_empty = list_empty(&ring->request_list);
2095 list_add_tail(&request->list, &ring->request_list);
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002096 request->file_priv = NULL;
Zou Nan hai852835f2010-05-21 09:08:56 +08002097
Chris Wilsondb53a302011-02-03 11:57:46 +00002098 if (file) {
2099 struct drm_i915_file_private *file_priv = file->driver_priv;
2100
Chris Wilson1c255952010-09-26 11:03:27 +01002101 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002102 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002103 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002104 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01002105 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00002106 }
Eric Anholt673a3942008-07-30 12:06:12 -07002107
Chris Wilson9d7730912012-11-27 16:22:52 +00002108 trace_i915_gem_request_add(ring, request->seqno);
Daniel Vetter5391d0c2012-01-25 14:03:57 +01002109 ring->outstanding_lazy_request = 0;
Chris Wilsondb53a302011-02-03 11:57:46 +00002110
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002111 if (!dev_priv->ums.mm_suspended) {
Mika Kuoppala10cd45b2013-07-03 17:22:08 +03002112 i915_queue_hangcheck(ring->dev);
2113
Chris Wilsonf047e392012-07-21 12:31:41 +01002114 if (was_empty) {
Chris Wilsonb3b079d2010-09-13 23:44:34 +01002115 queue_delayed_work(dev_priv->wq,
Chris Wilsonbcb45082012-10-05 17:02:57 +01002116 &dev_priv->mm.retire_work,
2117 round_jiffies_up_relative(HZ));
Chris Wilsonf047e392012-07-21 12:31:41 +01002118 intel_mark_busy(dev_priv->dev);
2119 }
Ben Gamarif65d9422009-09-14 17:48:44 -04002120 }
Daniel Vettercc889e02012-06-13 20:45:19 +02002121
Chris Wilsonacb868d2012-09-26 13:47:30 +01002122 if (out_seqno)
Chris Wilson9d7730912012-11-27 16:22:52 +00002123 *out_seqno = request->seqno;
Chris Wilson3cce4692010-10-27 16:11:02 +01002124 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002125}
2126
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002127static inline void
2128i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07002129{
Chris Wilson1c255952010-09-26 11:03:27 +01002130 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002131
Chris Wilson1c255952010-09-26 11:03:27 +01002132 if (!file_priv)
2133 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002134
Chris Wilson1c255952010-09-26 11:03:27 +01002135 spin_lock(&file_priv->mm.lock);
Herton Ronaldo Krzesinski09bfa512011-03-17 13:45:12 +00002136 if (request->file_priv) {
2137 list_del(&request->client_list);
2138 request->file_priv = NULL;
2139 }
Chris Wilson1c255952010-09-26 11:03:27 +01002140 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002141}
2142
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002143static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2144 struct i915_address_space *vm)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002145{
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002146 if (acthd >= i915_gem_obj_offset(obj, vm) &&
2147 acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002148 return true;
2149
2150 return false;
2151}
2152
2153static bool i915_head_inside_request(const u32 acthd_unmasked,
2154 const u32 request_start,
2155 const u32 request_end)
2156{
2157 const u32 acthd = acthd_unmasked & HEAD_ADDR;
2158
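	/* A request may wrap past the end of the ring, in which case its
	 * start offset is greater than its end offset and the valid range
	 * is split across the wrap point.
	 */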
2159 if (request_start < request_end) {
2160 if (acthd >= request_start && acthd < request_end)
2161 return true;
2162 } else if (request_start > request_end) {
2163 if (acthd >= request_start || acthd < request_end)
2164 return true;
2165 }
2166
2167 return false;
2168}
2169
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002170static struct i915_address_space *
2171request_to_vm(struct drm_i915_gem_request *request)
2172{
2173 struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2174 struct i915_address_space *vm;
2175
2176 vm = &dev_priv->gtt.base;
2177
2178 return vm;
2179}
2180
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002181static bool i915_request_guilty(struct drm_i915_gem_request *request,
2182 const u32 acthd, bool *inside)
2183{
	2184 /* There is a possibility that the unmasked head address, while
	2185 * pointing inside the ring, matches the batch_obj address range.
2186 * However this is extremely unlikely.
2187 */
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002188 if (request->batch_obj) {
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002189 if (i915_head_inside_object(acthd, request->batch_obj,
2190 request_to_vm(request))) {
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002191 *inside = true;
2192 return true;
2193 }
2194 }
2195
2196 if (i915_head_inside_request(acthd, request->head, request->tail)) {
2197 *inside = false;
2198 return true;
2199 }
2200
2201 return false;
2202}
2203
2204static void i915_set_reset_status(struct intel_ring_buffer *ring,
2205 struct drm_i915_gem_request *request,
2206 u32 acthd)
2207{
2208 struct i915_ctx_hang_stats *hs = NULL;
2209 bool inside, guilty;
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002210 unsigned long offset = 0;
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002211
2212 /* Innocent until proven guilty */
2213 guilty = false;
2214
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002215 if (request->batch_obj)
2216 offset = i915_gem_obj_offset(request->batch_obj,
2217 request_to_vm(request));
2218
Jani Nikulaf2f4d822013-08-11 12:44:01 +03002219 if (ring->hangcheck.action != HANGCHECK_WAIT &&
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002220 i915_request_guilty(request, acthd, &inside)) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002221 DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002222 ring->name,
2223 inside ? "inside" : "flushing",
Ben Widawskyd1ccbb52013-07-31 17:00:05 -07002224 offset,
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002225 request->ctx ? request->ctx->id : 0,
2226 acthd);
2227
2228 guilty = true;
2229 }
2230
2231 /* If contexts are disabled or this is the default context, use
	2232 * file_priv->hang_stats
2233 */
2234 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2235 hs = &request->ctx->hang_stats;
2236 else if (request->file_priv)
2237 hs = &request->file_priv->hang_stats;
2238
2239 if (hs) {
2240 if (guilty)
2241 hs->batch_active++;
2242 else
2243 hs->batch_pending++;
2244 }
2245}
2246
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002247static void i915_gem_free_request(struct drm_i915_gem_request *request)
2248{
2249 list_del(&request->list);
2250 i915_gem_request_remove_from_client(request);
2251
2252 if (request->ctx)
2253 i915_gem_context_unreference(request->ctx);
2254
2255 kfree(request);
2256}
2257
Chris Wilsondfaae392010-09-22 10:31:52 +01002258static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2259 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01002260{
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002261 u32 completed_seqno;
2262 u32 acthd;
2263
2264 acthd = intel_ring_get_active_head(ring);
2265 completed_seqno = ring->get_seqno(ring, false);
2266
Chris Wilsondfaae392010-09-22 10:31:52 +01002267 while (!list_empty(&ring->request_list)) {
2268 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01002269
Chris Wilsondfaae392010-09-22 10:31:52 +01002270 request = list_first_entry(&ring->request_list,
2271 struct drm_i915_gem_request,
2272 list);
2273
Mika Kuoppalaaa60c662013-06-12 15:13:20 +03002274 if (request->seqno > completed_seqno)
2275 i915_set_reset_status(ring, request, acthd);
2276
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002277 i915_gem_free_request(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01002278 }
2279
2280 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002281 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07002282
Chris Wilson05394f32010-11-08 19:18:58 +00002283 obj = list_first_entry(&ring->active_list,
2284 struct drm_i915_gem_object,
2285 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002286
Chris Wilson05394f32010-11-08 19:18:58 +00002287 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002288 }
Eric Anholt673a3942008-07-30 12:06:12 -07002289}
2290
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002291void i915_gem_restore_fences(struct drm_device *dev)
Chris Wilson312817a2010-11-22 11:50:11 +00002292{
2293 struct drm_i915_private *dev_priv = dev->dev_private;
2294 int i;
2295
Daniel Vetter4b9de732011-10-09 21:52:02 +02002296 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00002297 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00002298
Daniel Vetter94a335d2013-07-17 14:51:28 +02002299 /*
2300 * Commit delayed tiling changes if we have an object still
2301 * attached to the fence, otherwise just clear the fence.
2302 */
2303 if (reg->obj) {
2304 i915_gem_object_update_fence(reg->obj, reg,
2305 reg->obj->tiling_mode);
2306 } else {
2307 i915_gem_write_fence(dev, i, NULL);
2308 }
Chris Wilson312817a2010-11-22 11:50:11 +00002309 }
2310}
2311
Chris Wilson069efc12010-09-30 16:53:18 +01002312void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07002313{
Chris Wilsondfaae392010-09-22 10:31:52 +01002314 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002315 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002316 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002317
Chris Wilsonb4519512012-05-11 14:29:30 +01002318 for_each_ring(ring, dev_priv, i)
2319 i915_gem_reset_ring_lists(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01002320
Chris Wilson19b2dbd2013-06-12 10:15:12 +01002321 i915_gem_restore_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002322}
2323
2324/**
2325 * This function clears the request list as sequence numbers are passed.
2326 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00002327void
Chris Wilsondb53a302011-02-03 11:57:46 +00002328i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07002329{
Eric Anholt673a3942008-07-30 12:06:12 -07002330 uint32_t seqno;
2331
Chris Wilsondb53a302011-02-03 11:57:46 +00002332 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01002333 return;
2334
Chris Wilsondb53a302011-02-03 11:57:46 +00002335 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002336
Chris Wilsonb2eadbc2012-08-09 10:58:30 +01002337 seqno = ring->get_seqno(ring, true);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002338
Zou Nan hai852835f2010-05-21 09:08:56 +08002339 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002340 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07002341
Zou Nan hai852835f2010-05-21 09:08:56 +08002342 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07002343 struct drm_i915_gem_request,
2344 list);
Eric Anholt673a3942008-07-30 12:06:12 -07002345
Chris Wilsondfaae392010-09-22 10:31:52 +01002346 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07002347 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002348
Chris Wilsondb53a302011-02-03 11:57:46 +00002349 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00002350 /* We know the GPU must have read the request to have
2351 * sent us the seqno + interrupt, so use the position
2352 * of tail of the request to update the last known position
2353 * of the GPU head.
2354 */
2355 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002356
Mika Kuoppala0e50e962013-05-02 16:48:08 +03002357 i915_gem_free_request(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002358 }
2359
2360 /* Move any buffers on the active list that are no longer referenced
2361 * by the ringbuffer to the flushing/inactive lists as appropriate.
2362 */
2363 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00002364 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002365
Akshay Joshi0206e352011-08-16 15:34:10 -04002366 obj = list_first_entry(&ring->active_list,
Chris Wilson05394f32010-11-08 19:18:58 +00002367 struct drm_i915_gem_object,
2368 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002369
Chris Wilson0201f1e2012-07-20 12:41:01 +01002370 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01002371 break;
2372
Chris Wilson65ce3022012-07-20 12:41:02 +01002373 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002374 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002375
Chris Wilsondb53a302011-02-03 11:57:46 +00002376 if (unlikely(ring->trace_irq_seqno &&
2377 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002378 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00002379 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01002380 }
Chris Wilson23bc5982010-09-29 16:10:57 +01002381
Chris Wilsondb53a302011-02-03 11:57:46 +00002382 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07002383}
2384
2385void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002386i915_gem_retire_requests(struct drm_device *dev)
2387{
2388 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002389 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002390 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002391
Chris Wilsonb4519512012-05-11 14:29:30 +01002392 for_each_ring(ring, dev_priv, i)
2393 i915_gem_retire_requests_ring(ring);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002394}
2395
Daniel Vetter75ef9da2010-08-21 00:25:16 +02002396static void
Eric Anholt673a3942008-07-30 12:06:12 -07002397i915_gem_retire_work_handler(struct work_struct *work)
2398{
2399 drm_i915_private_t *dev_priv;
2400 struct drm_device *dev;
Chris Wilsonb4519512012-05-11 14:29:30 +01002401 struct intel_ring_buffer *ring;
Chris Wilson0a587052011-01-09 21:05:44 +00002402 bool idle;
2403 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07002404
2405 dev_priv = container_of(work, drm_i915_private_t,
2406 mm.retire_work.work);
2407 dev = dev_priv->dev;
2408
Chris Wilson891b48c2010-09-29 12:26:37 +01002409 /* Come back later if the device is busy... */
2410 if (!mutex_trylock(&dev->struct_mutex)) {
Chris Wilsonbcb45082012-10-05 17:02:57 +01002411 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2412 round_jiffies_up_relative(HZ));
Chris Wilson891b48c2010-09-29 12:26:37 +01002413 return;
2414 }
2415
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002416 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002417
Chris Wilson0a587052011-01-09 21:05:44 +00002418 /* Send a periodic flush down the ring so we don't hold onto GEM
2419 * objects indefinitely.
2420 */
2421 idle = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01002422 for_each_ring(ring, dev_priv, i) {
Chris Wilson3bb73ab2012-07-20 12:40:59 +01002423 if (ring->gpu_caches_dirty)
Mika Kuoppala0025c072013-06-12 12:35:30 +03002424 i915_add_request(ring, NULL);
Chris Wilson0a587052011-01-09 21:05:44 +00002425
2426 idle &= list_empty(&ring->request_list);
2427 }
2428
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02002429 if (!dev_priv->ums.mm_suspended && !idle)
Chris Wilsonbcb45082012-10-05 17:02:57 +01002430 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2431 round_jiffies_up_relative(HZ));
Chris Wilsonf047e392012-07-21 12:31:41 +01002432 if (idle)
2433 intel_mark_idle(dev);
Chris Wilson0a587052011-01-09 21:05:44 +00002434
Eric Anholt673a3942008-07-30 12:06:12 -07002435 mutex_unlock(&dev->struct_mutex);
2436}
2437
Ben Widawsky5816d642012-04-11 11:18:19 -07002438/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002439 * Ensures that an object will eventually get non-busy by flushing any required
2440 * write domains, emitting any outstanding lazy request and retiring any
2441 * completed requests.
2442 */
2443static int
2444i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2445{
2446 int ret;
2447
2448 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002449 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002450 if (ret)
2451 return ret;
2452
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002453 i915_gem_retire_requests_ring(obj->ring);
2454 }
2455
2456 return 0;
2457}
2458
2459/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002460 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2461 * @DRM_IOCTL_ARGS: standard ioctl arguments
2462 *
2463 * Returns 0 if successful, else an error is returned with the remaining time in
2464 * the timeout parameter.
2465 * -ETIME: object is still busy after timeout
2466 * -ERESTARTSYS: signal interrupted the wait
2467 * -ENOENT: object doesn't exist
2468 * Also possible, but rare:
2469 * -EAGAIN: GPU wedged
2470 * -ENOMEM: out of memory
2471 * -ENODEV: Internal IRQ fail
2472 * -E?: The add request failed
2473 *
2474 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2475 * non-zero timeout parameter the wait ioctl will wait for the given number of
2476 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2477 * without holding struct_mutex the object may become re-busied before this
2478 * function completes. A similar but shorter race condition exists in the busy
2479 * ioctl.
2480 */
2481int
2482i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2483{
Daniel Vetterf69061b2012-12-06 09:01:42 +01002484 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002485 struct drm_i915_gem_wait *args = data;
2486 struct drm_i915_gem_object *obj;
2487 struct intel_ring_buffer *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002488 struct timespec timeout_stack, *timeout = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01002489 unsigned reset_counter;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002490 u32 seqno = 0;
2491 int ret = 0;
2492
Ben Widawskyeac1f142012-06-05 15:24:24 -07002493 if (args->timeout_ns >= 0) {
2494 timeout_stack = ns_to_timespec(args->timeout_ns);
2495 timeout = &timeout_stack;
2496 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002497
2498 ret = i915_mutex_lock_interruptible(dev);
2499 if (ret)
2500 return ret;
2501
2502 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2503 if (&obj->base == NULL) {
2504 mutex_unlock(&dev->struct_mutex);
2505 return -ENOENT;
2506 }
2507
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002508 /* Need to make sure the object gets inactive eventually. */
2509 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002510 if (ret)
2511 goto out;
2512
2513 if (obj->active) {
Chris Wilson0201f1e2012-07-20 12:41:01 +01002514 seqno = obj->last_read_seqno;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002515 ring = obj->ring;
2516 }
2517
2518 if (seqno == 0)
2519 goto out;
2520
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002521	/* Do this after the OLR check to make sure we make forward progress when
2522	 * polling on this IOCTL with a 0 timeout (like the busy ioctl).
2523	 */
2524 if (!args->timeout_ns) {
2525 ret = -ETIME;
2526 goto out;
2527 }
2528
2529 drm_gem_object_unreference(&obj->base);
Daniel Vetterf69061b2012-12-06 09:01:42 +01002530 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002531 mutex_unlock(&dev->struct_mutex);
2532
Daniel Vetterf69061b2012-12-06 09:01:42 +01002533 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
Chris Wilson4f42f4e2013-04-26 16:22:46 +03002534 if (timeout)
Ben Widawskyeac1f142012-06-05 15:24:24 -07002535 args->timeout_ns = timespec_to_ns(timeout);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002536 return ret;
2537
2538out:
2539 drm_gem_object_unreference(&obj->base);
2540 mutex_unlock(&dev->struct_mutex);
2541 return ret;
2542}
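/*
 * Illustrative sketch (not part of this file): a userspace caller would
 * normally reach the wait ioctl above through libdrm, roughly as below.
 * The struct and ioctl number come from the i915 uapi header; "fd" and
 * "handle" are assumed to be an open DRM file descriptor and a GEM handle
 * owned by the caller.
 *
 *	struct drm_i915_gem_wait wait = { 0 };
 *	wait.bo_handle = handle;
 *	wait.timeout_ns = 1000 * 1000 * 1000;	(wait for up to ~1s)
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		... object is idle, wait.timeout_ns holds the remaining time ...
 *	else if (errno == ETIME)
 *		... still busy after the full timeout ...
 *
 * Passing timeout_ns == 0 turns this into a non-blocking busy query, as
 * described in the comment above.
 */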
2543
2544/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002545 * i915_gem_object_sync - sync an object to a ring.
2546 *
2547 * @obj: object which may be in use on another ring.
2548 * @to: ring we wish to use the object on. May be NULL.
2549 *
2550 * This code is meant to abstract object synchronization with the GPU.
2551 * Calling with NULL implies synchronizing the object with the CPU
2552 * rather than a particular GPU ring.
2553 *
2554 * Returns 0 if successful, else propagates up the lower layer error.
2555 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002556int
2557i915_gem_object_sync(struct drm_i915_gem_object *obj,
2558 struct intel_ring_buffer *to)
2559{
2560 struct intel_ring_buffer *from = obj->ring;
2561 u32 seqno;
2562 int ret, idx;
2563
2564 if (from == NULL || to == from)
2565 return 0;
2566
Ben Widawsky5816d642012-04-11 11:18:19 -07002567 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Chris Wilson0201f1e2012-07-20 12:41:01 +01002568 return i915_gem_object_wait_rendering(obj, false);
Ben Widawsky2911a352012-04-05 14:47:36 -07002569
2570 idx = intel_ring_sync_index(from, to);
2571
Chris Wilson0201f1e2012-07-20 12:41:01 +01002572 seqno = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002573 if (seqno <= from->sync_seqno[idx])
2574 return 0;
2575
Ben Widawskyb4aca012012-04-25 20:50:12 -07002576 ret = i915_gem_check_olr(obj->ring, seqno);
2577 if (ret)
2578 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002579
Ben Widawsky1500f7e2012-04-11 11:18:21 -07002580 ret = to->sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002581 if (!ret)
Mika Kuoppala7b01e262012-11-28 17:18:45 +02002582 /* We use last_read_seqno because sync_to()
2583 * might have just caused seqno wrap under
2584 * the radar.
2585 */
2586 from->sync_seqno[idx] = obj->last_read_seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002587
Ben Widawskye3a5a222012-04-11 11:18:20 -07002588 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002589}
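/*
 * Illustrative sketch (not part of this file): the execbuffer path is the
 * typical caller, synchronising each object against the ring that is about
 * to use it before any commands are emitted, roughly:
 *
 *	for each object referenced by the batch:
 *		ret = i915_gem_object_sync(obj, ring);
 *		if (ret)
 *			return ret;
 *
 * With semaphores enabled this emits a GPU-side wait on @to and records it
 * in from->sync_seqno[]; otherwise it falls back to a CPU wait for
 * rendering, as the code above shows.
 */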
2590
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002591static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2592{
2593 u32 old_write_domain, old_read_domains;
2594
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002595 /* Force a pagefault for domain tracking on next user access */
2596 i915_gem_release_mmap(obj);
2597
Keith Packardb97c3d92011-06-24 21:02:59 -07002598 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2599 return;
2600
Chris Wilson97c809fd2012-10-09 19:24:38 +01002601 /* Wait for any direct GTT access to complete */
2602 mb();
2603
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002604 old_read_domains = obj->base.read_domains;
2605 old_write_domain = obj->base.write_domain;
2606
2607 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2608 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2609
2610 trace_i915_gem_object_change_domain(obj,
2611 old_read_domains,
2612 old_write_domain);
2613}
2614
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002615int i915_vma_unbind(struct i915_vma *vma)
Eric Anholt673a3942008-07-30 12:06:12 -07002616{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002617 struct drm_i915_gem_object *obj = vma->obj;
Daniel Vetter7bddb012012-02-09 17:15:47 +01002618 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson43e28f02013-01-08 10:53:09 +00002619 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002620
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002621 if (list_empty(&vma->vma_link))
Eric Anholt673a3942008-07-30 12:06:12 -07002622 return 0;
2623
Ben Widawsky433544b2013-08-13 18:09:06 -07002624 if (!drm_mm_node_allocated(&vma->node))
2625 goto destroy;
2626
Chris Wilson31d8d652012-05-24 19:11:20 +01002627 if (obj->pin_count)
2628 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002629
Chris Wilsonc4670ad2012-08-20 10:23:27 +01002630 BUG_ON(obj->pages == NULL);
2631
Chris Wilsona8198ee2011-04-13 22:04:09 +01002632 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002633 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002634 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002635 /* Continue on if we fail due to EIO, the GPU is hung so we
2636 * should be safe and we need to cleanup or else we might
2637 * cause memory corruption through use-after-free.
2638 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002639
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002640 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002641
Daniel Vetter96b47b62009-12-15 17:50:00 +01002642 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002643 ret = i915_gem_object_put_fence(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002644 if (ret)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002645 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002646
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002647 trace_i915_vma_unbind(vma);
Chris Wilsondb53a302011-02-03 11:57:46 +00002648
Daniel Vetter74898d72012-02-15 23:50:22 +01002649 if (obj->has_global_gtt_mapping)
2650 i915_gem_gtt_unbind_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002651 if (obj->has_aliasing_ppgtt_mapping) {
2652 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2653 obj->has_aliasing_ppgtt_mapping = 0;
2654 }
Daniel Vetter74163902012-02-15 23:50:21 +01002655 i915_gem_gtt_finish_object(obj);
Ben Widawsky401c29f2013-05-31 11:28:47 -07002656 i915_gem_object_unpin_pages(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002657
Ben Widawskyca191b12013-07-31 17:00:14 -07002658 list_del(&vma->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002659 /* Avoid an unnecessary call to unbind on rebind. */
Ben Widawsky5cacaac2013-07-31 17:00:13 -07002660 if (i915_is_ggtt(vma->vm))
2661 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002662
Ben Widawsky2f633152013-07-17 12:19:03 -07002663 drm_mm_remove_node(&vma->node);
Ben Widawsky433544b2013-08-13 18:09:06 -07002664
2665destroy:
Ben Widawsky2f633152013-07-17 12:19:03 -07002666 i915_gem_vma_destroy(vma);
2667
2668 /* Since the unbound list is global, only move to that list if
2669 * no more VMAs exist.
2670	 * NB: Until we have real VMAs there will only ever be one. */
2671 WARN_ON(!list_empty(&obj->vma_list));
2672 if (list_empty(&obj->vma_list))
2673 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
Eric Anholt673a3942008-07-30 12:06:12 -07002674
Chris Wilson88241782011-01-07 17:09:48 +00002675 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002676}
2677
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002678/**
2679 * Unbinds an object from the global GTT aperture.
2680 */
2681int
2682i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2683{
2684 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2685 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2686
Dan Carpenter58e73e12013-08-09 12:44:11 +03002687 if (!i915_gem_obj_ggtt_bound(obj))
Ben Widawsky07fe0b12013-07-31 17:00:10 -07002688 return 0;
2689
2690 if (obj->pin_count)
2691 return -EBUSY;
2692
2693 BUG_ON(obj->pages == NULL);
2694
2695 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2696}
2697
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002698int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002699{
2700 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002701 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002702 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002703
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002704 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002705 for_each_ring(ring, dev_priv, i) {
Ben Widawskyb6c74882012-08-14 14:35:14 -07002706 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2707 if (ret)
2708 return ret;
2709
Chris Wilson3e960502012-11-27 16:22:54 +00002710 ret = intel_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002711 if (ret)
2712 return ret;
2713 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002714
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002715 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002716}
2717
Chris Wilson9ce079e2012-04-17 15:31:30 +01002718static void i965_write_fence_reg(struct drm_device *dev, int reg,
2719 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002720{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002721 drm_i915_private_t *dev_priv = dev->dev_private;
Imre Deak56c844e2013-01-07 21:47:34 +02002722 int fence_reg;
2723 int fence_pitch_shift;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002724
Imre Deak56c844e2013-01-07 21:47:34 +02002725 if (INTEL_INFO(dev)->gen >= 6) {
2726 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2727 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2728 } else {
2729 fence_reg = FENCE_REG_965_0;
2730 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2731 }
2732
Chris Wilsond18b9612013-07-10 13:36:23 +01002733 fence_reg += reg * 8;
2734
2735	/* To work around incoherency with non-atomic 64-bit register updates,
2736 * we split the 64-bit update into two 32-bit writes. In order
2737 * for a partial fence not to be evaluated between writes, we
2738 * precede the update with write to turn off the fence register,
2739 * and only enable the fence as the last step.
2740 *
2741 * For extra levels of paranoia, we make sure each step lands
2742 * before applying the next step.
2743 */
2744 I915_WRITE(fence_reg, 0);
2745 POSTING_READ(fence_reg);
2746
Chris Wilson9ce079e2012-04-17 15:31:30 +01002747 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002748 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilsond18b9612013-07-10 13:36:23 +01002749 uint64_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002750
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002751 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
Chris Wilson9ce079e2012-04-17 15:31:30 +01002752 0xfffff000) << 32;
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002753 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
Imre Deak56c844e2013-01-07 21:47:34 +02002754 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002755 if (obj->tiling_mode == I915_TILING_Y)
2756 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2757 val |= I965_FENCE_REG_VALID;
Daniel Vetterc6642782010-11-12 13:46:18 +00002758
Chris Wilsond18b9612013-07-10 13:36:23 +01002759 I915_WRITE(fence_reg + 4, val >> 32);
2760 POSTING_READ(fence_reg + 4);
2761
2762 I915_WRITE(fence_reg + 0, val);
2763 POSTING_READ(fence_reg);
2764 } else {
2765 I915_WRITE(fence_reg + 4, 0);
2766 POSTING_READ(fence_reg + 4);
2767 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002768}
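/*
 * Worked example (illustrative numbers only): for a hypothetical X-tiled
 * object at GGTT offset 0x00100000, of size 0x00080000 and stride 4096,
 * the code above builds
 *
 *	upper 32 bits: (0x00100000 + 0x00080000 - 4096) & 0xfffff000
 *	             = 0x0017f000		(start of the last fenced page)
 *	lower 32 bits: 0x00100000		(start address)
 *	             | (4096 / 128 - 1) << fence_pitch_shift	(pitch field = 31)
 *	             | I965_FENCE_REG_VALID
 *
 * and writes the two halves separately, with the fence disabled in between,
 * for the reasons spelled out in the comment above.
 */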
2769
Chris Wilson9ce079e2012-04-17 15:31:30 +01002770static void i915_write_fence_reg(struct drm_device *dev, int reg,
2771 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002772{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002773 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002774 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002775
Chris Wilson9ce079e2012-04-17 15:31:30 +01002776 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002777 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002778 int pitch_val;
2779 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002780
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002781 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002782 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002783 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2784 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2785 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002786
2787 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2788 tile_width = 128;
2789 else
2790 tile_width = 512;
2791
2792 /* Note: pitch better be a power of two tile widths */
2793 pitch_val = obj->stride / tile_width;
2794 pitch_val = ffs(pitch_val) - 1;
2795
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002796 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002797 if (obj->tiling_mode == I915_TILING_Y)
2798 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2799 val |= I915_FENCE_SIZE_BITS(size);
2800 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2801 val |= I830_FENCE_REG_VALID;
2802 } else
2803 val = 0;
2804
2805 if (reg < 8)
2806 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002807 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002808 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002809
Chris Wilson9ce079e2012-04-17 15:31:30 +01002810 I915_WRITE(reg, val);
2811 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002812}
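/*
 * Worked example (illustrative numbers only): an X-tiled object here uses
 * 512-byte tile rows, so a stride of 2048 bytes gives
 *
 *	pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2
 *
 * i.e. the log2 encoding the hardware expects. The WARN above enforces that
 * the object is power-of-two sized and naturally aligned, so that
 * I915_FENCE_SIZE_BITS() and the start address can share the register.
 */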
2813
Chris Wilson9ce079e2012-04-17 15:31:30 +01002814static void i830_write_fence_reg(struct drm_device *dev, int reg,
2815 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002816{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002817 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002818 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002819
Chris Wilson9ce079e2012-04-17 15:31:30 +01002820 if (obj) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002821 u32 size = i915_gem_obj_ggtt_size(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002822 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002823
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002824 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
Chris Wilson9ce079e2012-04-17 15:31:30 +01002825 (size & -size) != size ||
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002826 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2827 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2828 i915_gem_obj_ggtt_offset(obj), size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002829
Chris Wilson9ce079e2012-04-17 15:31:30 +01002830 pitch_val = obj->stride / 128;
2831 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002832
Ben Widawskyf343c5f2013-07-05 14:41:04 -07002833 val = i915_gem_obj_ggtt_offset(obj);
Chris Wilson9ce079e2012-04-17 15:31:30 +01002834 if (obj->tiling_mode == I915_TILING_Y)
2835 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2836 val |= I830_FENCE_SIZE_BITS(size);
2837 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2838 val |= I830_FENCE_REG_VALID;
2839 } else
2840 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002841
Chris Wilson9ce079e2012-04-17 15:31:30 +01002842 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2843 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2844}
2845
Chris Wilsond0a57782012-10-09 19:24:37 +01002846static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2847{
2848 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2849}
2850
Chris Wilson9ce079e2012-04-17 15:31:30 +01002851static void i915_gem_write_fence(struct drm_device *dev, int reg,
2852 struct drm_i915_gem_object *obj)
2853{
Chris Wilsond0a57782012-10-09 19:24:37 +01002854 struct drm_i915_private *dev_priv = dev->dev_private;
2855
2856 /* Ensure that all CPU reads are completed before installing a fence
2857 * and all writes before removing the fence.
2858 */
2859 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2860 mb();
2861
Daniel Vetter94a335d2013-07-17 14:51:28 +02002862 WARN(obj && (!obj->stride || !obj->tiling_mode),
2863 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2864 obj->stride, obj->tiling_mode);
2865
Chris Wilson9ce079e2012-04-17 15:31:30 +01002866 switch (INTEL_INFO(dev)->gen) {
2867 case 7:
Imre Deak56c844e2013-01-07 21:47:34 +02002868 case 6:
Chris Wilson9ce079e2012-04-17 15:31:30 +01002869 case 5:
2870 case 4: i965_write_fence_reg(dev, reg, obj); break;
2871 case 3: i915_write_fence_reg(dev, reg, obj); break;
2872 case 2: i830_write_fence_reg(dev, reg, obj); break;
Ben Widawsky7dbf9d62012-12-18 10:31:22 -08002873 default: BUG();
Chris Wilson9ce079e2012-04-17 15:31:30 +01002874 }
Chris Wilsond0a57782012-10-09 19:24:37 +01002875
2876 /* And similarly be paranoid that no direct access to this region
2877 * is reordered to before the fence is installed.
2878 */
2879 if (i915_gem_object_needs_mb(obj))
2880 mb();
Jesse Barnesde151cf2008-11-12 10:03:55 -08002881}
2882
Chris Wilson61050802012-04-17 15:31:31 +01002883static inline int fence_number(struct drm_i915_private *dev_priv,
2884 struct drm_i915_fence_reg *fence)
2885{
2886 return fence - dev_priv->fence_regs;
2887}
2888
2889static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2890 struct drm_i915_fence_reg *fence,
2891 bool enable)
2892{
Chris Wilson2dc8aae2013-05-22 17:08:06 +01002893 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson46a0b632013-07-10 13:36:24 +01002894 int reg = fence_number(dev_priv, fence);
Chris Wilson61050802012-04-17 15:31:31 +01002895
Chris Wilson46a0b632013-07-10 13:36:24 +01002896 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
Chris Wilson61050802012-04-17 15:31:31 +01002897
2898 if (enable) {
Chris Wilson46a0b632013-07-10 13:36:24 +01002899 obj->fence_reg = reg;
Chris Wilson61050802012-04-17 15:31:31 +01002900 fence->obj = obj;
2901 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2902 } else {
2903 obj->fence_reg = I915_FENCE_REG_NONE;
2904 fence->obj = NULL;
2905 list_del_init(&fence->lru_list);
2906 }
Daniel Vetter94a335d2013-07-17 14:51:28 +02002907 obj->fence_dirty = false;
Chris Wilson61050802012-04-17 15:31:31 +01002908}
2909
Chris Wilsond9e86c02010-11-10 16:40:20 +00002910static int
Chris Wilsond0a57782012-10-09 19:24:37 +01002911i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002912{
Chris Wilson1c293ea2012-04-17 15:31:27 +01002913 if (obj->last_fenced_seqno) {
Chris Wilson86d5bc32012-07-20 12:41:04 +01002914 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01002915 if (ret)
2916 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002917
2918 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002919 }
2920
Chris Wilson86d5bc32012-07-20 12:41:04 +01002921 obj->fenced_gpu_access = false;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002922 return 0;
2923}
2924
2925int
2926i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2927{
Chris Wilson61050802012-04-17 15:31:31 +01002928 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsonf9c513e2013-03-26 11:29:27 +00002929 struct drm_i915_fence_reg *fence;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002930 int ret;
2931
Chris Wilsond0a57782012-10-09 19:24:37 +01002932 ret = i915_gem_object_wait_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002933 if (ret)
2934 return ret;
2935
Chris Wilson61050802012-04-17 15:31:31 +01002936 if (obj->fence_reg == I915_FENCE_REG_NONE)
2937 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002938
Chris Wilsonf9c513e2013-03-26 11:29:27 +00002939 fence = &dev_priv->fence_regs[obj->fence_reg];
2940
Chris Wilson61050802012-04-17 15:31:31 +01002941 i915_gem_object_fence_lost(obj);
Chris Wilsonf9c513e2013-03-26 11:29:27 +00002942 i915_gem_object_update_fence(obj, fence, false);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002943
2944 return 0;
2945}
2946
2947static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01002948i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01002949{
Daniel Vetterae3db242010-02-19 11:51:58 +01002950 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01002951 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002952 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01002953
2954 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002955 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002956 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2957 reg = &dev_priv->fence_regs[i];
2958 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002959 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002960
Chris Wilson1690e1e2011-12-14 13:57:08 +01002961 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002962 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002963 }
2964
Chris Wilsond9e86c02010-11-10 16:40:20 +00002965 if (avail == NULL)
2966 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002967
2968 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002969 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01002970 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01002971 continue;
2972
Chris Wilson8fe301a2012-04-17 15:31:28 +01002973 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002974 }
2975
Chris Wilson8fe301a2012-04-17 15:31:28 +01002976 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002977}
2978
Jesse Barnesde151cf2008-11-12 10:03:55 -08002979/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002980 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08002981 * @obj: object to map through a fence reg
2982 *
2983 * When mapping objects through the GTT, userspace wants to be able to write
2984 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002985 * This function walks the fence regs looking for a free one for @obj,
2986 * stealing one if it can't find any.
2987 *
2988 * It then sets up the reg based on the object's properties: address, pitch
2989 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002990 *
2991 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002992 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002993int
Chris Wilson06d98132012-04-17 15:31:24 +01002994i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002995{
Chris Wilson05394f32010-11-08 19:18:58 +00002996 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002997 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01002998 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002999 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01003000 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003001
Chris Wilson14415742012-04-17 15:31:33 +01003002 /* Have we updated the tiling parameters upon the object and so
3003 * will need to serialise the write to the associated fence register?
3004 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003005 if (obj->fence_dirty) {
Chris Wilsond0a57782012-10-09 19:24:37 +01003006 ret = i915_gem_object_wait_fence(obj);
Chris Wilson14415742012-04-17 15:31:33 +01003007 if (ret)
3008 return ret;
3009 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00003010
Chris Wilsond9e86c02010-11-10 16:40:20 +00003011 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00003012 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3013 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01003014 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01003015 list_move_tail(&reg->lru_list,
3016 &dev_priv->mm.fence_list);
3017 return 0;
3018 }
3019 } else if (enable) {
3020 reg = i915_find_fence_reg(dev);
3021 if (reg == NULL)
3022 return -EDEADLK;
Chris Wilsond9e86c02010-11-10 16:40:20 +00003023
Chris Wilson14415742012-04-17 15:31:33 +01003024 if (reg->obj) {
3025 struct drm_i915_gem_object *old = reg->obj;
3026
Chris Wilsond0a57782012-10-09 19:24:37 +01003027 ret = i915_gem_object_wait_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003028 if (ret)
3029 return ret;
3030
Chris Wilson14415742012-04-17 15:31:33 +01003031 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00003032 }
Chris Wilson14415742012-04-17 15:31:33 +01003033 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07003034 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07003035
Chris Wilson14415742012-04-17 15:31:33 +01003036 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson14415742012-04-17 15:31:33 +01003037
Chris Wilson9ce079e2012-04-17 15:31:30 +01003038 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003039}
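/*
 * Illustrative sketch (not part of this file): a typical kernel-side caller
 * (the scanout pinning path, for example) binds the object mappable and
 * fenceable first and only then asks for a fence, roughly:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj);
 *
 * For an untiled object the call is still valid and simply releases any
 * fence the object happens to hold, as noted in the comment above.
 */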
3040
Chris Wilson42d6ab42012-07-26 11:49:32 +01003041static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3042 struct drm_mm_node *gtt_space,
3043 unsigned long cache_level)
3044{
3045 struct drm_mm_node *other;
3046
3047 /* On non-LLC machines we have to be careful when putting differing
3048 * types of snoopable memory together to avoid the prefetcher
Damien Lespiau4239ca72012-12-03 16:26:16 +00003049 * crossing memory domains and dying.
Chris Wilson42d6ab42012-07-26 11:49:32 +01003050 */
3051 if (HAS_LLC(dev))
3052 return true;
3053
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003054 if (!drm_mm_node_allocated(gtt_space))
Chris Wilson42d6ab42012-07-26 11:49:32 +01003055 return true;
3056
3057 if (list_empty(&gtt_space->node_list))
3058 return true;
3059
3060 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3061 if (other->allocated && !other->hole_follows && other->color != cache_level)
3062 return false;
3063
3064 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3065 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3066 return false;
3067
3068 return true;
3069}
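/*
 * Illustrative example: on a non-LLC machine, placing an uncached
 * (I915_CACHE_NONE) node immediately next to a snooped (I915_CACHE_LLC)
 * neighbour, with no hole in between, fails the colour checks above; the
 * prefetcher could otherwise wander from one object into the other across
 * the cache-domain boundary. On LLC machines the function short-circuits
 * to true.
 */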
3070
3071static void i915_gem_verify_gtt(struct drm_device *dev)
3072{
3073#if WATCH_GTT
3074 struct drm_i915_private *dev_priv = dev->dev_private;
3075 struct drm_i915_gem_object *obj;
3076 int err = 0;
3077
Ben Widawsky35c20a62013-05-31 11:28:48 -07003078 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
Chris Wilson42d6ab42012-07-26 11:49:32 +01003079 if (obj->gtt_space == NULL) {
3080 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3081 err++;
3082 continue;
3083 }
3084
3085 if (obj->cache_level != obj->gtt_space->color) {
3086 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003087 i915_gem_obj_ggtt_offset(obj),
3088 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003089 obj->cache_level,
3090 obj->gtt_space->color);
3091 err++;
3092 continue;
3093 }
3094
3095 if (!i915_gem_valid_gtt_space(dev,
3096 obj->gtt_space,
3097 obj->cache_level)) {
3098 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003099 i915_gem_obj_ggtt_offset(obj),
3100 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
Chris Wilson42d6ab42012-07-26 11:49:32 +01003101 obj->cache_level);
3102 err++;
3103 continue;
3104 }
3105 }
3106
3107 WARN_ON(err);
3108#endif
3109}
3110
Jesse Barnesde151cf2008-11-12 10:03:55 -08003111/**
Eric Anholt673a3942008-07-30 12:06:12 -07003112 * Finds free space in the GTT aperture and binds the object there.
3113 */
3114static int
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003115i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3116 struct i915_address_space *vm,
3117 unsigned alignment,
3118 bool map_and_fenceable,
3119 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003120{
Chris Wilson05394f32010-11-08 19:18:58 +00003121 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003122 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter5e783302010-11-14 22:32:36 +01003123 u32 size, fence_size, fence_alignment, unfenced_alignment;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003124 size_t gtt_max =
3125 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
Ben Widawsky2f633152013-07-17 12:19:03 -07003126 struct i915_vma *vma;
Chris Wilson07f73f62009-09-14 16:50:30 +01003127 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003128
Chris Wilsone28f8712011-07-18 13:11:49 -07003129 fence_size = i915_gem_get_gtt_size(dev,
3130 obj->base.size,
3131 obj->tiling_mode);
3132 fence_alignment = i915_gem_get_gtt_alignment(dev,
3133 obj->base.size,
Imre Deakd8651102013-01-07 21:47:33 +02003134 obj->tiling_mode, true);
Chris Wilsone28f8712011-07-18 13:11:49 -07003135 unfenced_alignment =
Imre Deakd8651102013-01-07 21:47:33 +02003136 i915_gem_get_gtt_alignment(dev,
Chris Wilsone28f8712011-07-18 13:11:49 -07003137 obj->base.size,
Imre Deakd8651102013-01-07 21:47:33 +02003138 obj->tiling_mode, false);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003139
Eric Anholt673a3942008-07-30 12:06:12 -07003140 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01003141 alignment = map_and_fenceable ? fence_alignment :
3142 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003143 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003144 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3145 return -EINVAL;
3146 }
3147
Chris Wilson05394f32010-11-08 19:18:58 +00003148 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003149
Chris Wilson654fc602010-05-27 13:18:21 +01003150 /* If the object is bigger than the entire aperture, reject it early
3151 * before evicting everything in a vain attempt to find space.
3152 */
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003153 if (obj->base.size > gtt_max) {
Jani Nikula3765f302013-06-07 16:03:50 +03003154 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
Chris Wilsona36689c2013-05-21 16:58:49 +01003155 obj->base.size,
3156 map_and_fenceable ? "mappable" : "total",
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003157 gtt_max);
Chris Wilson654fc602010-05-27 13:18:21 +01003158 return -E2BIG;
3159 }
3160
Chris Wilson37e680a2012-06-07 15:38:42 +01003161 ret = i915_gem_object_get_pages(obj);
Chris Wilson6c085a72012-08-20 11:40:46 +02003162 if (ret)
3163 return ret;
3164
Chris Wilsonfbdda6f2012-11-20 10:45:16 +00003165 i915_gem_object_pin_pages(obj);
3166
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003167 BUG_ON(!i915_is_ggtt(vm));
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003168
Ben Widawskyaccfef22013-08-14 11:38:35 +02003169 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
Dan Carpenterdb473b32013-07-19 08:45:46 +03003170 if (IS_ERR(vma)) {
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003171 ret = PTR_ERR(vma);
3172 goto err_unpin;
Ben Widawsky2f633152013-07-17 12:19:03 -07003173 }
3174
Ben Widawskyaccfef22013-08-14 11:38:35 +02003175 /* For now we only ever use 1 vma per object */
3176 WARN_ON(!list_is_singular(&obj->vma_list));
3177
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003178search_free:
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003179 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
Ben Widawsky0a9ae0d2013-05-25 12:26:35 -07003180 size, alignment,
3181 obj->cache_level, 0, gtt_max);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003182 if (ret) {
Ben Widawskyf6cd1f12013-07-31 17:00:11 -07003183 ret = i915_gem_evict_something(dev, vm, size, alignment,
Chris Wilson42d6ab42012-07-26 11:49:32 +01003184 obj->cache_level,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003185 map_and_fenceable,
3186 nonblocking);
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003187 if (ret == 0)
3188 goto search_free;
Chris Wilson97311292009-09-21 00:22:34 +01003189
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003190 goto err_free_vma;
Chris Wilsondc9dd7a2012-12-07 20:37:07 +00003191 }
Ben Widawsky2f633152013-07-17 12:19:03 -07003192 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
Ben Widawskyc6cfb322013-07-05 14:41:06 -07003193 obj->cache_level))) {
Ben Widawsky2f633152013-07-17 12:19:03 -07003194 ret = -EINVAL;
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003195 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003196 }
3197
Daniel Vetter74163902012-02-15 23:50:21 +01003198 ret = i915_gem_gtt_prepare_object(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003199 if (ret)
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003200 goto err_remove_node;
Eric Anholt673a3942008-07-30 12:06:12 -07003201
Ben Widawsky35c20a62013-05-31 11:28:48 -07003202 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
Ben Widawskyca191b12013-07-31 17:00:14 -07003203 list_add_tail(&vma->mm_list, &vm->inactive_list);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003204
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003205 if (i915_is_ggtt(vm)) {
3206 bool mappable, fenceable;
Chris Wilsona00b10c2010-09-24 21:15:47 +01003207
Daniel Vetter49987092013-08-14 10:21:23 +02003208 fenceable = (vma->node.size == fence_size &&
3209 (vma->node.start & (fence_alignment - 1)) == 0);
Chris Wilsona00b10c2010-09-24 21:15:47 +01003210
Daniel Vetter49987092013-08-14 10:21:23 +02003211 mappable = (vma->node.start + obj->base.size <=
3212 dev_priv->gtt.mappable_end);
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003213
Ben Widawsky5cacaac2013-07-31 17:00:13 -07003214 obj->map_and_fenceable = mappable && fenceable;
Ben Widawsky4bd561b2013-08-13 18:09:07 -07003215 }
Daniel Vetter75e9e912010-11-04 17:11:09 +01003216
Ben Widawsky7ace7ef2013-08-09 22:12:12 -07003217 WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
3218
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003219 trace_i915_vma_bind(vma, map_and_fenceable);
Chris Wilson42d6ab42012-07-26 11:49:32 +01003220 i915_gem_verify_gtt(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003221 return 0;
Ben Widawsky2f633152013-07-17 12:19:03 -07003222
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003223err_remove_node:
Dan Carpenter6286ef92013-07-19 08:46:27 +03003224 drm_mm_remove_node(&vma->node);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003225err_free_vma:
Ben Widawsky2f633152013-07-17 12:19:03 -07003226 i915_gem_vma_destroy(vma);
Daniel Vetterbc6bc152013-07-22 12:12:38 +02003227err_unpin:
Ben Widawsky2f633152013-07-17 12:19:03 -07003228 i915_gem_object_unpin_pages(obj);
Ben Widawsky2f633152013-07-17 12:19:03 -07003229 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003230}
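/*
 * Worked example (no new behaviour, just the choices above spelled out):
 * binding with map_and_fenceable set sizes the node as fence_size, requires
 * the alignment to be a multiple of fence_alignment and confines the search
 * to [0, dev_priv->gtt.mappable_end); an unfenced, unmappable binding only
 * needs obj->base.size and unfenced_alignment and may use the whole of
 * vm->total.
 */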
3231
Chris Wilson000433b2013-08-08 14:41:09 +01003232bool
Chris Wilson2c225692013-08-09 12:26:45 +01003233i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3234 bool force)
Eric Anholt673a3942008-07-30 12:06:12 -07003235{
Eric Anholt673a3942008-07-30 12:06:12 -07003236 /* If we don't have a page list set up, then we're not pinned
3237 * to GPU, and we can ignore the cache flush because it'll happen
3238 * again at bind time.
3239 */
Chris Wilson05394f32010-11-08 19:18:58 +00003240 if (obj->pages == NULL)
Chris Wilson000433b2013-08-08 14:41:09 +01003241 return false;
Eric Anholt673a3942008-07-30 12:06:12 -07003242
Imre Deak769ce462013-02-13 21:56:05 +02003243 /*
3244 * Stolen memory is always coherent with the GPU as it is explicitly
3245 * marked as wc by the system, or the system is cache-coherent.
3246 */
3247 if (obj->stolen)
Chris Wilson000433b2013-08-08 14:41:09 +01003248 return false;
Imre Deak769ce462013-02-13 21:56:05 +02003249
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003250 /* If the GPU is snooping the contents of the CPU cache,
3251 * we do not need to manually clear the CPU cache lines. However,
3252 * the caches are only snooped when the render cache is
3253 * flushed/invalidated. As we always have to emit invalidations
3254 * and flushes when moving into and out of the RENDER domain, correct
3255 * snooping behaviour occurs naturally as the result of our domain
3256 * tracking.
3257 */
Chris Wilson2c225692013-08-09 12:26:45 +01003258 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
Chris Wilson000433b2013-08-08 14:41:09 +01003259 return false;
Chris Wilson9c23f7f2011-03-29 16:59:52 -07003260
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003261 trace_i915_gem_object_clflush(obj);
Chris Wilson9da3da62012-06-01 15:20:22 +01003262 drm_clflush_sg(obj->pages);
Chris Wilson000433b2013-08-08 14:41:09 +01003263
3264 return true;
Eric Anholte47c68e2008-11-14 13:35:19 -08003265}
3266
3267/** Flushes the GTT write domain for the object if it's dirty. */
3268static void
Chris Wilson05394f32010-11-08 19:18:58 +00003269i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003270{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003271 uint32_t old_write_domain;
3272
Chris Wilson05394f32010-11-08 19:18:58 +00003273 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08003274 return;
3275
Chris Wilson63256ec2011-01-04 18:42:07 +00003276 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08003277 * to it immediately go to main memory as far as we know, so there's
3278 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00003279 *
3280 * However, we do have to enforce the order so that all writes through
3281 * the GTT land before any writes to the device, such as updates to
3282 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08003283 */
Chris Wilson63256ec2011-01-04 18:42:07 +00003284 wmb();
3285
Chris Wilson05394f32010-11-08 19:18:58 +00003286 old_write_domain = obj->base.write_domain;
3287 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003288
3289 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003290 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003291 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003292}
3293
3294/** Flushes the CPU write domain for the object if it's dirty. */
3295static void
Chris Wilson2c225692013-08-09 12:26:45 +01003296i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3297 bool force)
Eric Anholte47c68e2008-11-14 13:35:19 -08003298{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003299 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08003300
Chris Wilson05394f32010-11-08 19:18:58 +00003301 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08003302 return;
3303
Chris Wilson000433b2013-08-08 14:41:09 +01003304 if (i915_gem_clflush_object(obj, force))
3305 i915_gem_chipset_flush(obj->base.dev);
3306
Chris Wilson05394f32010-11-08 19:18:58 +00003307 old_write_domain = obj->base.write_domain;
3308 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003309
3310 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00003311 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003312 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08003313}
3314
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003315/**
3316 * Moves a single object to the GTT read, and possibly write domain.
3317 *
3318 * This function returns when the move is complete, including waiting on
3319 * flushes to occur.
3320 */
Jesse Barnes79e53942008-11-07 14:24:08 -08003321int
Chris Wilson20217462010-11-23 15:26:33 +00003322i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003323{
Chris Wilson8325a092012-04-24 15:52:35 +01003324 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003325 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003326 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003327
Eric Anholt02354392008-11-26 13:58:13 -08003328 /* Not valid to be called on unbound objects. */
Ben Widawsky98438772013-07-31 17:00:12 -07003329 if (!i915_gem_obj_bound_any(obj))
Eric Anholt02354392008-11-26 13:58:13 -08003330 return -EINVAL;
3331
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003332 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3333 return 0;
3334
Chris Wilson0201f1e2012-07-20 12:41:01 +01003335 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003336 if (ret)
3337 return ret;
3338
Chris Wilson2c225692013-08-09 12:26:45 +01003339 i915_gem_object_flush_cpu_write_domain(obj, false);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003340
Chris Wilsond0a57782012-10-09 19:24:37 +01003341 /* Serialise direct access to this object with the barriers for
3342 * coherent writes from the GPU, by effectively invalidating the
3343 * GTT domain upon first access.
3344 */
3345 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3346 mb();
3347
Chris Wilson05394f32010-11-08 19:18:58 +00003348 old_write_domain = obj->base.write_domain;
3349 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003350
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003351 /* It should now be out of any other write domains, and we can update
3352 * the domain values for our changes.
3353 */
Chris Wilson05394f32010-11-08 19:18:58 +00003354 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3355 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08003356 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003357 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3358 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3359 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08003360 }
3361
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003362 trace_i915_gem_object_change_domain(obj,
3363 old_read_domains,
3364 old_write_domain);
3365
Chris Wilson8325a092012-04-24 15:52:35 +01003366 /* And bump the LRU for this access */
Ben Widawskyca191b12013-07-31 17:00:14 -07003367 if (i915_gem_object_is_inactive(obj)) {
3368 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
3369 &dev_priv->gtt.base);
3370 if (vma)
3371 list_move_tail(&vma->mm_list,
3372 &dev_priv->gtt.base.inactive_list);
3373
3374 }
Chris Wilson8325a092012-04-24 15:52:35 +01003375
Eric Anholte47c68e2008-11-14 13:35:19 -08003376 return 0;
3377}
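/*
 * Illustrative sketch (not part of this file): callers that are about to
 * write through a GTT mapping (the pwrite and fault paths, for instance)
 * move the object into the GTT domain first while holding struct_mutex:
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	... write through the GTT mapping ...
 *
 * so that the CPU write domain is flushed beforehand and the dirty GTT
 * write domain is tracked for later flushing.
 */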
3378
Chris Wilsone4ffd172011-04-04 09:44:39 +01003379int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3380 enum i915_cache_level cache_level)
3381{
Daniel Vetter7bddb012012-02-09 17:15:47 +01003382 struct drm_device *dev = obj->base.dev;
3383 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003384 struct i915_vma *vma;
Chris Wilsone4ffd172011-04-04 09:44:39 +01003385 int ret;
3386
3387 if (obj->cache_level == cache_level)
3388 return 0;
3389
3390 if (obj->pin_count) {
3391 DRM_DEBUG("can not change the cache level of pinned objects\n");
3392 return -EBUSY;
3393 }
3394
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003395 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3396 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003397 ret = i915_vma_unbind(vma);
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003398 if (ret)
3399 return ret;
3400
3401 break;
3402 }
Chris Wilson42d6ab42012-07-26 11:49:32 +01003403 }
3404
Ben Widawsky3089c6f2013-07-31 17:00:03 -07003405 if (i915_gem_obj_bound_any(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003406 ret = i915_gem_object_finish_gpu(obj);
3407 if (ret)
3408 return ret;
3409
3410 i915_gem_object_finish_gtt(obj);
3411
3412 /* Before SandyBridge, you could not use tiling or fence
3413 * registers with snooped memory, so relinquish any fences
3414 * currently pointing to our region in the aperture.
3415 */
Chris Wilson42d6ab42012-07-26 11:49:32 +01003416 if (INTEL_INFO(dev)->gen < 6) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003417 ret = i915_gem_object_put_fence(obj);
3418 if (ret)
3419 return ret;
3420 }
3421
Daniel Vetter74898d72012-02-15 23:50:22 +01003422 if (obj->has_global_gtt_mapping)
3423 i915_gem_gtt_bind_object(obj, cache_level);
Daniel Vetter7bddb012012-02-09 17:15:47 +01003424 if (obj->has_aliasing_ppgtt_mapping)
3425 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3426 obj, cache_level);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003427 }
3428
Chris Wilson2c225692013-08-09 12:26:45 +01003429 list_for_each_entry(vma, &obj->vma_list, vma_link)
3430 vma->node.color = cache_level;
3431 obj->cache_level = cache_level;
3432
3433 if (cpu_write_needs_clflush(obj)) {
Chris Wilsone4ffd172011-04-04 09:44:39 +01003434 u32 old_read_domains, old_write_domain;
3435
3436 /* If we're coming from LLC cached, then we haven't
3437 * actually been tracking whether the data is in the
3438 * CPU cache or not, since we only allow one bit set
3439 * in obj->write_domain and have been skipping the clflushes.
3440 * Just set it to the CPU cache for now.
3441 */
3442 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003443
3444 old_read_domains = obj->base.read_domains;
3445 old_write_domain = obj->base.write_domain;
3446
3447 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3448 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3449
3450 trace_i915_gem_object_change_domain(obj,
3451 old_read_domains,
3452 old_write_domain);
3453 }
3454
Chris Wilson42d6ab42012-07-26 11:49:32 +01003455 i915_gem_verify_gtt(dev);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003456 return 0;
3457}
3458
Ben Widawsky199adf42012-09-21 17:01:20 -07003459int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3460 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003461{
Ben Widawsky199adf42012-09-21 17:01:20 -07003462 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003463 struct drm_i915_gem_object *obj;
3464 int ret;
3465
3466 ret = i915_mutex_lock_interruptible(dev);
3467 if (ret)
3468 return ret;
3469
3470 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3471 if (&obj->base == NULL) {
3472 ret = -ENOENT;
3473 goto unlock;
3474 }
3475
Chris Wilson651d7942013-08-08 14:41:10 +01003476 switch (obj->cache_level) {
3477 case I915_CACHE_LLC:
3478 case I915_CACHE_L3_LLC:
3479 args->caching = I915_CACHING_CACHED;
3480 break;
3481
Chris Wilson4257d3b2013-08-08 14:41:11 +01003482 case I915_CACHE_WT:
3483 args->caching = I915_CACHING_DISPLAY;
3484 break;
3485
Chris Wilson651d7942013-08-08 14:41:10 +01003486 default:
3487 args->caching = I915_CACHING_NONE;
3488 break;
3489 }
Chris Wilsone6994ae2012-07-10 10:27:08 +01003490
3491 drm_gem_object_unreference(&obj->base);
3492unlock:
3493 mutex_unlock(&dev->struct_mutex);
3494 return ret;
3495}
3496
Ben Widawsky199adf42012-09-21 17:01:20 -07003497int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3498 struct drm_file *file)
Chris Wilsone6994ae2012-07-10 10:27:08 +01003499{
Ben Widawsky199adf42012-09-21 17:01:20 -07003500 struct drm_i915_gem_caching *args = data;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003501 struct drm_i915_gem_object *obj;
3502 enum i915_cache_level level;
3503 int ret;
3504
Ben Widawsky199adf42012-09-21 17:01:20 -07003505 switch (args->caching) {
3506 case I915_CACHING_NONE:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003507 level = I915_CACHE_NONE;
3508 break;
Ben Widawsky199adf42012-09-21 17:01:20 -07003509 case I915_CACHING_CACHED:
Chris Wilsone6994ae2012-07-10 10:27:08 +01003510 level = I915_CACHE_LLC;
3511 break;
Chris Wilson4257d3b2013-08-08 14:41:11 +01003512 case I915_CACHING_DISPLAY:
3513 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3514 break;
Chris Wilsone6994ae2012-07-10 10:27:08 +01003515 default:
3516 return -EINVAL;
3517 }
3518
Ben Widawsky3bc29132012-09-26 16:15:20 -07003519 ret = i915_mutex_lock_interruptible(dev);
3520 if (ret)
3521 return ret;
3522
Chris Wilsone6994ae2012-07-10 10:27:08 +01003523 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3524 if (&obj->base == NULL) {
3525 ret = -ENOENT;
3526 goto unlock;
3527 }
3528
3529 ret = i915_gem_object_set_cache_level(obj, level);
3530
3531 drm_gem_object_unreference(&obj->base);
3532unlock:
3533 mutex_unlock(&dev->struct_mutex);
3534 return ret;
3535}
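/*
 * Illustrative sketch (not part of this file): from userspace the two
 * caching ioctls above are driven through libdrm with the uapi struct,
 * e.g. to request LLC/snooped caching for a buffer ("fd" and "handle"
 * assumed as before):
 *
 *	struct drm_i915_gem_caching arg = { 0 };
 *	arg.handle = handle;
 *	arg.caching = I915_CACHING_CACHED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 * Requesting I915_CACHING_DISPLAY maps to I915_CACHE_WT only on hardware
 * with write-through support (HAS_WT) and falls back to uncached otherwise.
 */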
3536
Chris Wilsoncc98b412013-08-09 12:25:09 +01003537static bool is_pin_display(struct drm_i915_gem_object *obj)
3538{
3539 /* There are 3 sources that pin objects:
3540 * 1. The display engine (scanouts, sprites, cursors);
3541 * 2. Reservations for execbuffer;
3542 * 3. The user.
3543 *
3544 * We can ignore reservations as we hold the struct_mutex and
3545 * are only called outside of the reservation path. The user
3546 * can only increment pin_count once, and so if after
3547 * subtracting the potential reference by the user, any pin_count
3548 * remains, it must be due to another use by the display engine.
3549 */
3550 return obj->pin_count - !!obj->user_pin_count;
3551}
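/*
 * Worked example: an object pinned once for scanout and once by the user
 * has pin_count == 2 and user_pin_count == 1, so the expression above
 * yields 2 - 1 = 1 and the object still counts as a display pin; dropping
 * the scanout pin leaves 1 - 1 = 0 and it no longer does.
 */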
3552
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003553/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003554 * Prepare buffer for display plane (scanout, cursors, etc).
3555 * Can be called from an uninterruptible phase (modesetting) and allows
3556 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003557 */
3558int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003559i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3560 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00003561 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003562{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003563 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003564 int ret;
3565
Chris Wilson0be73282010-12-06 14:36:27 +00003566 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07003567 ret = i915_gem_object_sync(obj, pipelined);
3568 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003569 return ret;
3570 }
3571
Chris Wilsoncc98b412013-08-09 12:25:09 +01003572 /* Mark the pin_display early so that we account for the
3573 * display coherency whilst setting up the cache domains.
3574 */
3575 obj->pin_display = true;
3576
Eric Anholta7ef0642011-03-29 16:59:54 -07003577 /* The display engine is not coherent with the LLC cache on gen6. As
3578 * a result, we make sure that the pinning that is about to occur is
3579 * done with uncached PTEs. This is lowest common denominator for all
3580 * chipsets.
3581 *
3582 * However for gen6+, we could do better by using the GFDT bit instead
3583 * of uncaching, which would allow us to flush all the LLC-cached data
3584 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3585 */
Chris Wilson651d7942013-08-08 14:41:10 +01003586 ret = i915_gem_object_set_cache_level(obj,
3587 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
Eric Anholta7ef0642011-03-29 16:59:54 -07003588 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003589 goto err_unpin_display;
Eric Anholta7ef0642011-03-29 16:59:54 -07003590
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003591 /* As the user may map the buffer once pinned in the display plane
3592 * (e.g. libkms for the bootup splash), we have to ensure that we
3593 * always use map_and_fenceable for all scanout buffers.
3594 */
Ben Widawskyc37e2202013-07-31 16:59:58 -07003595 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003596 if (ret)
Chris Wilsoncc98b412013-08-09 12:25:09 +01003597 goto err_unpin_display;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003598
Chris Wilson2c225692013-08-09 12:26:45 +01003599 i915_gem_object_flush_cpu_write_domain(obj, true);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003600
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003601 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003602 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003603
3604 /* It should now be out of any other write domains, and we can update
3605 * the domain values for our changes.
3606 */
Chris Wilsone5f1d962012-07-20 12:41:00 +01003607 obj->base.write_domain = 0;
Chris Wilson05394f32010-11-08 19:18:58 +00003608 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003609
3610 trace_i915_gem_object_change_domain(obj,
3611 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003612 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003613
3614 return 0;
Chris Wilsoncc98b412013-08-09 12:25:09 +01003615
3616err_unpin_display:
3617 obj->pin_display = is_pin_display(obj);
3618 return ret;
3619}
3620
3621void
3622i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3623{
3624 i915_gem_object_unpin(obj);
3625 obj->pin_display = is_pin_display(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003626}
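/*
 * Illustrative pairing of the two helpers above (a sketch, not lifted from
 * any particular caller): pin the object for scanout, program the plane,
 * then unpin once the object is no longer being scanned out.
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *	if (ret)
 *		return ret;
 *	... point the display plane at the object ...
 *	i915_gem_object_unpin_from_display_plane(obj);
 */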
3627
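/*
 * Wait for any outstanding rendering on @obj and drop its GPU read domains
 * so that the GPU caches and TLBs are invalidated the next time it is used.
 */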
Chris Wilson85345512010-11-13 09:49:11 +00003628int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003629i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003630{
Chris Wilson88241782011-01-07 17:09:48 +00003631 int ret;
3632
Chris Wilsona8198ee2011-04-13 22:04:09 +01003633 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003634 return 0;
3635
Chris Wilson0201f1e2012-07-20 12:41:01 +01003636 ret = i915_gem_object_wait_rendering(obj, false);
Chris Wilsonc501ae72011-12-14 13:57:23 +01003637 if (ret)
3638 return ret;
3639
Chris Wilsona8198ee2011-04-13 22:04:09 +01003640 /* Ensure that we invalidate the GPU's caches and TLBs. */
3641 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003642 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003643}
3644
Eric Anholte47c68e2008-11-14 13:35:19 -08003645/**
3646 * Moves a single object to the CPU read, and possibly write domain.
3647 *
3648 * This function returns when the move is complete, including waiting on
3649 * flushes to occur.
3650 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003651int
Chris Wilson919926a2010-11-12 13:42:53 +00003652i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003653{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003654 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003655 int ret;
3656
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003657 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3658 return 0;
3659
Chris Wilson0201f1e2012-07-20 12:41:01 +01003660 ret = i915_gem_object_wait_rendering(obj, !write);
Chris Wilson88241782011-01-07 17:09:48 +00003661 if (ret)
3662 return ret;
3663
Eric Anholte47c68e2008-11-14 13:35:19 -08003664 i915_gem_object_flush_gtt_write_domain(obj);
3665
Chris Wilson05394f32010-11-08 19:18:58 +00003666 old_write_domain = obj->base.write_domain;
3667 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003668
Eric Anholte47c68e2008-11-14 13:35:19 -08003669 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003670 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Chris Wilson2c225692013-08-09 12:26:45 +01003671 i915_gem_clflush_object(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003672
Chris Wilson05394f32010-11-08 19:18:58 +00003673 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003674 }
3675
3676 /* It should now be out of any other write domains, and we can update
3677 * the domain values for our changes.
3678 */
Chris Wilson05394f32010-11-08 19:18:58 +00003679 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003680
3681 /* If we're writing through the CPU, then the GPU read domains will
3682 * need to be invalidated at next use.
3683 */
3684 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003685 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3686 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003687 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003688
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003689 trace_i915_gem_object_change_domain(obj,
3690 old_read_domains,
3691 old_write_domain);
3692
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003693 return 0;
3694}
3695
Eric Anholt673a3942008-07-30 12:06:12 -07003696/* Throttle our rendering by waiting until the ring has completed our requests
3697 * emitted over 20 msec ago.
3698 *
Eric Anholtb9624422009-06-03 07:27:35 +00003699 * Note that if we were to use the current jiffies each time around the loop,
3700 * we wouldn't escape the function with any frames outstanding if the time to
3701 * render a frame was over 20ms.
3702 *
Eric Anholt673a3942008-07-30 12:06:12 -07003703 * This should get us reasonable parallelism between CPU and GPU but also
3704 * relatively low latency when blocking on a particular request to finish.
3705 */
3706static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003707i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003708{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003709 struct drm_i915_private *dev_priv = dev->dev_private;
3710 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003711 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003712 struct drm_i915_gem_request *request;
3713 struct intel_ring_buffer *ring = NULL;
Daniel Vetterf69061b2012-12-06 09:01:42 +01003714 unsigned reset_counter;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003715 u32 seqno = 0;
3716 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003717
Daniel Vetter308887a2012-11-14 17:14:06 +01003718 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3719 if (ret)
3720 return ret;
3721
3722 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3723 if (ret)
3724 return ret;
Chris Wilsone110e8d2011-01-26 15:39:14 +00003725
Chris Wilson1c255952010-09-26 11:03:27 +01003726 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003727 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003728 if (time_after_eq(request->emitted_jiffies, recent_enough))
3729 break;
3730
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003731 ring = request->ring;
3732 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003733 }
Daniel Vetterf69061b2012-12-06 09:01:42 +01003734 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilson1c255952010-09-26 11:03:27 +01003735 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003736
3737 if (seqno == 0)
3738 return 0;
3739
Daniel Vetterf69061b2012-12-06 09:01:42 +01003740 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003741 if (ret == 0)
3742 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003743
Eric Anholt673a3942008-07-30 12:06:12 -07003744 return ret;
3745}
3746
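/*
 * Pin @obj into @vm, binding it first if it is not already bound at an
 * acceptable offset. An @alignment of 0 accepts any offset;
 * map_and_fenceable is only meaningful for the global GTT.
 */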
Eric Anholt673a3942008-07-30 12:06:12 -07003747int
Chris Wilson05394f32010-11-08 19:18:58 +00003748i915_gem_object_pin(struct drm_i915_gem_object *obj,
Ben Widawskyc37e2202013-07-31 16:59:58 -07003749 struct i915_address_space *vm,
Chris Wilson05394f32010-11-08 19:18:58 +00003750 uint32_t alignment,
Chris Wilson86a1ee22012-08-11 15:41:04 +01003751 bool map_and_fenceable,
3752 bool nonblocking)
Eric Anholt673a3942008-07-30 12:06:12 -07003753{
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003754 struct i915_vma *vma;
Eric Anholt673a3942008-07-30 12:06:12 -07003755 int ret;
3756
Chris Wilson7e81a422012-09-15 09:41:57 +01003757 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3758 return -EBUSY;
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003759
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003760 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3761
3762 vma = i915_gem_obj_to_vma(obj, vm);
3763
3764 if (vma) {
3765 if ((alignment &&
3766 vma->node.start & (alignment - 1)) ||
Chris Wilson05394f32010-11-08 19:18:58 +00003767 (map_and_fenceable && !obj->map_and_fenceable)) {
3768 WARN(obj->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003769 "bo is already pinned with incorrect alignment:"
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003770 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003771 " obj->map_and_fenceable=%d\n",
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003772 i915_gem_obj_offset(obj, vm), alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003773 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003774 obj->map_and_fenceable);
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003775 ret = i915_vma_unbind(vma);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003776 if (ret)
3777 return ret;
3778 }
3779 }
3780
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003781 if (!i915_gem_obj_bound(obj, vm)) {
Chris Wilson87422672012-11-21 13:04:03 +00003782 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3783
Ben Widawsky07fe0b12013-07-31 17:00:10 -07003784 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3785 map_and_fenceable,
3786 nonblocking);
Chris Wilson97311292009-09-21 00:22:34 +01003787 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003788 return ret;
Chris Wilson87422672012-11-21 13:04:03 +00003789
3790 if (!dev_priv->mm.aliasing_ppgtt)
3791 i915_gem_gtt_bind_object(obj, obj->cache_level);
Chris Wilson22c344e2009-02-11 14:26:45 +00003792 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003793
Daniel Vetter74898d72012-02-15 23:50:22 +01003794 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3795 i915_gem_gtt_bind_object(obj, obj->cache_level);
3796
Chris Wilson1b502472012-04-24 15:47:30 +01003797 obj->pin_count++;
Chris Wilson6299f992010-11-24 12:23:44 +00003798 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003799
3800 return 0;
3801}
3802
3803void
Chris Wilson05394f32010-11-08 19:18:58 +00003804i915_gem_object_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003805{
Chris Wilson05394f32010-11-08 19:18:58 +00003806 BUG_ON(obj->pin_count == 0);
Ben Widawsky98438772013-07-31 17:00:12 -07003807 BUG_ON(!i915_gem_obj_bound_any(obj));
Eric Anholt673a3942008-07-30 12:06:12 -07003808
Chris Wilson1b502472012-04-24 15:47:30 +01003809 if (--obj->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003810 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003811}
3812
3813int
3814i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003815 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003816{
3817 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003818 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003819 int ret;
3820
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003821 ret = i915_mutex_lock_interruptible(dev);
3822 if (ret)
3823 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003824
Chris Wilson05394f32010-11-08 19:18:58 +00003825 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003826 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003827 ret = -ENOENT;
3828 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003829 }
Eric Anholt673a3942008-07-30 12:06:12 -07003830
Chris Wilson05394f32010-11-08 19:18:58 +00003831 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003832 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003833 ret = -EINVAL;
3834 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003835 }
3836
Chris Wilson05394f32010-11-08 19:18:58 +00003837 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003838 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3839 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003840 ret = -EINVAL;
3841 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003842 }
3843
Chris Wilson93be8782013-01-02 10:31:22 +00003844 if (obj->user_pin_count == 0) {
Ben Widawskyc37e2202013-07-31 16:59:58 -07003845 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003846 if (ret)
3847 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003848 }
3849
Chris Wilson93be8782013-01-02 10:31:22 +00003850 obj->user_pin_count++;
3851 obj->pin_filp = file;
3852
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003853 args->offset = i915_gem_obj_ggtt_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003854out:
Chris Wilson05394f32010-11-08 19:18:58 +00003855 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003856unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003857 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003858 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003859}
3860
3861int
3862i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003863 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003864{
3865 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003866 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003867 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003868
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003869 ret = i915_mutex_lock_interruptible(dev);
3870 if (ret)
3871 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003872
Chris Wilson05394f32010-11-08 19:18:58 +00003873 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003874 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003875 ret = -ENOENT;
3876 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003877 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003878
Chris Wilson05394f32010-11-08 19:18:58 +00003879 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003880 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3881 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003882 ret = -EINVAL;
3883 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003884 }
Chris Wilson05394f32010-11-08 19:18:58 +00003885 obj->user_pin_count--;
3886 if (obj->user_pin_count == 0) {
3887 obj->pin_filp = NULL;
Jesse Barnes79e53942008-11-07 14:24:08 -08003888 i915_gem_object_unpin(obj);
3889 }
Eric Anholt673a3942008-07-30 12:06:12 -07003890
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003891out:
Chris Wilson05394f32010-11-08 19:18:58 +00003892 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003893unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003894 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003895 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003896}
3897
3898int
3899i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003900 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003901{
3902 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003903 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003904 int ret;
3905
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003906 ret = i915_mutex_lock_interruptible(dev);
3907 if (ret)
3908 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003909
Chris Wilson05394f32010-11-08 19:18:58 +00003910 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003911 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003912 ret = -ENOENT;
3913 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003914 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003915
Chris Wilson0be555b2010-08-04 15:36:30 +01003916 /* Count all active objects as busy, even if they are currently not used
3917 * by the gpu. Users of this interface expect objects to eventually
3918 * become non-busy without any further actions, therefore emit any
3919 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08003920 */
Daniel Vetter30dfebf2012-06-01 15:21:23 +02003921 ret = i915_gem_object_flush_active(obj);
3922
Chris Wilson05394f32010-11-08 19:18:58 +00003923 args->busy = obj->active;
Chris Wilsone9808ed2012-07-04 12:25:08 +01003924 if (obj->ring) {
3925 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3926 args->busy |= intel_ring_flag(obj->ring) << 16;
3927 }
Eric Anholt673a3942008-07-30 12:06:12 -07003928
Chris Wilson05394f32010-11-08 19:18:58 +00003929 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003930unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003931 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003932 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003933}
3934
3935int
3936i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3937 struct drm_file *file_priv)
3938{
Akshay Joshi0206e352011-08-16 15:34:10 -04003939 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003940}
3941
Chris Wilson3ef94da2009-09-14 16:50:29 +01003942int
3943i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3944 struct drm_file *file_priv)
3945{
3946 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003947 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003948 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003949
3950 switch (args->madv) {
3951 case I915_MADV_DONTNEED:
3952 case I915_MADV_WILLNEED:
3953 break;
3954 default:
3955 return -EINVAL;
3956 }
3957
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003958 ret = i915_mutex_lock_interruptible(dev);
3959 if (ret)
3960 return ret;
3961
Chris Wilson05394f32010-11-08 19:18:58 +00003962 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003963 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003964 ret = -ENOENT;
3965 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003966 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01003967
Chris Wilson05394f32010-11-08 19:18:58 +00003968 if (obj->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003969 ret = -EINVAL;
3970 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003971 }
3972
Chris Wilson05394f32010-11-08 19:18:58 +00003973 if (obj->madv != __I915_MADV_PURGED)
3974 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003975
Chris Wilson6c085a72012-08-20 11:40:46 +02003976 /* if the object is no longer attached, discard its backing storage */
3977 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003978 i915_gem_object_truncate(obj);
3979
Chris Wilson05394f32010-11-08 19:18:58 +00003980 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003981
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003982out:
Chris Wilson05394f32010-11-08 19:18:58 +00003983 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003984unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01003985 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003986 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003987}
3988
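/*
 * Common initialisation for a freshly allocated GEM object: set up its list
 * heads and ops table and account its size in the per-device bookkeeping.
 */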
Chris Wilson37e680a2012-06-07 15:38:42 +01003989void i915_gem_object_init(struct drm_i915_gem_object *obj,
3990 const struct drm_i915_gem_object_ops *ops)
Chris Wilson0327d6b2012-08-11 15:41:06 +01003991{
Ben Widawsky35c20a62013-05-31 11:28:48 -07003992 INIT_LIST_HEAD(&obj->global_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01003993 INIT_LIST_HEAD(&obj->ring_list);
3994 INIT_LIST_HEAD(&obj->exec_list);
Ben Widawskyb25cb2f2013-08-14 11:38:33 +02003995 INIT_LIST_HEAD(&obj->obj_exec_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07003996 INIT_LIST_HEAD(&obj->vma_list);
Chris Wilson0327d6b2012-08-11 15:41:06 +01003997
Chris Wilson37e680a2012-06-07 15:38:42 +01003998 obj->ops = ops;
3999
Chris Wilson0327d6b2012-08-11 15:41:06 +01004000 obj->fence_reg = I915_FENCE_REG_NONE;
4001 obj->madv = I915_MADV_WILLNEED;
4002 /* Avoid an unnecessary call to unbind on the first bind. */
4003 obj->map_and_fenceable = true;
4004
4005 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4006}
4007
Chris Wilson37e680a2012-06-07 15:38:42 +01004008static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4009 .get_pages = i915_gem_object_get_pages_gtt,
4010 .put_pages = i915_gem_object_put_pages_gtt,
4011};
4012
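/*
 * Allocate a shmemfs-backed GEM object of @size bytes, starting out in the
 * CPU domain and using LLC caching when the platform has an LLC.
 */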
Chris Wilson05394f32010-11-08 19:18:58 +00004013struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4014 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00004015{
Daniel Vetterc397b902010-04-09 19:05:07 +00004016 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07004017 struct address_space *mapping;
Daniel Vetter1a240d42012-11-29 22:18:51 +01004018 gfp_t mask;
Daniel Vetterc397b902010-04-09 19:05:07 +00004019
Chris Wilson42dcedd2012-11-15 11:32:30 +00004020 obj = i915_gem_object_alloc(dev);
Daniel Vetterc397b902010-04-09 19:05:07 +00004021 if (obj == NULL)
4022 return NULL;
4023
4024 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
Chris Wilson42dcedd2012-11-15 11:32:30 +00004025 i915_gem_object_free(obj);
Daniel Vetterc397b902010-04-09 19:05:07 +00004026 return NULL;
4027 }
4028
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004029 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4030 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4031 /* 965gm cannot relocate objects above 4GiB. */
4032 mask &= ~__GFP_HIGHMEM;
4033 mask |= __GFP_DMA32;
4034 }
4035
Al Viro496ad9a2013-01-23 17:07:38 -05004036 mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01004037 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07004038
Chris Wilson37e680a2012-06-07 15:38:42 +01004039 i915_gem_object_init(obj, &i915_gem_object_ops);
Chris Wilson73aa8082010-09-30 11:46:12 +01004040
Daniel Vetterc397b902010-04-09 19:05:07 +00004041 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4042 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4043
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02004044 if (HAS_LLC(dev)) {
4045 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07004046 * cache) for about a 10% performance improvement
4047 * compared to uncached. Graphics requests other than
4048 * display scanout are coherent with the CPU in
4049 * accessing this cache. This means in this mode we
4050 * don't need to clflush on the CPU side, and on the
4051 * GPU side we only need to flush internal caches to
4052 * get data visible to the CPU.
4053 *
4054 * However, we maintain the display planes as UC, and so
4055 * need to rebind when first used as such.
4056 */
4057 obj->cache_level = I915_CACHE_LLC;
4058 } else
4059 obj->cache_level = I915_CACHE_NONE;
4060
Daniel Vetterd861e332013-07-24 23:25:03 +02004061 trace_i915_gem_object_create(obj);
4062
Chris Wilson05394f32010-11-08 19:18:58 +00004063 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004064}
4065
Eric Anholt673a3942008-07-30 12:06:12 -07004066int i915_gem_init_object(struct drm_gem_object *obj)
4067{
Daniel Vetterc397b902010-04-09 19:05:07 +00004068 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08004069
Eric Anholt673a3942008-07-30 12:06:12 -07004070 return 0;
4071}
4072
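/*
 * Final release of a GEM object: detach any phys backing, unbind all of its
 * VMAs (retrying uninterruptibly if signalled), drop the backing pages and
 * free the object itself.
 */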
Chris Wilson1488fc02012-04-24 15:47:31 +01004073void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01004074{
Chris Wilson1488fc02012-04-24 15:47:31 +01004075 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00004076 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01004077 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004078 struct i915_vma *vma, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01004079
Chris Wilson26e12f892011-03-20 11:20:19 +00004080 trace_i915_gem_object_destroy(obj);
4081
Chris Wilson1488fc02012-04-24 15:47:31 +01004082 if (obj->phys_obj)
4083 i915_gem_detach_phys_object(dev, obj);
4084
4085 obj->pin_count = 0;
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004086 /* NB: 0 or 1 elements */
4087 WARN_ON(!list_empty(&obj->vma_list) &&
4088 !list_is_singular(&obj->vma_list));
4089 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4090 int ret = i915_vma_unbind(vma);
4091 if (WARN_ON(ret == -ERESTARTSYS)) {
4092 bool was_interruptible;
Chris Wilson1488fc02012-04-24 15:47:31 +01004093
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004094 was_interruptible = dev_priv->mm.interruptible;
4095 dev_priv->mm.interruptible = false;
Chris Wilson1488fc02012-04-24 15:47:31 +01004096
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004097 WARN_ON(i915_vma_unbind(vma));
Chris Wilson1488fc02012-04-24 15:47:31 +01004098
Ben Widawsky07fe0b12013-07-31 17:00:10 -07004099 dev_priv->mm.interruptible = was_interruptible;
4100 }
Chris Wilson1488fc02012-04-24 15:47:31 +01004101 }
4102
Ben Widawsky1d64ae72013-05-31 14:46:20 -07004103	/* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4104 * before progressing. */
4105 if (obj->stolen)
4106 i915_gem_object_unpin_pages(obj);
4107
Ben Widawsky401c29f2013-05-31 11:28:47 -07004108 if (WARN_ON(obj->pages_pin_count))
4109 obj->pages_pin_count = 0;
Chris Wilson37e680a2012-06-07 15:38:42 +01004110 i915_gem_object_put_pages(obj);
Chris Wilsond8cb5082012-08-11 15:41:03 +01004111 i915_gem_object_free_mmap_offset(obj);
Chris Wilson0104fdb2012-11-15 11:32:26 +00004112 i915_gem_object_release_stolen(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004113
Chris Wilson9da3da62012-06-01 15:20:22 +01004114 BUG_ON(obj->pages);
4115
Chris Wilson2f745ad2012-09-04 21:02:58 +01004116 if (obj->base.import_attach)
4117 drm_prime_gem_destroy(&obj->base, NULL);
Chris Wilsonbe726152010-07-23 23:18:50 +01004118
Chris Wilson05394f32010-11-08 19:18:58 +00004119 drm_gem_object_release(&obj->base);
4120 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01004121
Chris Wilson05394f32010-11-08 19:18:58 +00004122 kfree(obj->bit_17);
Chris Wilson42dcedd2012-11-15 11:32:30 +00004123 i915_gem_object_free(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01004124}
4125
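/*
 * Allocate a VMA linking @obj into @vm. GGTT VMAs are kept at the head of
 * the object's vma_list to ease debugging.
 */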
Ben Widawsky2f633152013-07-17 12:19:03 -07004126struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
4127 struct i915_address_space *vm)
4128{
4129 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4130 if (vma == NULL)
4131 return ERR_PTR(-ENOMEM);
4132
4133 INIT_LIST_HEAD(&vma->vma_link);
Ben Widawskyca191b12013-07-31 17:00:14 -07004134 INIT_LIST_HEAD(&vma->mm_list);
Ben Widawsky82a55ad2013-08-14 11:38:34 +02004135 INIT_LIST_HEAD(&vma->exec_list);
Ben Widawsky2f633152013-07-17 12:19:03 -07004136 vma->vm = vm;
4137 vma->obj = obj;
4138
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004139	/* Keep GGTT vmas first to make debugging easier */
4140 if (i915_is_ggtt(vm))
4141 list_add(&vma->vma_link, &obj->vma_list);
4142 else
4143 list_add_tail(&vma->vma_link, &obj->vma_list);
4144
Ben Widawsky2f633152013-07-17 12:19:03 -07004145 return vma;
4146}
4147
4148void i915_gem_vma_destroy(struct i915_vma *vma)
4149{
4150 WARN_ON(vma->node.allocated);
Ben Widawsky8b9c2b92013-07-31 17:00:16 -07004151 list_del(&vma->vma_link);
Ben Widawsky2f633152013-07-17 12:19:03 -07004152 kfree(vma);
4153}
4154
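/*
 * Quiesce the GPU with struct_mutex held: wait for outstanding rendering,
 * retire requests, evict everything under UMS, stop the hangcheck timer,
 * tear down the rings and cancel the retire worker.
 */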
Jesse Barnes5669fca2009-02-17 15:13:31 -08004155int
Eric Anholt673a3942008-07-30 12:06:12 -07004156i915_gem_idle(struct drm_device *dev)
4157{
4158 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00004159 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004160
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004161 if (dev_priv->ums.mm_suspended) {
Keith Packard6dbe2772008-10-14 21:41:13 -07004162 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004163 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07004164 }
Eric Anholt673a3942008-07-30 12:06:12 -07004165
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004166 ret = i915_gpu_idle(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004167 if (ret) {
4168 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004169 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07004170 }
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004171 i915_gem_retire_requests(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004172
Chris Wilson29105cc2010-01-07 10:39:13 +00004173 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01004174 if (!drm_core_check_feature(dev, DRIVER_MODESET))
Chris Wilson6c085a72012-08-20 11:40:46 +02004175 i915_gem_evict_everything(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004176
Daniel Vetter99584db2012-11-14 17:14:04 +01004177 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004178
4179 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004180 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004181
Chris Wilson29105cc2010-01-07 10:39:13 +00004182 /* Cancel the retire work handler, which should be idle now. */
4183 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4184
Eric Anholt673a3942008-07-30 12:06:12 -07004185 return 0;
4186}
4187
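/*
 * Reprogram the GEN7 L3 remapping registers from the saved remap info;
 * a no-op on parts without an L3 GPU cache or when nothing was saved.
 */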
Ben Widawskyb9524a12012-05-25 16:56:24 -07004188void i915_gem_l3_remap(struct drm_device *dev)
4189{
4190 drm_i915_private_t *dev_priv = dev->dev_private;
4191 u32 misccpctl;
4192 int i;
4193
Daniel Vettereb32e452013-02-14 19:46:07 +01004194 if (!HAS_L3_GPU_CACHE(dev))
Ben Widawskyb9524a12012-05-25 16:56:24 -07004195 return;
4196
Daniel Vettera4da4fa2012-11-02 19:55:07 +01004197 if (!dev_priv->l3_parity.remap_info)
Ben Widawskyb9524a12012-05-25 16:56:24 -07004198 return;
4199
4200 misccpctl = I915_READ(GEN7_MISCCPCTL);
4201 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4202 POSTING_READ(GEN7_MISCCPCTL);
4203
4204 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4205 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01004206 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
Ben Widawskyb9524a12012-05-25 16:56:24 -07004207 DRM_DEBUG("0x%x was already programmed to %x\n",
4208 GEN7_L3LOG_BASE + i, remap);
Daniel Vettera4da4fa2012-11-02 19:55:07 +01004209 if (remap && !dev_priv->l3_parity.remap_info[i/4])
Ben Widawskyb9524a12012-05-25 16:56:24 -07004210 DRM_DEBUG_DRIVER("Clearing remapped register\n");
Daniel Vettera4da4fa2012-11-02 19:55:07 +01004211 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
Ben Widawskyb9524a12012-05-25 16:56:24 -07004212 }
4213
4214 /* Make sure all the writes land before disabling dop clock gating */
4215 POSTING_READ(GEN7_L3LOG_BASE);
4216
4217 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4218}
4219
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004220void i915_gem_init_swizzling(struct drm_device *dev)
4221{
4222 drm_i915_private_t *dev_priv = dev->dev_private;
4223
Daniel Vetter11782b02012-01-31 16:47:55 +01004224 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004225 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4226 return;
4227
4228 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4229 DISP_TILE_SURFACE_SWIZZLING);
4230
Daniel Vetter11782b02012-01-31 16:47:55 +01004231 if (IS_GEN5(dev))
4232 return;
4233
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004234 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4235 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004236 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004237 else if (IS_GEN7(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02004238 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Ben Widawsky8782e262012-12-18 10:31:23 -08004239 else
4240 BUG();
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004241}
Daniel Vettere21af882012-02-09 20:53:27 +01004242
Chris Wilson67b1b572012-07-05 23:49:40 +01004243static bool
4244intel_enable_blt(struct drm_device *dev)
4245{
4246 if (!HAS_BLT(dev))
4247 return false;
4248
4249 /* The blitter was dysfunctional on early prototypes */
4250 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4251 DRM_INFO("BLT not supported on this pre-production hardware;"
4252 " graphics performance will be degraded.\n");
4253 return false;
4254 }
4255
4256 return true;
4257}
4258
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004259static int i915_gem_init_rings(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004260{
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004261 struct drm_i915_private *dev_priv = dev->dev_private;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004262 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004263
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004264 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004265 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00004266 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004267
4268 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004269 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004270 if (ret)
4271 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004272 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004273
Chris Wilson67b1b572012-07-05 23:49:40 +01004274 if (intel_enable_blt(dev)) {
Chris Wilson549f7362010-10-19 11:19:32 +01004275 ret = intel_init_blt_ring_buffer(dev);
4276 if (ret)
4277 goto cleanup_bsd_ring;
4278 }
4279
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004280 if (HAS_VEBOX(dev)) {
4281 ret = intel_init_vebox_ring_buffer(dev);
4282 if (ret)
4283 goto cleanup_blt_ring;
4284 }
4285
Mika Kuoppala99433932013-01-22 14:12:17 +02004287 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4288 if (ret)
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004289 goto cleanup_vebox_ring;
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004290
4291 return 0;
4292
Ben Widawsky9a8a2212013-05-28 19:22:23 -07004293cleanup_vebox_ring:
4294 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004295cleanup_blt_ring:
4296 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4297cleanup_bsd_ring:
4298 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4299cleanup_render_ring:
4300 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4301
4302 return ret;
4303}
4304
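/*
 * Bring up the GPU-facing GEM state: L3 remapping, swizzling, the rings,
 * contexts and, when present, the aliasing PPGTT.
 */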
4305int
4306i915_gem_init_hw(struct drm_device *dev)
4307{
4308 drm_i915_private_t *dev_priv = dev->dev_private;
4309 int ret;
4310
4311 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4312 return -EIO;
4313
Ben Widawsky59124502013-07-04 11:02:05 -07004314 if (dev_priv->ellc_size)
Ben Widawsky05e21cc2013-07-04 11:02:04 -07004315 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004316
Ben Widawsky88a2b2a2013-04-05 13:12:43 -07004317 if (HAS_PCH_NOP(dev)) {
4318 u32 temp = I915_READ(GEN7_MSG_CTL);
4319 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4320 I915_WRITE(GEN7_MSG_CTL, temp);
4321 }
4322
Ben Widawsky4fc7c972013-02-08 11:49:24 -08004323 i915_gem_l3_remap(dev);
4324
4325 i915_gem_init_swizzling(dev);
4326
4327 ret = i915_gem_init_rings(dev);
4328 if (ret)
Mika Kuoppala99433932013-01-22 14:12:17 +02004329 return ret;
4330
Ben Widawsky254f9652012-06-04 14:42:42 -07004331 /*
4332 * XXX: There was some w/a described somewhere suggesting loading
4333 * contexts before PPGTT.
4334 */
4335 i915_gem_context_init(dev);
Ben Widawskyb7c36d22013-04-08 18:43:56 -07004336 if (dev_priv->mm.aliasing_ppgtt) {
4337 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4338 if (ret) {
4339 i915_gem_cleanup_aliasing_ppgtt(dev);
4340 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4341 }
4342 }
Daniel Vettere21af882012-02-09 20:53:27 +01004343
Chris Wilson68f95ba2010-05-27 13:18:22 +01004344 return 0;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004345}
4346
Chris Wilson1070a422012-04-24 15:47:41 +01004347int i915_gem_init(struct drm_device *dev)
4348{
4349 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson1070a422012-04-24 15:47:41 +01004350 int ret;
4351
Chris Wilson1070a422012-04-24 15:47:41 +01004352 mutex_lock(&dev->struct_mutex);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004353
4354 if (IS_VALLEYVIEW(dev)) {
4355 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4356 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4357 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4358 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4359 }
4360
Ben Widawskyd7e50082012-12-18 10:31:25 -08004361 i915_gem_init_global_gtt(dev);
Jesse Barnesd62b4892013-03-08 10:45:53 -08004362
Chris Wilson1070a422012-04-24 15:47:41 +01004363 ret = i915_gem_init_hw(dev);
4364 mutex_unlock(&dev->struct_mutex);
4365 if (ret) {
4366 i915_gem_cleanup_aliasing_ppgtt(dev);
4367 return ret;
4368 }
4369
Daniel Vetter53ca26c2012-04-26 23:28:03 +02004370 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4371 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4372 dev_priv->dri1.allow_batchbuffer = 1;
Chris Wilson1070a422012-04-24 15:47:41 +01004373 return 0;
4374}
4375
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004376void
4377i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4378{
4379 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004380 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004381 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004382
Chris Wilsonb4519512012-05-11 14:29:30 +01004383 for_each_ring(ring, dev_priv, i)
4384 intel_cleanup_ring_buffer(ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004385}
4386
4387int
Eric Anholt673a3942008-07-30 12:06:12 -07004388i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4389 struct drm_file *file_priv)
4390{
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004391 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01004392 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004393
Jesse Barnes79e53942008-11-07 14:24:08 -08004394 if (drm_core_check_feature(dev, DRIVER_MODESET))
4395 return 0;
4396
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004397 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004398 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004399 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004400 }
4401
Eric Anholt673a3942008-07-30 12:06:12 -07004402 mutex_lock(&dev->struct_mutex);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004403 dev_priv->ums.mm_suspended = 0;
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004404
Daniel Vetterf691e2f2012-02-02 09:58:12 +01004405 ret = i915_gem_init_hw(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004406 if (ret != 0) {
4407 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004408 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004409 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004410
Ben Widawsky5cef07e2013-07-16 16:50:08 -07004411 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004412 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004413
Chris Wilson5f353082010-06-07 14:03:03 +01004414 ret = drm_irq_install(dev);
4415 if (ret)
4416 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004417
Eric Anholt673a3942008-07-30 12:06:12 -07004418 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01004419
4420cleanup_ringbuffer:
4421 mutex_lock(&dev->struct_mutex);
4422 i915_gem_cleanup_ringbuffer(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004423 dev_priv->ums.mm_suspended = 1;
Chris Wilson5f353082010-06-07 14:03:03 +01004424 mutex_unlock(&dev->struct_mutex);
4425
4426 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004427}
4428
4429int
4430i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4431 struct drm_file *file_priv)
4432{
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004433 struct drm_i915_private *dev_priv = dev->dev_private;
4434 int ret;
4435
Jesse Barnes79e53942008-11-07 14:24:08 -08004436 if (drm_core_check_feature(dev, DRIVER_MODESET))
4437 return 0;
4438
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004439 drm_irq_uninstall(dev);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004440
4441 mutex_lock(&dev->struct_mutex);
4442 ret = i915_gem_idle(dev);
4443
4444 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4445 * We need to replace this with a semaphore, or something.
4446 * And not confound ums.mm_suspended!
4447 */
4448 if (ret != 0)
4449 dev_priv->ums.mm_suspended = 1;
4450 mutex_unlock(&dev->struct_mutex);
4451
4452 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004453}
4454
4455void
4456i915_gem_lastclose(struct drm_device *dev)
4457{
4458 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004459
Eric Anholte806b492009-01-22 09:56:58 -08004460 if (drm_core_check_feature(dev, DRIVER_MODESET))
4461 return;
4462
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004463 mutex_lock(&dev->struct_mutex);
Keith Packard6dbe2772008-10-14 21:41:13 -07004464 ret = i915_gem_idle(dev);
4465 if (ret)
4466 DRM_ERROR("failed to idle hardware: %d\n", ret);
Daniel Vetterdb1b76c2013-07-09 16:51:37 +02004467 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004468}
4469
Chris Wilson64193402010-10-24 12:38:05 +01004470static void
4471init_ring_lists(struct intel_ring_buffer *ring)
4472{
4473 INIT_LIST_HEAD(&ring->active_list);
4474 INIT_LIST_HEAD(&ring->request_list);
Chris Wilson64193402010-10-24 12:38:05 +01004475}
4476
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004477static void i915_init_vm(struct drm_i915_private *dev_priv,
4478 struct i915_address_space *vm)
4479{
4480 vm->dev = dev_priv->dev;
4481 INIT_LIST_HEAD(&vm->active_list);
4482 INIT_LIST_HEAD(&vm->inactive_list);
4483 INIT_LIST_HEAD(&vm->global_link);
4484 list_add(&vm->global_link, &dev_priv->vm_list);
4485}
4486
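/*
 * One-time software setup at driver load: the object slab cache, VM and
 * object lists, fence register bookkeeping, work handlers and the shrinker.
 */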
Eric Anholt673a3942008-07-30 12:06:12 -07004487void
4488i915_gem_load(struct drm_device *dev)
4489{
4490 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson42dcedd2012-11-15 11:32:30 +00004491 int i;
4492
4493 dev_priv->slab =
4494 kmem_cache_create("i915_gem_object",
4495 sizeof(struct drm_i915_gem_object), 0,
4496 SLAB_HWCACHE_ALIGN,
4497 NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07004498
Ben Widawskyfc8c0672013-07-31 16:59:54 -07004499 INIT_LIST_HEAD(&dev_priv->vm_list);
4500 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4501
Chris Wilson6c085a72012-08-20 11:40:46 +02004502 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4503 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004504 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00004505 for (i = 0; i < I915_NUM_RINGS; i++)
4506 init_ring_lists(&dev_priv->ring[i]);
Daniel Vetter4b9de732011-10-09 21:52:02 +02004507 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
Daniel Vetter007cc8a2010-04-28 11:02:31 +02004508 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004509 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4510 i915_gem_retire_work_handler);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01004511 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
Chris Wilson31169712009-09-14 16:50:28 +01004512
Dave Airlie94400122010-07-20 13:15:31 +10004513 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4514 if (IS_GEN3(dev)) {
Daniel Vetter50743292012-04-26 22:02:54 +02004515 I915_WRITE(MI_ARB_STATE,
4516 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
Dave Airlie94400122010-07-20 13:15:31 +10004517 }
4518
Chris Wilson72bfa192010-12-19 11:42:05 +00004519 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4520
Jesse Barnesde151cf2008-11-12 10:03:55 -08004521 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08004522 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4523 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004524
Ville Syrjälä42b5aea2013-04-09 13:02:47 +03004525 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4526 dev_priv->num_fence_regs = 32;
4527 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004528 dev_priv->num_fence_regs = 16;
4529 else
4530 dev_priv->num_fence_regs = 8;
4531
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004532 /* Initialize fence registers to zero */
Chris Wilson19b2dbd2013-06-12 10:15:12 +01004533 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4534 i915_gem_restore_fences(dev);
Eric Anholt10ed13e2011-05-06 13:53:49 -07004535
Eric Anholt673a3942008-07-30 12:06:12 -07004536 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004537 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01004538
Chris Wilsonce453d82011-02-21 14:43:56 +00004539 dev_priv->mm.interruptible = true;
4540
Chris Wilson17250b72010-10-28 12:51:39 +01004541 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4542 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4543 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07004544}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004545
4546/*
4547 * Create a physically contiguous memory object for this object
4548 * e.g. for cursor + overlay regs
4549 */
Chris Wilson995b6762010-08-20 13:23:26 +01004550static int i915_gem_init_phys_object(struct drm_device *dev,
4551 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004552{
4553 drm_i915_private_t *dev_priv = dev->dev_private;
4554 struct drm_i915_gem_phys_object *phys_obj;
4555 int ret;
4556
4557 if (dev_priv->mm.phys_objs[id - 1] || !size)
4558 return 0;
4559
Eric Anholt9a298b22009-03-24 12:23:04 -07004560 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004561 if (!phys_obj)
4562 return -ENOMEM;
4563
4564 phys_obj->id = id;
4565
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004566 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004567 if (!phys_obj->handle) {
4568 ret = -ENOMEM;
4569 goto kfree_obj;
4570 }
4571#ifdef CONFIG_X86
4572 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4573#endif
4574
4575 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4576
4577 return 0;
4578kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004579 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004580 return ret;
4581}
4582
Chris Wilson995b6762010-08-20 13:23:26 +01004583static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004584{
4585 drm_i915_private_t *dev_priv = dev->dev_private;
4586 struct drm_i915_gem_phys_object *phys_obj;
4587
4588 if (!dev_priv->mm.phys_objs[id - 1])
4589 return;
4590
4591 phys_obj = dev_priv->mm.phys_objs[id - 1];
4592 if (phys_obj->cur_obj) {
4593 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4594 }
4595
4596#ifdef CONFIG_X86
4597 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4598#endif
4599 drm_pci_free(dev, phys_obj->handle);
4600 kfree(phys_obj);
4601 dev_priv->mm.phys_objs[id - 1] = NULL;
4602}
4603
4604void i915_gem_free_all_phys_object(struct drm_device *dev)
4605{
4606 int i;
4607
Dave Airlie260883c2009-01-22 17:58:49 +10004608 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004609 i915_gem_free_phys_object(dev, i);
4610}
4611
4612void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004613 struct drm_i915_gem_object *obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004614{
Al Viro496ad9a2013-01-23 17:07:38 -05004615 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
Chris Wilsone5281cc2010-10-28 13:45:36 +01004616 char *vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004617 int i;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004618 int page_count;
4619
Chris Wilson05394f32010-11-08 19:18:58 +00004620 if (!obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004621 return;
Chris Wilson05394f32010-11-08 19:18:58 +00004622 vaddr = obj->phys_obj->handle->vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004623
Chris Wilson05394f32010-11-08 19:18:58 +00004624 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004625 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07004626 struct page *page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004627 if (!IS_ERR(page)) {
4628 char *dst = kmap_atomic(page);
4629 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4630 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004631
Chris Wilsone5281cc2010-10-28 13:45:36 +01004632 drm_clflush_pages(&page, 1);
4633
4634 set_page_dirty(page);
4635 mark_page_accessed(page);
4636 page_cache_release(page);
4637 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004638 }
Ben Widawskye76e9ae2012-11-04 09:21:27 -08004639 i915_gem_chipset_flush(dev);
Chris Wilsond78b47b2009-06-17 21:52:49 +01004640
Chris Wilson05394f32010-11-08 19:18:58 +00004641 obj->phys_obj->cur_obj = NULL;
4642 obj->phys_obj = NULL;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004643}
4644
4645int
4646i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004647 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004648 int id,
4649 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004650{
Al Viro496ad9a2013-01-23 17:07:38 -05004651 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004652 drm_i915_private_t *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004653 int ret = 0;
4654 int page_count;
4655 int i;
4656
4657 if (id > I915_MAX_PHYS_OBJECT)
4658 return -EINVAL;
4659
Chris Wilson05394f32010-11-08 19:18:58 +00004660 if (obj->phys_obj) {
4661 if (obj->phys_obj->id == id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004662 return 0;
4663 i915_gem_detach_phys_object(dev, obj);
4664 }
4665
Dave Airlie71acb5e2008-12-30 20:31:46 +10004666 /* create a new object */
4667 if (!dev_priv->mm.phys_objs[id - 1]) {
4668 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson05394f32010-11-08 19:18:58 +00004669 obj->base.size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004670 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00004671 DRM_ERROR("failed to init phys object %d size: %zu\n",
4672 id, obj->base.size);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004673 return ret;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004674 }
4675 }
4676
4677 /* bind to the object */
Chris Wilson05394f32010-11-08 19:18:58 +00004678 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4679 obj->phys_obj->cur_obj = obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004680
Chris Wilson05394f32010-11-08 19:18:58 +00004681 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004682
4683 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01004684 struct page *page;
4685 char *dst, *src;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004686
Hugh Dickins5949eac2011-06-27 16:18:18 -07004687 page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004688 if (IS_ERR(page))
4689 return PTR_ERR(page);
4690
Chris Wilsonff75b9b2010-10-30 22:52:31 +01004691 src = kmap_atomic(page);
Chris Wilson05394f32010-11-08 19:18:58 +00004692 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004693 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004694 kunmap_atomic(src);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004695
4696 mark_page_accessed(page);
4697 page_cache_release(page);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004698 }
4699
4700 return 0;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004701}
4702
4703static int
Chris Wilson05394f32010-11-08 19:18:58 +00004704i915_gem_phys_pwrite(struct drm_device *dev,
4705 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +10004706 struct drm_i915_gem_pwrite *args,
4707 struct drm_file *file_priv)
4708{
Chris Wilson05394f32010-11-08 19:18:58 +00004709 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
Ville Syrjälä2bb46292013-02-22 16:12:51 +02004710 char __user *user_data = to_user_ptr(args->data_ptr);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004711
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004712 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4713 unsigned long unwritten;
4714
4715 /* The physical object once assigned is fixed for the lifetime
4716 * of the obj, so we can safely drop the lock and continue
4717 * to access vaddr.
4718 */
4719 mutex_unlock(&dev->struct_mutex);
4720 unwritten = copy_from_user(vaddr, user_data, args->size);
4721 mutex_lock(&dev->struct_mutex);
4722 if (unwritten)
4723 return -EFAULT;
4724 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004725
Ben Widawskye76e9ae2012-11-04 09:21:27 -08004726 i915_gem_chipset_flush(dev);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004727 return 0;
4728}
Eric Anholtb9624422009-06-03 07:27:35 +00004729
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004730void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004731{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004732 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00004733
4734 /* Clean up our request list when the client is going away, so that
4735 * later retire_requests won't dereference our soon-to-be-gone
4736 * file_priv.
4737 */
Chris Wilson1c255952010-09-26 11:03:27 +01004738 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004739 while (!list_empty(&file_priv->mm.request_list)) {
4740 struct drm_i915_gem_request *request;
4741
4742 request = list_first_entry(&file_priv->mm.request_list,
4743 struct drm_i915_gem_request,
4744 client_list);
4745 list_del(&request->client_list);
4746 request->file_priv = NULL;
4747 }
Chris Wilson1c255952010-09-26 11:03:27 +01004748 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00004749}
Chris Wilson31169712009-09-14 16:50:28 +01004750
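/*
 * Report whether @mutex is currently held by @task. Only reliable when the
 * mutex owner is tracked (SMP or mutex debugging); otherwise assume not.
 */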
Chris Wilson57745062012-11-21 13:04:04 +00004751static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4752{
4753 if (!mutex_is_locked(mutex))
4754 return false;
4755
4756#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4757 return mutex->owner == task;
4758#else
4759 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4760 return false;
4761#endif
4762}
4763
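/*
 * Shrinker callback: when asked to scan, purge and shrink object backing
 * pages, falling back to shrinking everything; always report the number of
 * shrinkable pages (unbound objects plus idle, unpinned bound objects).
 */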
Chris Wilson31169712009-09-14 16:50:28 +01004764static int
Ying Han1495f232011-05-24 17:12:27 -07004765i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
Chris Wilson31169712009-09-14 16:50:28 +01004766{
Chris Wilson17250b72010-10-28 12:51:39 +01004767 struct drm_i915_private *dev_priv =
4768 container_of(shrinker,
4769 struct drm_i915_private,
4770 mm.inactive_shrinker);
4771 struct drm_device *dev = dev_priv->dev;
Chris Wilson6c085a72012-08-20 11:40:46 +02004772 struct drm_i915_gem_object *obj;
Ying Han1495f232011-05-24 17:12:27 -07004773 int nr_to_scan = sc->nr_to_scan;
Chris Wilson57745062012-11-21 13:04:04 +00004774 bool unlock = true;
Chris Wilson17250b72010-10-28 12:51:39 +01004775 int cnt;
4776
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return 0;

		unlock = false;
	}

	if (nr_to_scan) {
		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
		if (nr_to_scan > 0)
			nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
							false);
		if (nr_to_scan > 0)
			i915_gem_shrink_all(dev_priv);
	}

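	/* Count the pages that could still be released: unbound objects
	 * whose pages are not pinned, plus bound objects that are neither
	 * active nor pinned.
	 */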
	cnt = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			cnt += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->active)
			continue;

		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
			cnt += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);
	return cnt;
}

/* All the new VM stuff */
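/* Return the start offset of @o's binding in @vm, or -1 (as an unsigned
 * long) if the object has no VMA in that address space.  Lookups against
 * the aliasing PPGTT are redirected to the global GTT, whose layout it
 * shares.
 */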
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));
	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.start;

	return -1;
}

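/* Report whether @o has an allocated VMA node in @vm, i.e. whether it is
 * actually bound into that address space.
 */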
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

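/* Report whether @o is bound into any address space known to the device. */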
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_address_space *vm;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		if (i915_gem_obj_bound(o, vm))
			return true;

	return false;
}

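/* Return the size of @o's allocation in @vm, or 0 if it has no VMA there.
 * As with the offset lookup, the aliasing PPGTT is redirected to the
 * global GTT.
 */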
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}

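/* Look up the VMA linking @obj into @vm, or return NULL if none exists yet. */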
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == vm)
			return vma;

	return NULL;
}

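/* Return the VMA binding @obj into @vm, creating a fresh one via
 * i915_gem_vma_create() if the object has never had one for that address
 * space.  Any error from the creation path is passed straight back to the
 * caller.
 */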
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm)
{
	struct i915_vma *vma;

	vma = i915_gem_obj_to_vma(obj, vm);
	if (!vma)
		vma = i915_gem_vma_create(obj, vm);

	return vma;
}