/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
				     struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	i915_gem_init_global_gtt(dev, args->gtt_start,
				 args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

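/*
 * When bit-6 swizzling depends on bit 17 of the page's physical address
 * (I915_BIT_6_SWIZZLE_9_10_17), the two 64-byte halves of each 128-byte
 * block are swapped relative to the CPU's linear view of the page.  The
 * helpers below therefore copy at most one 64-byte cacheline at a time
 * and XOR the GPU-side offset with 64 to pick the swapped half; callers
 * only use them for pages where that swap applies (see the
 * page_do_bit17_swizzling checks in the pread/pwrite paths).
 */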
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
			  const char *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

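/*
 * The shmem pread below first tries a fast copy with kmap_atomic() and
 * __copy_to_user_inatomic() while struct_mutex is held; if that faults,
 * or the page needs bit-17 swizzling, it pins the page, drops
 * struct_mutex so the user copy may fault and sleep, and retries with
 * plain kmap().  The hit_slowpath fixup at the end re-truncates any
 * purged object whose backing pages were reinstated while the lock was
 * dropped.
 */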
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret)
			return ret;
	}

	offset = args->offset;

	while (remain > 0) {
		struct page *page;
		char *vaddr;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		if (!page_do_bit17_swizzling) {
			vaddr = kmap_atomic(page);
			if (needs_clflush)
				drm_clflush_virt_range(vaddr + shmem_page_offset,
						       page_length);
			ret = __copy_to_user_inatomic(user_data,
						      vaddr + shmem_page_offset,
						      page_length);
			kunmap_atomic(vaddr);
			if (ret == 0)
				goto next_page;
		}

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		vaddr = kmap(page);
		if (needs_clflush)
			drm_clflush_virt_range(vaddr + shmem_page_offset,
					       page_length);

		if (page_do_bit17_swizzling)
			ret = __copy_to_user_swizzled(user_data,
						      vaddr, shmem_page_offset,
						      page_length);
		else
			ret = __copy_to_user(user_data,
					     vaddr + shmem_page_offset,
					     page_length);
		kunmap(page);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
	}

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_object_pin(obj, 0, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

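/*
 * Like the pread path, the shmem pwrite below attempts an atomic copy
 * under struct_mutex first and only falls back to dropping the lock and
 * using kmap() when the atomic copy faults or the page needs bit-17
 * swizzling.  If the slow path was taken, the fixup after the loop
 * re-truncates purged objects and flushes dirty cachelines when the
 * object is no longer in the CPU write domain.
 */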
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int release_page;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		return ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		char *vaddr;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		if (!page_do_bit17_swizzling) {
			vaddr = kmap_atomic(page);
			ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
							user_data,
							page_length);
			kunmap_atomic(vaddr);

			if (ret == 0)
				goto next_page;
		}

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		vaddr = kmap(page);
		if (page_do_bit17_swizzling)
			ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
							user_data,
							page_length);
		else
			ret = __copy_from_user(vaddr + shmem_page_offset,
					       user_data,
					       page_length);
		kunmap(page);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		set_page_dirty(page);
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
		/* and flush dirty cachelines in case the object isn't in the cpu write
		 * domain anymore. */
		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			intel_gtt_chipset_flush();
		}
	}

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->gtt_space &&
	    obj->cache_level == I915_CACHE_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Now bind it into the GTT if needed */
	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	if (!obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	if (obj->tiling_mode == I915_TILING_NONE)
		ret = i915_gem_object_put_fence(obj);
	else
		ret = i915_gem_object_get_fence(obj, NULL);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, than pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	if (obj->base.dev->dev_mapping)
		unmap_mapping_range(obj->base.dev->dev_mapping,
				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
				    obj->base.size, 1);

	obj->fault_mappable = false;
}

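/*
 * Pre-gen4 fence regions must be a power of two: starting from 1MB on
 * gen3 (512KB on older parts) the size below is doubled until it covers
 * the object, so e.g. a 1.5MB tiled object on gen3 needs a 2MB region.
 */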
static uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev,
			   uint32_t size,
			   int tiling_mode)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

/**
 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
 *					 unfenced object
 * @dev: the device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 *
 * Return the required GTT alignment for an object, only taking into account
 * unfenced tiled surface requirements.
 */
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
				    uint32_t size,
				    int tiling_mode)
{
	/*
	 * Minimum alignment is 4k (GTT page size) for sane hw.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/* Previous hardware however needs to be aligned to a power-of-two
	 * tile height. The simplest method for determining this is to reuse
	 * the power-of-tile object size.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

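/*
 * The "offset" handed back by i915_gem_mmap_gtt() below is not a GTT
 * address but the object's fake mmap offset (map_list.hash.key <<
 * PAGE_SHIFT); userspace passes it to mmap() on the DRM fd, and the
 * actual GTT binding happens later in i915_gem_fault().
 */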
int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (!obj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(&obj->base);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}


Chris Wilsone5281cc2010-10-28 13:45:36 +01001155static int
Chris Wilson05394f32010-11-08 19:18:58 +00001156i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
Chris Wilsone5281cc2010-10-28 13:45:36 +01001157 gfp_t gfpmask)
1158{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001159 int page_count, i;
1160 struct address_space *mapping;
1161 struct inode *inode;
1162 struct page *page;
1163
1164 /* Get the list of pages out of our struct file. They'll be pinned
1165 * at this point until we release them.
1166 */
Chris Wilson05394f32010-11-08 19:18:58 +00001167 page_count = obj->base.size / PAGE_SIZE;
1168 BUG_ON(obj->pages != NULL);
1169 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1170 if (obj->pages == NULL)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001171 return -ENOMEM;
1172
Chris Wilson05394f32010-11-08 19:18:58 +00001173 inode = obj->base.filp->f_path.dentry->d_inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001174 mapping = inode->i_mapping;
Hugh Dickins5949eac2011-06-27 16:18:18 -07001175 gfpmask |= mapping_gfp_mask(mapping);
1176
Chris Wilsone5281cc2010-10-28 13:45:36 +01001177 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07001178 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001179 if (IS_ERR(page))
1180 goto err_pages;
1181
Chris Wilson05394f32010-11-08 19:18:58 +00001182 obj->pages[i] = page;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001183 }
1184
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001185 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilsone5281cc2010-10-28 13:45:36 +01001186 i915_gem_object_do_bit_17_swizzle(obj);
1187
1188 return 0;
1189
1190err_pages:
1191 while (i--)
Chris Wilson05394f32010-11-08 19:18:58 +00001192 page_cache_release(obj->pages[i]);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001193
Chris Wilson05394f32010-11-08 19:18:58 +00001194 drm_free_large(obj->pages);
1195 obj->pages = NULL;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001196 return PTR_ERR(page);
1197}
1198
Chris Wilson5cdf5882010-09-27 15:51:07 +01001199static void
Chris Wilson05394f32010-11-08 19:18:58 +00001200i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001201{
Chris Wilson05394f32010-11-08 19:18:58 +00001202 int page_count = obj->base.size / PAGE_SIZE;
Eric Anholt673a3942008-07-30 12:06:12 -07001203 int i;
1204
Chris Wilson05394f32010-11-08 19:18:58 +00001205 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001206
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001207 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001208 i915_gem_object_save_bit_17_swizzle(obj);
1209
Chris Wilson05394f32010-11-08 19:18:58 +00001210 if (obj->madv == I915_MADV_DONTNEED)
1211 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001212
1213 for (i = 0; i < page_count; i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00001214 if (obj->dirty)
1215 set_page_dirty(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001216
Chris Wilson05394f32010-11-08 19:18:58 +00001217 if (obj->madv == I915_MADV_WILLNEED)
1218 mark_page_accessed(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001219
Chris Wilson05394f32010-11-08 19:18:58 +00001220 page_cache_release(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001221 }
Chris Wilson05394f32010-11-08 19:18:58 +00001222 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001223
Chris Wilson05394f32010-11-08 19:18:58 +00001224 drm_free_large(obj->pages);
1225 obj->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001226}
1227
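/* Mark the object as busy on the given ring: take a reference on first
 * activation, move it to the tail of the active lists and record the
 * seqno of its last rendering (and of its fence, if any).
 */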
Chris Wilson54cf91d2010-11-25 18:00:26 +00001228void
Chris Wilson05394f32010-11-08 19:18:58 +00001229i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001230 struct intel_ring_buffer *ring,
1231 u32 seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001232{
Chris Wilson05394f32010-11-08 19:18:58 +00001233 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001234 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter617dbe22010-02-11 22:16:02 +01001235
Zou Nan hai852835f2010-05-21 09:08:56 +08001236 BUG_ON(ring == NULL);
Chris Wilson05394f32010-11-08 19:18:58 +00001237 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001238
1239 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001240 if (!obj->active) {
1241 drm_gem_object_reference(&obj->base);
1242 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001243 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001244
Eric Anholt673a3942008-07-30 12:06:12 -07001245 /* Move from whatever list we were on to the tail of execution. */
Chris Wilson05394f32010-11-08 19:18:58 +00001246 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1247 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001248
Chris Wilson05394f32010-11-08 19:18:58 +00001249 obj->last_rendering_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001250 if (obj->fenced_gpu_access) {
1251 struct drm_i915_fence_reg *reg;
1252
1253 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1254
1255 obj->last_fenced_seqno = seqno;
1256 obj->last_fenced_ring = ring;
1257
1258 reg = &dev_priv->fence_regs[obj->fence_reg];
1259 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1260 }
1261}
1262
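/* Remove the object from its ring's active list and forget its last
 * rendering seqno.
 */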
1263static void
1264i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1265{
1266 list_del_init(&obj->ring_list);
1267 obj->last_rendering_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001268}
1269
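/* The object has completed rendering but still has pending GPU writes:
 * park it on the flushing list until those writes are flushed.
 */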
Eric Anholtce44b0e2008-11-06 16:00:31 -08001270static void
Chris Wilson05394f32010-11-08 19:18:58 +00001271i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
Eric Anholtce44b0e2008-11-06 16:00:31 -08001272{
Chris Wilson05394f32010-11-08 19:18:58 +00001273 struct drm_device *dev = obj->base.dev;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001274 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001275
Chris Wilson05394f32010-11-08 19:18:58 +00001276 BUG_ON(!obj->active);
1277 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001278
1279 i915_gem_object_move_off_active(obj);
1280}
1281
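/* Retire the object from the GPU: move it to the inactive (or pinned)
 * list, clear its ring and write tracking and drop the active reference.
 */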
1282static void
1283i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1284{
1285 struct drm_device *dev = obj->base.dev;
1286 struct drm_i915_private *dev_priv = dev->dev_private;
1287
1288 if (obj->pin_count != 0)
1289 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1290 else
1291 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1292
1293 BUG_ON(!list_empty(&obj->gpu_write_list));
1294 BUG_ON(!obj->active);
1295 obj->ring = NULL;
1296
1297 i915_gem_object_move_off_active(obj);
1298 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001299
1300 obj->active = 0;
Chris Wilson87ca9c82010-12-02 09:42:56 +00001301 obj->pending_gpu_write = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001302 drm_gem_object_unreference(&obj->base);
1303
1304 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08001305}
Eric Anholt673a3942008-07-30 12:06:12 -07001306
Chris Wilson963b4832009-09-20 23:03:54 +01001307/* Immediately discard the backing storage */
1308static void
Chris Wilson05394f32010-11-08 19:18:58 +00001309i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001310{
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001311 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001312
Chris Wilsonae9fed62010-08-07 11:01:30 +01001313 /* Our goal here is to return as much of the memory as
 1314 * possible back to the system, as we are called from OOM.
1315 * To do this we must instruct the shmfs to drop all of its
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001316 * backing pages, *now*.
Chris Wilsonae9fed62010-08-07 11:01:30 +01001317 */
Chris Wilson05394f32010-11-08 19:18:58 +00001318 inode = obj->base.filp->f_path.dentry->d_inode;
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001319 shmem_truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001320
Chris Wilsona14917e2012-02-24 21:13:38 +00001321 if (obj->base.map_list.map)
1322 drm_gem_free_mmap_offset(&obj->base);
1323
Chris Wilson05394f32010-11-08 19:18:58 +00001324 obj->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001325}
1326
1327static inline int
Chris Wilson05394f32010-11-08 19:18:58 +00001328i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001329{
Chris Wilson05394f32010-11-08 19:18:58 +00001330 return obj->madv == I915_MADV_DONTNEED;
Chris Wilson963b4832009-09-20 23:03:54 +01001331}
1332
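/* For each object on the ring's gpu_write_list whose write domain is
 * covered by this flush, clear the write domain and mark the object
 * active under the next request's seqno.
 */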
Eric Anholt673a3942008-07-30 12:06:12 -07001333static void
Chris Wilsondb53a302011-02-03 11:57:46 +00001334i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1335 uint32_t flush_domains)
Daniel Vetter63560392010-02-19 11:51:59 +01001336{
Chris Wilson05394f32010-11-08 19:18:58 +00001337 struct drm_i915_gem_object *obj, *next;
Daniel Vetter63560392010-02-19 11:51:59 +01001338
Chris Wilson05394f32010-11-08 19:18:58 +00001339 list_for_each_entry_safe(obj, next,
Chris Wilson64193402010-10-24 12:38:05 +01001340 &ring->gpu_write_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001341 gpu_write_list) {
Chris Wilson05394f32010-11-08 19:18:58 +00001342 if (obj->base.write_domain & flush_domains) {
1343 uint32_t old_write_domain = obj->base.write_domain;
Daniel Vetter63560392010-02-19 11:51:59 +01001344
Chris Wilson05394f32010-11-08 19:18:58 +00001345 obj->base.write_domain = 0;
1346 list_del_init(&obj->gpu_write_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001347 i915_gem_object_move_to_active(obj, ring,
Chris Wilsondb53a302011-02-03 11:57:46 +00001348 i915_gem_next_request_seqno(ring));
Daniel Vetter63560392010-02-19 11:51:59 +01001349
Daniel Vetter63560392010-02-19 11:51:59 +01001350 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00001351 obj->base.read_domains,
Daniel Vetter63560392010-02-19 11:51:59 +01001352 old_write_domain);
1353 }
1354 }
1355}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001356
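/* Return the next request seqno, skipping zero which is reserved as the
 * "no seqno" value.
 */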
Daniel Vetter53d227f2012-01-25 16:32:49 +01001357static u32
1358i915_gem_get_seqno(struct drm_device *dev)
1359{
1360 drm_i915_private_t *dev_priv = dev->dev_private;
1361 u32 seqno = dev_priv->next_seqno;
1362
1363 /* reserve 0 for non-seqno */
1364 if (++dev_priv->next_seqno == 0)
1365 dev_priv->next_seqno = 1;
1366
1367 return seqno;
1368}
1369
1370u32
1371i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1372{
1373 if (ring->outstanding_lazy_request == 0)
1374 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1375
1376 return ring->outstanding_lazy_request;
1377}
1378
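/* Emit a request on the ring: record its seqno and ring tail position,
 * queue it on the ring's (and the file's) request list, and arm the
 * hangcheck timer and retire work as needed.
 */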
Chris Wilson3cce4692010-10-27 16:11:02 +01001379int
Chris Wilsondb53a302011-02-03 11:57:46 +00001380i915_add_request(struct intel_ring_buffer *ring,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001381 struct drm_file *file,
Chris Wilsondb53a302011-02-03 11:57:46 +00001382 struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001383{
Chris Wilsondb53a302011-02-03 11:57:46 +00001384 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001385 uint32_t seqno;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001386 u32 request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001387 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01001388 int ret;
1389
1390 BUG_ON(request == NULL);
Daniel Vetter53d227f2012-01-25 16:32:49 +01001391 seqno = i915_gem_next_request_seqno(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001392
Chris Wilsona71d8d92012-02-15 11:25:36 +00001393 /* Record the position of the start of the request so that
1394 * should we detect the updated seqno part-way through the
1395 * GPU processing the request, we never over-estimate the
1396 * position of the head.
1397 */
1398 request_ring_position = intel_ring_get_tail(ring);
1399
Chris Wilson3cce4692010-10-27 16:11:02 +01001400 ret = ring->add_request(ring, &seqno);
1401 if (ret)
1402 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001403
Chris Wilsondb53a302011-02-03 11:57:46 +00001404 trace_i915_gem_request_add(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001405
1406 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001407 request->ring = ring;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001408 request->tail = request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001409 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001410 was_empty = list_empty(&ring->request_list);
1411 list_add_tail(&request->list, &ring->request_list);
1412
Chris Wilsondb53a302011-02-03 11:57:46 +00001413 if (file) {
1414 struct drm_i915_file_private *file_priv = file->driver_priv;
1415
Chris Wilson1c255952010-09-26 11:03:27 +01001416 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001417 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001418 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001419 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01001420 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00001421 }
Eric Anholt673a3942008-07-30 12:06:12 -07001422
Daniel Vetter5391d0c2012-01-25 14:03:57 +01001423 ring->outstanding_lazy_request = 0;
Chris Wilsondb53a302011-02-03 11:57:46 +00001424
Ben Gamarif65d9422009-09-14 17:48:44 -04001425 if (!dev_priv->mm.suspended) {
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07001426 if (i915_enable_hangcheck) {
1427 mod_timer(&dev_priv->hangcheck_timer,
1428 jiffies +
1429 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1430 }
Ben Gamarif65d9422009-09-14 17:48:44 -04001431 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001432 queue_delayed_work(dev_priv->wq,
1433 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001434 }
Chris Wilson3cce4692010-10-27 16:11:02 +01001435 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001436}
1437
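/* Detach the request from its file's client list, if it is still linked. */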
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001438static inline void
1439i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001440{
Chris Wilson1c255952010-09-26 11:03:27 +01001441 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07001442
Chris Wilson1c255952010-09-26 11:03:27 +01001443 if (!file_priv)
1444 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001445
Chris Wilson1c255952010-09-26 11:03:27 +01001446 spin_lock(&file_priv->mm.lock);
Herton Ronaldo Krzesinski09bfa512011-03-17 13:45:12 +00001447 if (request->file_priv) {
1448 list_del(&request->client_list);
1449 request->file_priv = NULL;
1450 }
Chris Wilson1c255952010-09-26 11:03:27 +01001451 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001452}
1453
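/* On GPU reset, drop every outstanding request on the ring and move its
 * active objects straight to the inactive list.
 */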
Chris Wilsondfaae392010-09-22 10:31:52 +01001454static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1455 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001456{
Chris Wilsondfaae392010-09-22 10:31:52 +01001457 while (!list_empty(&ring->request_list)) {
1458 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001459
Chris Wilsondfaae392010-09-22 10:31:52 +01001460 request = list_first_entry(&ring->request_list,
1461 struct drm_i915_gem_request,
1462 list);
1463
1464 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001465 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001466 kfree(request);
1467 }
1468
1469 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001470 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001471
Chris Wilson05394f32010-11-08 19:18:58 +00001472 obj = list_first_entry(&ring->active_list,
1473 struct drm_i915_gem_object,
1474 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001475
Chris Wilson05394f32010-11-08 19:18:58 +00001476 obj->base.write_domain = 0;
1477 list_del_init(&obj->gpu_write_list);
1478 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001479 }
Eric Anholt673a3942008-07-30 12:06:12 -07001480}
1481
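/* Invalidate all fence registers: drop mmaps on tiled objects, clear the
 * per-object fence state and write zero to each register.
 */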
Chris Wilson312817a2010-11-22 11:50:11 +00001482static void i915_gem_reset_fences(struct drm_device *dev)
1483{
1484 struct drm_i915_private *dev_priv = dev->dev_private;
1485 int i;
1486
Daniel Vetter4b9de732011-10-09 21:52:02 +02001487 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00001488 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00001489 struct drm_i915_gem_object *obj = reg->obj;
1490
1491 if (!obj)
1492 continue;
1493
1494 if (obj->tiling_mode)
1495 i915_gem_release_mmap(obj);
1496
Chris Wilsond9e86c02010-11-10 16:40:20 +00001497 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1498 reg->obj->fenced_gpu_access = false;
1499 reg->obj->last_fenced_seqno = 0;
1500 reg->obj->last_fenced_ring = NULL;
1501 i915_gem_clear_fence_reg(dev, reg);
Chris Wilson312817a2010-11-22 11:50:11 +00001502 }
1503}
1504
Chris Wilson069efc12010-09-30 16:53:18 +01001505void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07001506{
Chris Wilsondfaae392010-09-22 10:31:52 +01001507 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001508 struct drm_i915_gem_object *obj;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001509 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001510
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001511 for (i = 0; i < I915_NUM_RINGS; i++)
1512 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
Chris Wilsondfaae392010-09-22 10:31:52 +01001513
1514 /* Remove anything from the flushing lists. The GPU cache is likely
1515 * to be lost on reset along with the data, so simply move the
1516 * lost bo to the inactive list.
1517 */
1518 while (!list_empty(&dev_priv->mm.flushing_list)) {
Akshay Joshi0206e352011-08-16 15:34:10 -04001519 obj = list_first_entry(&dev_priv->mm.flushing_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001520 struct drm_i915_gem_object,
1521 mm_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001522
Chris Wilson05394f32010-11-08 19:18:58 +00001523 obj->base.write_domain = 0;
1524 list_del_init(&obj->gpu_write_list);
1525 i915_gem_object_move_to_inactive(obj);
Chris Wilson9375e442010-09-19 12:21:28 +01001526 }
Chris Wilson9375e442010-09-19 12:21:28 +01001527
Chris Wilsondfaae392010-09-22 10:31:52 +01001528 /* Move everything out of the GPU domains to ensure we do any
1529 * necessary invalidation upon reuse.
1530 */
Chris Wilson05394f32010-11-08 19:18:58 +00001531 list_for_each_entry(obj,
Chris Wilson77f01232010-09-19 12:31:36 +01001532 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001533 mm_list)
Chris Wilson77f01232010-09-19 12:31:36 +01001534 {
Chris Wilson05394f32010-11-08 19:18:58 +00001535 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilson77f01232010-09-19 12:31:36 +01001536 }
Chris Wilson069efc12010-09-30 16:53:18 +01001537
1538 /* The fence registers are invalidated so clear them out */
Chris Wilson312817a2010-11-22 11:50:11 +00001539 i915_gem_reset_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001540}
1541
1542/**
 1543 * This function retires completed requests as their sequence numbers are passed, moving finished buffers to the flushing or inactive lists.
1544 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00001545void
Chris Wilsondb53a302011-02-03 11:57:46 +00001546i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001547{
Eric Anholt673a3942008-07-30 12:06:12 -07001548 uint32_t seqno;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001549 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001550
Chris Wilsondb53a302011-02-03 11:57:46 +00001551 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001552 return;
1553
Chris Wilsondb53a302011-02-03 11:57:46 +00001554 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001555
Chris Wilson78501ea2010-10-27 12:18:21 +01001556 seqno = ring->get_seqno(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001557
Chris Wilson076e2c02011-01-21 10:07:18 +00001558 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001559 if (seqno >= ring->sync_seqno[i])
1560 ring->sync_seqno[i] = 0;
1561
Zou Nan hai852835f2010-05-21 09:08:56 +08001562 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001563 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001564
Zou Nan hai852835f2010-05-21 09:08:56 +08001565 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001566 struct drm_i915_gem_request,
1567 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001568
Chris Wilsondfaae392010-09-22 10:31:52 +01001569 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001570 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001571
Chris Wilsondb53a302011-02-03 11:57:46 +00001572 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00001573 /* We know the GPU must have read the request to have
1574 * sent us the seqno + interrupt, so use the position
 1575 * of the tail of the request to update the last known position
1576 * of the GPU head.
1577 */
1578 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001579
1580 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001581 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001582 kfree(request);
1583 }
1584
1585 /* Move any buffers on the active list that are no longer referenced
1586 * by the ringbuffer to the flushing/inactive lists as appropriate.
1587 */
1588 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001589 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001590
Akshay Joshi0206e352011-08-16 15:34:10 -04001591 obj = list_first_entry(&ring->active_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001592 struct drm_i915_gem_object,
1593 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001594
Chris Wilson05394f32010-11-08 19:18:58 +00001595 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001596 break;
1597
Chris Wilson05394f32010-11-08 19:18:58 +00001598 if (obj->base.write_domain != 0)
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001599 i915_gem_object_move_to_flushing(obj);
1600 else
1601 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001602 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001603
Chris Wilsondb53a302011-02-03 11:57:46 +00001604 if (unlikely(ring->trace_irq_seqno &&
1605 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001606 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00001607 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001608 }
Chris Wilson23bc5982010-09-29 16:10:57 +01001609
Chris Wilsondb53a302011-02-03 11:57:46 +00001610 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001611}
1612
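/* Retire completed requests on every ring, first freeing objects on the
 * deferred free list (done here to avoid recursing via unbind -> wait).
 */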
1613void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001614i915_gem_retire_requests(struct drm_device *dev)
1615{
1616 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001617 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001618
Chris Wilsonbe726152010-07-23 23:18:50 +01001619 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001620 struct drm_i915_gem_object *obj, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01001621
1622 /* We must be careful that during unbind() we do not
1623 * accidentally infinitely recurse into retire requests.
1624 * Currently:
1625 * retire -> free -> unbind -> wait -> retire_ring
1626 */
Chris Wilson05394f32010-11-08 19:18:58 +00001627 list_for_each_entry_safe(obj, next,
Chris Wilsonbe726152010-07-23 23:18:50 +01001628 &dev_priv->mm.deferred_free_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001629 mm_list)
Chris Wilson05394f32010-11-08 19:18:58 +00001630 i915_gem_free_object_tail(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01001631 }
1632
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001633 for (i = 0; i < I915_NUM_RINGS; i++)
Chris Wilsondb53a302011-02-03 11:57:46 +00001634 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001635}
1636
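/* Periodic housekeeping: retire finished requests, send a flush down any
 * ring that still has queued writes, and re-arm the work unless idle.
 */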
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001637static void
Eric Anholt673a3942008-07-30 12:06:12 -07001638i915_gem_retire_work_handler(struct work_struct *work)
1639{
1640 drm_i915_private_t *dev_priv;
1641 struct drm_device *dev;
Chris Wilson0a587052011-01-09 21:05:44 +00001642 bool idle;
1643 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001644
1645 dev_priv = container_of(work, drm_i915_private_t,
1646 mm.retire_work.work);
1647 dev = dev_priv->dev;
1648
Chris Wilson891b48c2010-09-29 12:26:37 +01001649 /* Come back later if the device is busy... */
1650 if (!mutex_trylock(&dev->struct_mutex)) {
1651 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1652 return;
1653 }
1654
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001655 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001656
Chris Wilson0a587052011-01-09 21:05:44 +00001657 /* Send a periodic flush down the ring so we don't hold onto GEM
1658 * objects indefinitely.
1659 */
1660 idle = true;
1661 for (i = 0; i < I915_NUM_RINGS; i++) {
1662 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1663
1664 if (!list_empty(&ring->gpu_write_list)) {
1665 struct drm_i915_gem_request *request;
1666 int ret;
1667
Chris Wilsondb53a302011-02-03 11:57:46 +00001668 ret = i915_gem_flush_ring(ring,
1669 0, I915_GEM_GPU_DOMAINS);
Chris Wilson0a587052011-01-09 21:05:44 +00001670 request = kzalloc(sizeof(*request), GFP_KERNEL);
1671 if (ret || request == NULL ||
Chris Wilsondb53a302011-02-03 11:57:46 +00001672 i915_add_request(ring, NULL, request))
Chris Wilson0a587052011-01-09 21:05:44 +00001673 kfree(request);
1674 }
1675
1676 idle &= list_empty(&ring->request_list);
1677 }
1678
1679 if (!dev_priv->mm.suspended && !idle)
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001680 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Chris Wilson0a587052011-01-09 21:05:44 +00001681
Eric Anholt673a3942008-07-30 12:06:12 -07001682 mutex_unlock(&dev->struct_mutex);
1683}
1684
Chris Wilsondb53a302011-02-03 11:57:46 +00001685/**
1686 * Waits for a sequence number to be signaled, and cleans up the
1687 * request and object lists appropriately for that event.
1688 */
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001689int
Chris Wilsondb53a302011-02-03 11:57:46 +00001690i915_wait_request(struct intel_ring_buffer *ring,
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001691 uint32_t seqno,
1692 bool do_retire)
Eric Anholt673a3942008-07-30 12:06:12 -07001693{
Chris Wilsondb53a302011-02-03 11:57:46 +00001694 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001695 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001696 int ret = 0;
1697
1698 BUG_ON(seqno == 0);
1699
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001700 if (atomic_read(&dev_priv->mm.wedged)) {
1701 struct completion *x = &dev_priv->error_completion;
1702 bool recovery_complete;
1703 unsigned long flags;
1704
1705 /* Give the error handler a chance to run. */
1706 spin_lock_irqsave(&x->wait.lock, flags);
1707 recovery_complete = x->done > 0;
1708 spin_unlock_irqrestore(&x->wait.lock, flags);
1709
1710 return recovery_complete ? -EIO : -EAGAIN;
1711 }
Ben Gamariffed1d02009-09-14 17:48:41 -04001712
Chris Wilson5d97eb62010-11-10 20:40:02 +00001713 if (seqno == ring->outstanding_lazy_request) {
Chris Wilson3cce4692010-10-27 16:11:02 +01001714 struct drm_i915_gem_request *request;
1715
1716 request = kzalloc(sizeof(*request), GFP_KERNEL);
1717 if (request == NULL)
Daniel Vettere35a41d2010-02-11 22:13:59 +01001718 return -ENOMEM;
Chris Wilson3cce4692010-10-27 16:11:02 +01001719
Chris Wilsondb53a302011-02-03 11:57:46 +00001720 ret = i915_add_request(ring, NULL, request);
Chris Wilson3cce4692010-10-27 16:11:02 +01001721 if (ret) {
1722 kfree(request);
1723 return ret;
1724 }
1725
1726 seqno = request->seqno;
Daniel Vettere35a41d2010-02-11 22:13:59 +01001727 }
1728
Chris Wilson78501ea2010-10-27 12:18:21 +01001729 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00001730 if (HAS_PCH_SPLIT(ring->dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001731 ier = I915_READ(DEIER) | I915_READ(GTIER);
1732 else
1733 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001734 if (!ier) {
1735 DRM_ERROR("something (likely vbetool) disabled "
1736 "interrupts, re-enabling\n");
Chris Wilsonf01c22f2011-06-28 11:48:51 +01001737 ring->dev->driver->irq_preinstall(ring->dev);
1738 ring->dev->driver->irq_postinstall(ring->dev);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001739 }
1740
Chris Wilsondb53a302011-02-03 11:57:46 +00001741 trace_i915_gem_request_wait_begin(ring, seqno);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001742
Chris Wilsonb2223492010-10-27 15:27:33 +01001743 ring->waiting_seqno = seqno;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001744 if (ring->irq_get(ring)) {
Chris Wilsonce453d82011-02-21 14:43:56 +00001745 if (dev_priv->mm.interruptible)
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001746 ret = wait_event_interruptible(ring->irq_queue,
1747 i915_seqno_passed(ring->get_seqno(ring), seqno)
1748 || atomic_read(&dev_priv->mm.wedged));
1749 else
1750 wait_event(ring->irq_queue,
1751 i915_seqno_passed(ring->get_seqno(ring), seqno)
1752 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001753
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001754 ring->irq_put(ring);
Eric Anholte959b5d2011-12-22 14:55:01 -08001755 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
1756 seqno) ||
1757 atomic_read(&dev_priv->mm.wedged), 3000))
Chris Wilsonb5ba1772010-12-14 12:17:15 +00001758 ret = -EBUSY;
Chris Wilsonb2223492010-10-27 15:27:33 +01001759 ring->waiting_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001760
Chris Wilsondb53a302011-02-03 11:57:46 +00001761 trace_i915_gem_request_wait_end(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001762 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001763 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001764 ret = -EAGAIN;
Eric Anholt673a3942008-07-30 12:06:12 -07001765
Eric Anholt673a3942008-07-30 12:06:12 -07001766 /* Directly dispatch request retiring. While we have the work queue
1767 * to handle this, the waiter on a request often wants an associated
1768 * buffer to have made it to the inactive list, and we would need
1769 * a separate wait queue to handle that.
1770 */
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001771 if (ret == 0 && do_retire)
Chris Wilsondb53a302011-02-03 11:57:46 +00001772 i915_gem_retire_requests_ring(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001773
1774 return ret;
1775}
1776
Daniel Vetter48764bf2009-09-15 22:57:32 +02001777/**
Eric Anholt673a3942008-07-30 12:06:12 -07001778 * Ensures that all rendering to the object has completed and the object is
1779 * safe to unbind from the GTT or access from the CPU.
1780 */
Chris Wilson54cf91d2010-11-25 18:00:26 +00001781int
Chris Wilsonce453d82011-02-21 14:43:56 +00001782i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001783{
Eric Anholt673a3942008-07-30 12:06:12 -07001784 int ret;
1785
Eric Anholte47c68e2008-11-14 13:35:19 -08001786 /* This function only exists to support waiting for existing rendering,
1787 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07001788 */
Chris Wilson05394f32010-11-08 19:18:58 +00001789 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07001790
1791 /* If there is rendering queued on the buffer being evicted, wait for
1792 * it.
1793 */
Chris Wilson05394f32010-11-08 19:18:58 +00001794 if (obj->active) {
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001795 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
1796 true);
Chris Wilson2cf34d72010-09-14 13:03:28 +01001797 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07001798 return ret;
1799 }
1800
1801 return 0;
1802}
1803
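/* Force the object out of the GTT domain: flush pending GTT access, drop
 * CPU mmaps and clear the GTT read/write domains.
 */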
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01001804static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
1805{
1806 u32 old_write_domain, old_read_domains;
1807
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01001808 /* Act as a barrier for all accesses through the GTT */
1809 mb();
1810
1811 /* Force a pagefault for domain tracking on next user access */
1812 i915_gem_release_mmap(obj);
1813
Keith Packardb97c3d92011-06-24 21:02:59 -07001814 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
1815 return;
1816
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01001817 old_read_domains = obj->base.read_domains;
1818 old_write_domain = obj->base.write_domain;
1819
1820 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
1821 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
1822
1823 trace_i915_gem_object_change_domain(obj,
1824 old_read_domains,
1825 old_write_domain);
1826}
1827
Eric Anholt673a3942008-07-30 12:06:12 -07001828/**
1829 * Unbinds an object from the GTT aperture.
1830 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08001831int
Chris Wilson05394f32010-11-08 19:18:58 +00001832i915_gem_object_unbind(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001833{
Daniel Vetter7bddb012012-02-09 17:15:47 +01001834 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001835 int ret = 0;
1836
Chris Wilson05394f32010-11-08 19:18:58 +00001837 if (obj->gtt_space == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07001838 return 0;
1839
Chris Wilson05394f32010-11-08 19:18:58 +00001840 if (obj->pin_count != 0) {
Eric Anholt673a3942008-07-30 12:06:12 -07001841 DRM_ERROR("Attempting to unbind pinned buffer\n");
1842 return -EINVAL;
1843 }
1844
Chris Wilsona8198ee2011-04-13 22:04:09 +01001845 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson8dc17752010-07-23 23:18:51 +01001846 if (ret == -ERESTARTSYS)
Eric Anholt673a3942008-07-30 12:06:12 -07001847 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01001848 /* Continue on if we fail due to EIO: the GPU is hung, so we
 1849 * should be safe, and we need to clean up or else we might
 1850 * cause memory corruption through use-after-free.
1851 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01001852
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01001853 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01001854
1855 /* Move the object to the CPU domain to ensure that
1856 * any possible CPU writes while it's not in the GTT
1857 * are flushed when we go to remap it.
1858 */
1859 if (ret == 0)
1860 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1861 if (ret == -ERESTARTSYS)
1862 return ret;
Chris Wilson812ed4922010-09-30 15:08:57 +01001863 if (ret) {
Chris Wilsona8198ee2011-04-13 22:04:09 +01001864 /* In the event of a disaster, abandon all caches and
1865 * hope for the best.
1866 */
Chris Wilson812ed4922010-09-30 15:08:57 +01001867 i915_gem_clflush_object(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001868 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Chris Wilson812ed4922010-09-30 15:08:57 +01001869 }
Eric Anholt673a3942008-07-30 12:06:12 -07001870
Daniel Vetter96b47b62009-12-15 17:50:00 +01001871 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00001872 ret = i915_gem_object_put_fence(obj);
1873 if (ret == -ERESTARTSYS)
1874 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01001875
Chris Wilsondb53a302011-02-03 11:57:46 +00001876 trace_i915_gem_object_unbind(obj);
1877
Daniel Vetter74898d72012-02-15 23:50:22 +01001878 if (obj->has_global_gtt_mapping)
1879 i915_gem_gtt_unbind_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01001880 if (obj->has_aliasing_ppgtt_mapping) {
1881 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
1882 obj->has_aliasing_ppgtt_mapping = 0;
1883 }
Daniel Vetter74163902012-02-15 23:50:21 +01001884 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01001885
Chris Wilsone5281cc2010-10-28 13:45:36 +01001886 i915_gem_object_put_pages_gtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001887
Chris Wilson6299f992010-11-24 12:23:44 +00001888 list_del_init(&obj->gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00001889 list_del_init(&obj->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01001890 /* Avoid an unnecessary call to unbind on rebind. */
Chris Wilson05394f32010-11-08 19:18:58 +00001891 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07001892
Chris Wilson05394f32010-11-08 19:18:58 +00001893 drm_mm_put_block(obj->gtt_space);
1894 obj->gtt_space = NULL;
1895 obj->gtt_offset = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001896
Chris Wilson05394f32010-11-08 19:18:58 +00001897 if (i915_gem_object_is_purgeable(obj))
Chris Wilson963b4832009-09-20 23:03:54 +01001898 i915_gem_object_truncate(obj);
1899
Chris Wilson8dc17752010-07-23 23:18:51 +01001900 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001901}
1902
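/* Emit a flush of the given domains on the ring and move any affected
 * objects off the gpu_write_list onto the active list.
 */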
Chris Wilson88241782011-01-07 17:09:48 +00001903int
Chris Wilsondb53a302011-02-03 11:57:46 +00001904i915_gem_flush_ring(struct intel_ring_buffer *ring,
Chris Wilson54cf91d2010-11-25 18:00:26 +00001905 uint32_t invalidate_domains,
1906 uint32_t flush_domains)
1907{
Chris Wilson88241782011-01-07 17:09:48 +00001908 int ret;
1909
Chris Wilson36d527d2011-03-19 22:26:49 +00001910 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
1911 return 0;
1912
Chris Wilsondb53a302011-02-03 11:57:46 +00001913 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
1914
Chris Wilson88241782011-01-07 17:09:48 +00001915 ret = ring->flush(ring, invalidate_domains, flush_domains);
1916 if (ret)
1917 return ret;
1918
Chris Wilson36d527d2011-03-19 22:26:49 +00001919 if (flush_domains & I915_GEM_GPU_DOMAINS)
1920 i915_gem_process_flushing_list(ring, flush_domains);
1921
Chris Wilson88241782011-01-07 17:09:48 +00001922 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00001923}
1924
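/* Flush any outstanding GPU writes on the ring and wait until it has
 * finished all queued requests.
 */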
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001925static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
Chris Wilsona56ba562010-09-28 10:07:56 +01001926{
Chris Wilson88241782011-01-07 17:09:48 +00001927 int ret;
1928
Chris Wilson395b70b2010-10-28 21:28:46 +01001929 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
Chris Wilson64193402010-10-24 12:38:05 +01001930 return 0;
1931
Chris Wilson88241782011-01-07 17:09:48 +00001932 if (!list_empty(&ring->gpu_write_list)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00001933 ret = i915_gem_flush_ring(ring,
Chris Wilson0ac74c62010-12-06 14:36:02 +00001934 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
Chris Wilson88241782011-01-07 17:09:48 +00001935 if (ret)
1936 return ret;
1937 }
1938
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001939 return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
1940 do_retire);
Chris Wilsona56ba562010-09-28 10:07:56 +01001941}
1942
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001943int i915_gpu_idle(struct drm_device *dev, bool do_retire)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01001944{
1945 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001946 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01001947
Daniel Vetter4df2faf2010-02-19 11:52:00 +01001948 /* Flush everything onto the inactive list. */
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001949 for (i = 0; i < I915_NUM_RINGS; i++) {
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001950 ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001951 if (ret)
1952 return ret;
1953 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08001954
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001955 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01001956}
1957
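/* The *_write_fence_reg() helpers below program a fence register for the
 * object, either directly with MMIO or pipelined via LOAD_REGISTER_IMM
 * on the given ring.
 */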
Daniel Vetterc6642782010-11-12 13:46:18 +00001958static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
1959 struct intel_ring_buffer *pipelined)
Eric Anholt4e901fd2009-10-26 16:44:17 -07001960{
Chris Wilson05394f32010-11-08 19:18:58 +00001961 struct drm_device *dev = obj->base.dev;
Eric Anholt4e901fd2009-10-26 16:44:17 -07001962 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001963 u32 size = obj->gtt_space->size;
1964 int regnum = obj->fence_reg;
Eric Anholt4e901fd2009-10-26 16:44:17 -07001965 uint64_t val;
1966
Chris Wilson05394f32010-11-08 19:18:58 +00001967 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Daniel Vetterc6642782010-11-12 13:46:18 +00001968 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00001969 val |= obj->gtt_offset & 0xfffff000;
1970 val |= (uint64_t)((obj->stride / 128) - 1) <<
Eric Anholt4e901fd2009-10-26 16:44:17 -07001971 SANDYBRIDGE_FENCE_PITCH_SHIFT;
1972
Chris Wilson05394f32010-11-08 19:18:58 +00001973 if (obj->tiling_mode == I915_TILING_Y)
Eric Anholt4e901fd2009-10-26 16:44:17 -07001974 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1975 val |= I965_FENCE_REG_VALID;
1976
Daniel Vetterc6642782010-11-12 13:46:18 +00001977 if (pipelined) {
1978 int ret = intel_ring_begin(pipelined, 6);
1979 if (ret)
1980 return ret;
1981
1982 intel_ring_emit(pipelined, MI_NOOP);
1983 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
1984 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
1985 intel_ring_emit(pipelined, (u32)val);
1986 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
1987 intel_ring_emit(pipelined, (u32)(val >> 32));
1988 intel_ring_advance(pipelined);
1989 } else
1990 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
1991
1992 return 0;
Eric Anholt4e901fd2009-10-26 16:44:17 -07001993}
1994
Daniel Vetterc6642782010-11-12 13:46:18 +00001995static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
1996 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001997{
Chris Wilson05394f32010-11-08 19:18:58 +00001998 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001999 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002000 u32 size = obj->gtt_space->size;
2001 int regnum = obj->fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002002 uint64_t val;
2003
Chris Wilson05394f32010-11-08 19:18:58 +00002004 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Jesse Barnesde151cf2008-11-12 10:03:55 -08002005 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00002006 val |= obj->gtt_offset & 0xfffff000;
2007 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2008 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002009 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2010 val |= I965_FENCE_REG_VALID;
2011
Daniel Vetterc6642782010-11-12 13:46:18 +00002012 if (pipelined) {
2013 int ret = intel_ring_begin(pipelined, 6);
2014 if (ret)
2015 return ret;
2016
2017 intel_ring_emit(pipelined, MI_NOOP);
2018 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2019 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2020 intel_ring_emit(pipelined, (u32)val);
2021 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2022 intel_ring_emit(pipelined, (u32)(val >> 32));
2023 intel_ring_advance(pipelined);
2024 } else
2025 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2026
2027 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002028}
2029
Daniel Vetterc6642782010-11-12 13:46:18 +00002030static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2031 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002032{
Chris Wilson05394f32010-11-08 19:18:58 +00002033 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002034 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002035 u32 size = obj->gtt_space->size;
Daniel Vetterc6642782010-11-12 13:46:18 +00002036 u32 fence_reg, val, pitch_val;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002037 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002038
Daniel Vetterc6642782010-11-12 13:46:18 +00002039 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2040 (size & -size) != size ||
2041 (obj->gtt_offset & (size - 1)),
2042 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2043 obj->gtt_offset, obj->map_and_fenceable, size))
2044 return -EINVAL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002045
Daniel Vetterc6642782010-11-12 13:46:18 +00002046 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
Jesse Barnes0f973f22009-01-26 17:10:45 -08002047 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002048 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002049 tile_width = 512;
2050
2051 /* Note: pitch better be a power of two tile widths */
Chris Wilson05394f32010-11-08 19:18:58 +00002052 pitch_val = obj->stride / tile_width;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002053 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002054
Chris Wilson05394f32010-11-08 19:18:58 +00002055 val = obj->gtt_offset;
2056 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002057 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002058 val |= I915_FENCE_SIZE_BITS(size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002059 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2060 val |= I830_FENCE_REG_VALID;
2061
Chris Wilson05394f32010-11-08 19:18:58 +00002062 fence_reg = obj->fence_reg;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002063 if (fence_reg < 8)
2064 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002065 else
Chris Wilsona00b10c2010-09-24 21:15:47 +01002066 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
Daniel Vetterc6642782010-11-12 13:46:18 +00002067
2068 if (pipelined) {
2069 int ret = intel_ring_begin(pipelined, 4);
2070 if (ret)
2071 return ret;
2072
2073 intel_ring_emit(pipelined, MI_NOOP);
2074 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2075 intel_ring_emit(pipelined, fence_reg);
2076 intel_ring_emit(pipelined, val);
2077 intel_ring_advance(pipelined);
2078 } else
2079 I915_WRITE(fence_reg, val);
2080
2081 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002082}
2083
Daniel Vetterc6642782010-11-12 13:46:18 +00002084static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2085 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002086{
Chris Wilson05394f32010-11-08 19:18:58 +00002087 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002088 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002089 u32 size = obj->gtt_space->size;
2090 int regnum = obj->fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002091 uint32_t val;
2092 uint32_t pitch_val;
2093
Daniel Vetterc6642782010-11-12 13:46:18 +00002094 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2095 (size & -size) != size ||
2096 (obj->gtt_offset & (size - 1)),
2097 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2098 obj->gtt_offset, size))
2099 return -EINVAL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002100
Chris Wilson05394f32010-11-08 19:18:58 +00002101 pitch_val = obj->stride / 128;
Eric Anholte76a16d2009-05-26 17:44:56 -07002102 pitch_val = ffs(pitch_val) - 1;
Eric Anholte76a16d2009-05-26 17:44:56 -07002103
Chris Wilson05394f32010-11-08 19:18:58 +00002104 val = obj->gtt_offset;
2105 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002106 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetterc6642782010-11-12 13:46:18 +00002107 val |= I830_FENCE_SIZE_BITS(size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002108 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2109 val |= I830_FENCE_REG_VALID;
2110
Daniel Vetterc6642782010-11-12 13:46:18 +00002111 if (pipelined) {
2112 int ret = intel_ring_begin(pipelined, 4);
2113 if (ret)
2114 return ret;
2115
2116 intel_ring_emit(pipelined, MI_NOOP);
2117 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2118 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2119 intel_ring_emit(pipelined, val);
2120 intel_ring_advance(pipelined);
2121 } else
2122 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2123
2124 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002125}
2126
Chris Wilsond9e86c02010-11-10 16:40:20 +00002127static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2128{
2129 return i915_seqno_passed(ring->get_seqno(ring), seqno);
2130}
2131
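/* Quiesce GPU access through the fence before it is changed: flush any
 * pending fenced writes and, unless the change is pipelined on the same
 * ring, wait for the last fenced rendering to complete.
 */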
2132static int
2133i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
Chris Wilsonce453d82011-02-21 14:43:56 +00002134 struct intel_ring_buffer *pipelined)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002135{
2136 int ret;
2137
2138 if (obj->fenced_gpu_access) {
Chris Wilson88241782011-01-07 17:09:48 +00002139 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002140 ret = i915_gem_flush_ring(obj->last_fenced_ring,
Chris Wilson88241782011-01-07 17:09:48 +00002141 0, obj->base.write_domain);
2142 if (ret)
2143 return ret;
2144 }
Chris Wilsond9e86c02010-11-10 16:40:20 +00002145
2146 obj->fenced_gpu_access = false;
2147 }
2148
2149 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2150 if (!ring_passed_seqno(obj->last_fenced_ring,
2151 obj->last_fenced_seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002152 ret = i915_wait_request(obj->last_fenced_ring,
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002153 obj->last_fenced_seqno,
2154 true);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002155 if (ret)
2156 return ret;
2157 }
2158
2159 obj->last_fenced_seqno = 0;
2160 obj->last_fenced_ring = NULL;
2161 }
2162
Chris Wilson63256ec2011-01-04 18:42:07 +00002163 /* Ensure that all CPU reads are completed before installing a fence
2164 * and all writes before removing the fence.
2165 */
2166 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2167 mb();
2168
Chris Wilsond9e86c02010-11-10 16:40:20 +00002169 return 0;
2170}
2171
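/* Release the object's fence register, waiting for outstanding fenced
 * access and dropping CPU mmaps on tiled objects first.
 */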
2172int
2173i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2174{
2175 int ret;
2176
2177 if (obj->tiling_mode)
2178 i915_gem_release_mmap(obj);
2179
Chris Wilsonce453d82011-02-21 14:43:56 +00002180 ret = i915_gem_object_flush_fence(obj, NULL);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002181 if (ret)
2182 return ret;
2183
2184 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2185 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002186
2187 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002188 i915_gem_clear_fence_reg(obj->base.dev,
2189 &dev_priv->fence_regs[obj->fence_reg]);
2190
2191 obj->fence_reg = I915_FENCE_REG_NONE;
2192 }
2193
2194 return 0;
2195}
2196
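/* Pick a fence register to (re)use: prefer a completely free one,
 * otherwise steal the least recently used unpinned register, preferring
 * one whose object was last fenced on the given ring.
 */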
2197static struct drm_i915_fence_reg *
2198i915_find_fence_reg(struct drm_device *dev,
2199 struct intel_ring_buffer *pipelined)
Daniel Vetterae3db242010-02-19 11:51:58 +01002200{
Daniel Vetterae3db242010-02-19 11:51:58 +01002201 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002202 struct drm_i915_fence_reg *reg, *first, *avail;
2203 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01002204
2205 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002206 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002207 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2208 reg = &dev_priv->fence_regs[i];
2209 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002210 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002211
Chris Wilson1690e1e2011-12-14 13:57:08 +01002212 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002213 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002214 }
2215
Chris Wilsond9e86c02010-11-10 16:40:20 +00002216 if (avail == NULL)
2217 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002218
2219 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002220 avail = first = NULL;
2221 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01002222 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01002223 continue;
2224
Chris Wilsond9e86c02010-11-10 16:40:20 +00002225 if (first == NULL)
2226 first = reg;
2227
2228 if (!pipelined ||
2229 !reg->obj->last_fenced_ring ||
2230 reg->obj->last_fenced_ring == pipelined) {
2231 avail = reg;
2232 break;
2233 }
Daniel Vetterae3db242010-02-19 11:51:58 +01002234 }
2235
Chris Wilsond9e86c02010-11-10 16:40:20 +00002236 if (avail == NULL)
2237 avail = first;
Daniel Vetterae3db242010-02-19 11:51:58 +01002238
Chris Wilsona00b10c2010-09-24 21:15:47 +01002239 return avail;
Daniel Vetterae3db242010-02-19 11:51:58 +01002240}
2241
Jesse Barnesde151cf2008-11-12 10:03:55 -08002242/**
Chris Wilsond9e86c02010-11-10 16:40:20 +00002243 * i915_gem_object_get_fence - set up a fence reg for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08002244 * @obj: object to map through a fence reg
Chris Wilsond9e86c02010-11-10 16:40:20 +00002245 * @pipelined: ring on which to queue the change, or NULL for CPU access
Jesse Barnesde151cf2008-11-12 10:03:55 -08002247 *
2248 * When mapping objects through the GTT, userspace wants to be able to write
2249 * to them without having to worry about swizzling if the object is tiled.
2250 *
2251 * This function walks the fence regs looking for a free one for @obj,
2252 * stealing one if it can't find any.
2253 *
2254 * It then sets up the reg based on the object's properties: address, pitch
2255 * and tiling format.
2256 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002257int
Chris Wilsond9e86c02010-11-10 16:40:20 +00002258i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
Chris Wilsonce453d82011-02-21 14:43:56 +00002259 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002260{
Chris Wilson05394f32010-11-08 19:18:58 +00002261 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002262 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002263 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002264 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002265
Chris Wilson6bda10d2010-12-05 21:04:18 +00002266 /* XXX disable pipelining. There are bugs. Shocking. */
2267 pipelined = NULL;
2268
Chris Wilsond9e86c02010-11-10 16:40:20 +00002269 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00002270 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2271 reg = &dev_priv->fence_regs[obj->fence_reg];
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002272 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002273
Chris Wilson29c5a582011-03-17 15:23:22 +00002274 if (obj->tiling_changed) {
2275 ret = i915_gem_object_flush_fence(obj, pipelined);
2276 if (ret)
2277 return ret;
2278
2279 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2280 pipelined = NULL;
2281
2282 if (pipelined) {
2283 reg->setup_seqno =
2284 i915_gem_next_request_seqno(pipelined);
2285 obj->last_fenced_seqno = reg->setup_seqno;
2286 obj->last_fenced_ring = pipelined;
2287 }
2288
2289 goto update;
2290 }
Chris Wilsond9e86c02010-11-10 16:40:20 +00002291
2292 if (!pipelined) {
2293 if (reg->setup_seqno) {
2294 if (!ring_passed_seqno(obj->last_fenced_ring,
2295 reg->setup_seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002296 ret = i915_wait_request(obj->last_fenced_ring,
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002297 reg->setup_seqno,
2298 true);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002299 if (ret)
2300 return ret;
2301 }
2302
2303 reg->setup_seqno = 0;
2304 }
2305 } else if (obj->last_fenced_ring &&
2306 obj->last_fenced_ring != pipelined) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002307 ret = i915_gem_object_flush_fence(obj, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002308 if (ret)
2309 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002310 }
2311
Eric Anholta09ba7f2009-08-29 12:49:51 -07002312 return 0;
2313 }
2314
Chris Wilsond9e86c02010-11-10 16:40:20 +00002315 reg = i915_find_fence_reg(dev, pipelined);
2316 if (reg == NULL)
Daniel Vetter39965b32011-12-14 13:57:09 +01002317 return -EDEADLK;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002318
Chris Wilsonce453d82011-02-21 14:43:56 +00002319 ret = i915_gem_object_flush_fence(obj, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002320 if (ret)
Daniel Vetterae3db242010-02-19 11:51:58 +01002321 return ret;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002322
Chris Wilsond9e86c02010-11-10 16:40:20 +00002323 if (reg->obj) {
2324 struct drm_i915_gem_object *old = reg->obj;
2325
2326 drm_gem_object_reference(&old->base);
2327
2328 if (old->tiling_mode)
2329 i915_gem_release_mmap(old);
2330
Chris Wilsonce453d82011-02-21 14:43:56 +00002331 ret = i915_gem_object_flush_fence(old, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002332 if (ret) {
2333 drm_gem_object_unreference(&old->base);
2334 return ret;
2335 }
2336
2337 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2338 pipelined = NULL;
2339
2340 old->fence_reg = I915_FENCE_REG_NONE;
2341 old->last_fenced_ring = pipelined;
2342 old->last_fenced_seqno =
Chris Wilsondb53a302011-02-03 11:57:46 +00002343 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002344
2345 drm_gem_object_unreference(&old->base);
2346 } else if (obj->last_fenced_seqno == 0)
2347 pipelined = NULL;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002348
Jesse Barnesde151cf2008-11-12 10:03:55 -08002349 reg->obj = obj;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002350 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2351 obj->fence_reg = reg - dev_priv->fence_regs;
2352 obj->last_fenced_ring = pipelined;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002353
Chris Wilsond9e86c02010-11-10 16:40:20 +00002354 reg->setup_seqno =
Chris Wilsondb53a302011-02-03 11:57:46 +00002355 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002356 obj->last_fenced_seqno = reg->setup_seqno;
2357
2358update:
2359 obj->tiling_changed = false;
Chris Wilsone259bef2010-09-17 00:32:02 +01002360 switch (INTEL_INFO(dev)->gen) {
Eric Anholt25aebfc32011-05-06 13:55:53 -07002361 case 7:
Chris Wilsone259bef2010-09-17 00:32:02 +01002362 case 6:
Daniel Vetterc6642782010-11-12 13:46:18 +00002363 ret = sandybridge_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002364 break;
2365 case 5:
2366 case 4:
Daniel Vetterc6642782010-11-12 13:46:18 +00002367 ret = i965_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002368 break;
2369 case 3:
Daniel Vetterc6642782010-11-12 13:46:18 +00002370 ret = i915_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002371 break;
2372 case 2:
Daniel Vetterc6642782010-11-12 13:46:18 +00002373 ret = i830_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002374 break;
2375 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002376
Daniel Vetterc6642782010-11-12 13:46:18 +00002377 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002378}
2379
2380/**
2381 * i915_gem_clear_fence_reg - clear out fence register info
 2382 * @dev: DRM device
 * @reg: fence register to clear
2383 *
2384 * Zeroes out the fence register itself and clears out the associated
Chris Wilson05394f32010-11-08 19:18:58 +00002385 * data structures in dev_priv and obj.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002386 */
2387static void
Chris Wilsond9e86c02010-11-10 16:40:20 +00002388i915_gem_clear_fence_reg(struct drm_device *dev,
2389 struct drm_i915_fence_reg *reg)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002390{
Jesse Barnes79e53942008-11-07 14:24:08 -08002391 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002392 uint32_t fence_reg = reg - dev_priv->fence_regs;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002393
Chris Wilsone259bef2010-09-17 00:32:02 +01002394 switch (INTEL_INFO(dev)->gen) {
Eric Anholt25aebfc32011-05-06 13:55:53 -07002395 case 7:
Chris Wilsone259bef2010-09-17 00:32:02 +01002396 case 6:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002397 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002398 break;
2399 case 5:
2400 case 4:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002401 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002402 break;
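	/*
	 * Gen2 and gen3 share the 32-bit register write below. The "case 2:"
	 * label nested inside the else is intentional: gen2 always uses the
	 * 830 register block, while gen3 objects with fence_reg >= 8 reach
	 * the 945 extension registers.
	 */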
2403 case 3:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002404 if (fence_reg >= 8)
2405 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002406 else
Chris Wilsone259bef2010-09-17 00:32:02 +01002407 case 2:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002408 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002409
2410 I915_WRITE(fence_reg, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002411 break;
Eric Anholtdc529a42009-03-10 22:34:49 -07002412 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002413
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002414 list_del_init(&reg->lru_list);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002415 reg->obj = NULL;
2416 reg->setup_seqno = 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002417 reg->pin_count = 0;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002418}
2419
2420/**
Eric Anholt673a3942008-07-30 12:06:12 -07002421 * Finds free space in the GTT aperture and binds the object there.
2422 */
2423static int
Chris Wilson05394f32010-11-08 19:18:58 +00002424i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
Daniel Vetter920afa72010-09-16 17:54:23 +02002425 unsigned alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01002426 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07002427{
Chris Wilson05394f32010-11-08 19:18:58 +00002428 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07002429 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002430 struct drm_mm_node *free_space;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002431 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Daniel Vetter5e783302010-11-14 22:32:36 +01002432 u32 size, fence_size, fence_alignment, unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002433 bool mappable, fenceable;
Chris Wilson07f73f62009-09-14 16:50:30 +01002434 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002435
Chris Wilson05394f32010-11-08 19:18:58 +00002436 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002437 DRM_ERROR("Attempting to bind a purgeable object\n");
2438 return -EINVAL;
2439 }
2440
Chris Wilsone28f8712011-07-18 13:11:49 -07002441 fence_size = i915_gem_get_gtt_size(dev,
2442 obj->base.size,
2443 obj->tiling_mode);
2444 fence_alignment = i915_gem_get_gtt_alignment(dev,
2445 obj->base.size,
2446 obj->tiling_mode);
2447 unfenced_alignment =
2448 i915_gem_get_unfenced_gtt_alignment(dev,
2449 obj->base.size,
2450 obj->tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002451
Eric Anholt673a3942008-07-30 12:06:12 -07002452 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01002453 alignment = map_and_fenceable ? fence_alignment :
2454 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002455 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002456 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2457 return -EINVAL;
2458 }
2459
Chris Wilson05394f32010-11-08 19:18:58 +00002460 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002461
Chris Wilson654fc602010-05-27 13:18:21 +01002462 /* If the object is bigger than the entire aperture, reject it early
2463 * before evicting everything in a vain attempt to find space.
2464 */
Chris Wilson05394f32010-11-08 19:18:58 +00002465 if (obj->base.size >
Daniel Vetter75e9e912010-11-04 17:11:09 +01002466 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
Chris Wilson654fc602010-05-27 13:18:21 +01002467 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2468 return -E2BIG;
2469 }
2470
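	/*
	 * Look for a hole in the GTT large enough for the object; mappable
	 * (and fenceable) placements are restricted to the range below
	 * gtt_mappable_end. If nothing fits, evict and retry from here.
	 */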
Eric Anholt673a3942008-07-30 12:06:12 -07002471 search_free:
Daniel Vetter75e9e912010-11-04 17:11:09 +01002472 if (map_and_fenceable)
Daniel Vetter920afa72010-09-16 17:54:23 +02002473 free_space =
2474 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002475 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002476 dev_priv->mm.gtt_mappable_end,
2477 0);
2478 else
2479 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002480 size, alignment, 0);
Daniel Vetter920afa72010-09-16 17:54:23 +02002481
2482 if (free_space != NULL) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01002483 if (map_and_fenceable)
Chris Wilson05394f32010-11-08 19:18:58 +00002484 obj->gtt_space =
Daniel Vetter920afa72010-09-16 17:54:23 +02002485 drm_mm_get_block_range_generic(free_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002486 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002487 dev_priv->mm.gtt_mappable_end,
2488 0);
2489 else
Chris Wilson05394f32010-11-08 19:18:58 +00002490 obj->gtt_space =
Chris Wilsona00b10c2010-09-24 21:15:47 +01002491 drm_mm_get_block(free_space, size, alignment);
Daniel Vetter920afa72010-09-16 17:54:23 +02002492 }
Chris Wilson05394f32010-11-08 19:18:58 +00002493 if (obj->gtt_space == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07002494 /* If the gtt is empty and we're still having trouble
2495 * fitting our object in, we're out of memory.
2496 */
Daniel Vetter75e9e912010-11-04 17:11:09 +01002497 ret = i915_gem_evict_something(dev, size, alignment,
2498 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01002499 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002500 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002501
Eric Anholt673a3942008-07-30 12:06:12 -07002502 goto search_free;
2503 }
2504
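	/*
	 * Allocate the backing pages. On -ENOMEM, first try to reclaim
	 * memory by purging the entire GTT; if even that fails, drop the
	 * __GFP_NORETRY | __GFP_NOWARN flags and let the allocator try
	 * harder before we finally give up.
	 */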
Chris Wilsone5281cc2010-10-28 13:45:36 +01002505 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002506 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00002507 drm_mm_put_block(obj->gtt_space);
2508 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002509
2510 if (ret == -ENOMEM) {
Chris Wilson809b6332011-01-10 17:33:15 +00002511 /* first try to reclaim some memory by clearing the GTT */
2512 ret = i915_gem_evict_everything(dev, false);
Chris Wilson07f73f62009-09-14 16:50:30 +01002513 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002514 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002515 if (gfpmask) {
2516 gfpmask = 0;
2517 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002518 }
2519
Chris Wilson809b6332011-01-10 17:33:15 +00002520 return -ENOMEM;
Chris Wilson07f73f62009-09-14 16:50:30 +01002521 }
2522
2523 goto search_free;
2524 }
2525
Eric Anholt673a3942008-07-30 12:06:12 -07002526 return ret;
2527 }
2528
Daniel Vetter74163902012-02-15 23:50:21 +01002529 ret = i915_gem_gtt_prepare_object(obj);
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002530 if (ret) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01002531 i915_gem_object_put_pages_gtt(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002532 drm_mm_put_block(obj->gtt_space);
2533 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002534
Chris Wilson809b6332011-01-10 17:33:15 +00002535 if (i915_gem_evict_everything(dev, false))
Chris Wilson07f73f62009-09-14 16:50:30 +01002536 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002537
2538 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002539 }
Daniel Vetter0ebb9822012-02-15 23:50:24 +01002540
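	/*
	 * With an aliasing PPGTT the global GTT binding is deferred until
	 * something actually requires it (see i915_gem_object_pin below);
	 * otherwise map the object into the global GTT right away.
	 */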
2541 if (!dev_priv->mm.aliasing_ppgtt)
2542 i915_gem_gtt_bind_object(obj, obj->cache_level);
Eric Anholt673a3942008-07-30 12:06:12 -07002543
Chris Wilson6299f992010-11-24 12:23:44 +00002544 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002545 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002546
Eric Anholt673a3942008-07-30 12:06:12 -07002547 /* Assert that the object is not currently in any GPU domain. As it
2548 * wasn't in the GTT, there shouldn't be any way it could have been in
2549 * a GPU cache
2550 */
Chris Wilson05394f32010-11-08 19:18:58 +00002551 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2552 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002553
Chris Wilson6299f992010-11-24 12:23:44 +00002554 obj->gtt_offset = obj->gtt_space->start;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002555
Daniel Vetter75e9e912010-11-04 17:11:09 +01002556 fenceable =
Chris Wilson05394f32010-11-08 19:18:58 +00002557 obj->gtt_space->size == fence_size &&
Akshay Joshi0206e352011-08-16 15:34:10 -04002558 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002559
Daniel Vetter75e9e912010-11-04 17:11:09 +01002560 mappable =
Chris Wilson05394f32010-11-08 19:18:58 +00002561 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002562
Chris Wilson05394f32010-11-08 19:18:58 +00002563 obj->map_and_fenceable = mappable && fenceable;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002564
Chris Wilsondb53a302011-02-03 11:57:46 +00002565 trace_i915_gem_object_bind(obj, map_and_fenceable);
Eric Anholt673a3942008-07-30 12:06:12 -07002566 return 0;
2567}
2568
2569void
Chris Wilson05394f32010-11-08 19:18:58 +00002570i915_gem_clflush_object(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002571{
Eric Anholt673a3942008-07-30 12:06:12 -07002572 /* If we don't have a page list set up, then we're not pinned
2573 * to GPU, and we can ignore the cache flush because it'll happen
2574 * again at bind time.
2575 */
Chris Wilson05394f32010-11-08 19:18:58 +00002576 if (obj->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002577 return;
2578
Chris Wilson9c23f7f2011-03-29 16:59:52 -07002579 /* If the GPU is snooping the contents of the CPU cache,
2580 * we do not need to manually clear the CPU cache lines. However,
2581 * the caches are only snooped when the render cache is
2582 * flushed/invalidated. As we always have to emit invalidations
2583 * and flushes when moving into and out of the RENDER domain, correct
2584 * snooping behaviour occurs naturally as the result of our domain
2585 * tracking.
2586 */
2587 if (obj->cache_level != I915_CACHE_NONE)
2588 return;
2589
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002590 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002591
Chris Wilson05394f32010-11-08 19:18:58 +00002592 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002593}
2594
Eric Anholte47c68e2008-11-14 13:35:19 -08002595/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson88241782011-01-07 17:09:48 +00002596static int
Chris Wilson3619df02010-11-28 15:37:17 +00002597i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002598{
Chris Wilson05394f32010-11-08 19:18:58 +00002599 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson88241782011-01-07 17:09:48 +00002600 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002601
2602 /* Queue the GPU write cache flushing we need. */
Chris Wilsondb53a302011-02-03 11:57:46 +00002603 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002604}
2605
2606/** Flushes the GTT write domain for the object if it's dirty. */
2607static void
Chris Wilson05394f32010-11-08 19:18:58 +00002608i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002609{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002610 uint32_t old_write_domain;
2611
Chris Wilson05394f32010-11-08 19:18:58 +00002612 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08002613 return;
2614
Chris Wilson63256ec2011-01-04 18:42:07 +00002615 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08002616 * to it immediately go to main memory as far as we know, so there's
2617 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00002618 *
2619 * However, we do have to enforce the order so that all writes through
2620 * the GTT land before any writes to the device, such as updates to
2621 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08002622 */
Chris Wilson63256ec2011-01-04 18:42:07 +00002623 wmb();
2624
Chris Wilson05394f32010-11-08 19:18:58 +00002625 old_write_domain = obj->base.write_domain;
2626 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002627
2628 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002629 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002630 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002631}
2632
2633/** Flushes the CPU write domain for the object if it's dirty. */
2634static void
Chris Wilson05394f32010-11-08 19:18:58 +00002635i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002636{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002637 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002638
Chris Wilson05394f32010-11-08 19:18:58 +00002639 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08002640 return;
2641
2642 i915_gem_clflush_object(obj);
Daniel Vetter40ce6572010-11-05 18:12:18 +01002643 intel_gtt_chipset_flush();
Chris Wilson05394f32010-11-08 19:18:58 +00002644 old_write_domain = obj->base.write_domain;
2645 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002646
2647 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002648 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002649 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002650}
2651
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002652/**
 2653 * Moves a single object to the GTT read, and possibly write, domain.
2654 *
2655 * This function returns when the move is complete, including waiting on
2656 * flushes to occur.
2657 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002658int
Chris Wilson20217462010-11-23 15:26:33 +00002659i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002660{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002661 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002662 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002663
Eric Anholt02354392008-11-26 13:58:13 -08002664 /* Not valid to be called on unbound objects. */
Chris Wilson05394f32010-11-08 19:18:58 +00002665 if (obj->gtt_space == NULL)
Eric Anholt02354392008-11-26 13:58:13 -08002666 return -EINVAL;
2667
Chris Wilson8d7e3de2011-02-07 15:23:02 +00002668 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2669 return 0;
2670
Chris Wilson88241782011-01-07 17:09:48 +00002671 ret = i915_gem_object_flush_gpu_write_domain(obj);
2672 if (ret)
2673 return ret;
2674
Chris Wilson87ca9c82010-12-02 09:42:56 +00002675 if (obj->pending_gpu_write || write) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002676 ret = i915_gem_object_wait_rendering(obj);
Chris Wilson87ca9c82010-12-02 09:42:56 +00002677 if (ret)
2678 return ret;
2679 }
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002680
Chris Wilson72133422010-09-13 23:56:38 +01002681 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002682
Chris Wilson05394f32010-11-08 19:18:58 +00002683 old_write_domain = obj->base.write_domain;
2684 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002685
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002686 /* It should now be out of any other write domains, and we can update
2687 * the domain values for our changes.
2688 */
Chris Wilson05394f32010-11-08 19:18:58 +00002689 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2690 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002691 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002692 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2693 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2694 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08002695 }
2696
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002697 trace_i915_gem_object_change_domain(obj,
2698 old_read_domains,
2699 old_write_domain);
2700
Eric Anholte47c68e2008-11-14 13:35:19 -08002701 return 0;
2702}
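
/*
 * Sketch of a typical caller (illustrative only, error handling elided):
 *
 *	ret = i915_gem_object_pin(obj, 0, true);
 *	if (ret == 0) {
 *		ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *		... access the object through the GTT at obj->gtt_offset ...
 *		i915_gem_object_unpin(obj);
 *	}
 */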
2703
Chris Wilsone4ffd172011-04-04 09:44:39 +01002704int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2705 enum i915_cache_level cache_level)
2706{
Daniel Vetter7bddb012012-02-09 17:15:47 +01002707 struct drm_device *dev = obj->base.dev;
2708 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsone4ffd172011-04-04 09:44:39 +01002709 int ret;
2710
2711 if (obj->cache_level == cache_level)
2712 return 0;
2713
2714 if (obj->pin_count) {
2715 DRM_DEBUG("can not change the cache level of pinned objects\n");
2716 return -EBUSY;
2717 }
2718
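	/*
	 * If the object is currently bound, its PTEs (and, before SNB, any
	 * fence covering it) must be updated so that the new cache level
	 * takes effect immediately.
	 */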
2719 if (obj->gtt_space) {
2720 ret = i915_gem_object_finish_gpu(obj);
2721 if (ret)
2722 return ret;
2723
2724 i915_gem_object_finish_gtt(obj);
2725
2726 /* Before SandyBridge, you could not use tiling or fence
2727 * registers with snooped memory, so relinquish any fences
2728 * currently pointing to our region in the aperture.
2729 */
2730 if (INTEL_INFO(obj->base.dev)->gen < 6) {
2731 ret = i915_gem_object_put_fence(obj);
2732 if (ret)
2733 return ret;
2734 }
2735
Daniel Vetter74898d72012-02-15 23:50:22 +01002736 if (obj->has_global_gtt_mapping)
2737 i915_gem_gtt_bind_object(obj, cache_level);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002738 if (obj->has_aliasing_ppgtt_mapping)
2739 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2740 obj, cache_level);
Chris Wilsone4ffd172011-04-04 09:44:39 +01002741 }
2742
2743 if (cache_level == I915_CACHE_NONE) {
2744 u32 old_read_domains, old_write_domain;
2745
2746 /* If we're coming from LLC cached, then we haven't
2747 * actually been tracking whether the data is in the
2748 * CPU cache or not, since we only allow one bit set
2749 * in obj->write_domain and have been skipping the clflushes.
2750 * Just set it to the CPU cache for now.
2751 */
2752 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2753 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
2754
2755 old_read_domains = obj->base.read_domains;
2756 old_write_domain = obj->base.write_domain;
2757
2758 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2759 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2760
2761 trace_i915_gem_object_change_domain(obj,
2762 old_read_domains,
2763 old_write_domain);
2764 }
2765
2766 obj->cache_level = cache_level;
2767 return 0;
2768}
2769
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002770/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002771 * Prepare buffer for display plane (scanout, cursors, etc).
2772 * Can be called from an uninterruptible phase (modesetting) and allows
2773 * any flushes to be pipelined (for pageflips).
2774 *
2775 * For the display plane, we want to be in the GTT but out of any write
2776 * domains. So in many ways this looks like set_to_gtt_domain() apart from the
2777 * ability to pipeline the waits, pinning and any additional subtleties
2778 * that may differentiate the display plane from ordinary buffers.
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002779 */
2780int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002781i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2782 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00002783 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002784{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002785 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002786 int ret;
2787
Chris Wilson88241782011-01-07 17:09:48 +00002788 ret = i915_gem_object_flush_gpu_write_domain(obj);
2789 if (ret)
2790 return ret;
2791
Chris Wilson0be73282010-12-06 14:36:27 +00002792 if (pipelined != obj->ring) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002793 ret = i915_gem_object_wait_rendering(obj);
Keith Packardf0b69ef2011-07-19 16:21:40 -07002794 if (ret == -ERESTARTSYS)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002795 return ret;
2796 }
2797
Eric Anholta7ef0642011-03-29 16:59:54 -07002798 /* The display engine is not coherent with the LLC cache on gen6. As
2799 * a result, we make sure that the pinning that is about to occur is
2800 * done with uncached PTEs. This is lowest common denominator for all
2801 * chipsets.
2802 *
2803 * However for gen6+, we could do better by using the GFDT bit instead
2804 * of uncaching, which would allow us to flush all the LLC-cached data
2805 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
2806 */
2807 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
2808 if (ret)
2809 return ret;
2810
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002811 /* As the user may map the buffer once pinned in the display plane
2812 * (e.g. libkms for the bootup splash), we have to ensure that we
2813 * always use map_and_fenceable for all scanout buffers.
2814 */
2815 ret = i915_gem_object_pin(obj, alignment, true);
2816 if (ret)
2817 return ret;
2818
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002819 i915_gem_object_flush_cpu_write_domain(obj);
2820
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002821 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00002822 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002823
2824 /* It should now be out of any other write domains, and we can update
2825 * the domain values for our changes.
2826 */
2827 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilson05394f32010-11-08 19:18:58 +00002828 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002829
2830 trace_i915_gem_object_change_domain(obj,
2831 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002832 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002833
2834 return 0;
2835}
2836
Chris Wilson85345512010-11-13 09:49:11 +00002837int
Chris Wilsona8198ee2011-04-13 22:04:09 +01002838i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00002839{
Chris Wilson88241782011-01-07 17:09:48 +00002840 int ret;
2841
Chris Wilsona8198ee2011-04-13 22:04:09 +01002842 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00002843 return 0;
2844
Chris Wilson88241782011-01-07 17:09:48 +00002845 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002846 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Chris Wilson88241782011-01-07 17:09:48 +00002847 if (ret)
2848 return ret;
2849 }
Chris Wilson85345512010-11-13 09:49:11 +00002850
Chris Wilsonc501ae72011-12-14 13:57:23 +01002851 ret = i915_gem_object_wait_rendering(obj);
2852 if (ret)
2853 return ret;
2854
Chris Wilsona8198ee2011-04-13 22:04:09 +01002855 /* Ensure that we invalidate the GPU's caches and TLBs. */
2856 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01002857 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00002858}
2859
Eric Anholte47c68e2008-11-14 13:35:19 -08002860/**
 2861 * Moves a single object to the CPU read, and possibly write, domain.
2862 *
2863 * This function returns when the move is complete, including waiting on
2864 * flushes to occur.
2865 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02002866int
Chris Wilson919926a2010-11-12 13:42:53 +00002867i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002868{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002869 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002870 int ret;
2871
Chris Wilson8d7e3de2011-02-07 15:23:02 +00002872 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
2873 return 0;
2874
Chris Wilson88241782011-01-07 17:09:48 +00002875 ret = i915_gem_object_flush_gpu_write_domain(obj);
2876 if (ret)
2877 return ret;
2878
Chris Wilsonce453d82011-02-21 14:43:56 +00002879 ret = i915_gem_object_wait_rendering(obj);
Daniel Vetterde18a292010-11-27 22:30:41 +01002880 if (ret)
Eric Anholte47c68e2008-11-14 13:35:19 -08002881 return ret;
2882
2883 i915_gem_object_flush_gtt_write_domain(obj);
2884
Chris Wilson05394f32010-11-08 19:18:58 +00002885 old_write_domain = obj->base.write_domain;
2886 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002887
Eric Anholte47c68e2008-11-14 13:35:19 -08002888 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00002889 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Eric Anholte47c68e2008-11-14 13:35:19 -08002890 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002891
Chris Wilson05394f32010-11-08 19:18:58 +00002892 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002893 }
2894
2895 /* It should now be out of any other write domains, and we can update
2896 * the domain values for our changes.
2897 */
Chris Wilson05394f32010-11-08 19:18:58 +00002898 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08002899
2900 /* If we're writing through the CPU, then the GPU read domains will
2901 * need to be invalidated at next use.
2902 */
2903 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002904 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2905 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002906 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002907
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002908 trace_i915_gem_object_change_domain(obj,
2909 old_read_domains,
2910 old_write_domain);
2911
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002912 return 0;
2913}
2914
Eric Anholt673a3942008-07-30 12:06:12 -07002915/* Throttle our rendering by waiting until the ring has completed our requests
2916 * emitted over 20 msec ago.
2917 *
Eric Anholtb9624422009-06-03 07:27:35 +00002918 * Note that if we were to use the current jiffies each time around the loop,
2919 * we wouldn't escape the function with any frames outstanding if the time to
2920 * render a frame was over 20ms.
2921 *
Eric Anholt673a3942008-07-30 12:06:12 -07002922 * This should get us reasonable parallelism between CPU and GPU but also
2923 * relatively low latency when blocking on a particular request to finish.
2924 */
2925static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002926i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07002927{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002928 struct drm_i915_private *dev_priv = dev->dev_private;
2929 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002930 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002931 struct drm_i915_gem_request *request;
2932 struct intel_ring_buffer *ring = NULL;
2933 u32 seqno = 0;
2934 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002935
Chris Wilsone110e8d2011-01-26 15:39:14 +00002936 if (atomic_read(&dev_priv->mm.wedged))
2937 return -EIO;
2938
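	/*
	 * Walk this client's requests (oldest first) and remember the newest
	 * one that is already more than 20ms old; that is the request we
	 * throttle on below.
	 */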
Chris Wilson1c255952010-09-26 11:03:27 +01002939 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002940 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00002941 if (time_after_eq(request->emitted_jiffies, recent_enough))
2942 break;
2943
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002944 ring = request->ring;
2945 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00002946 }
Chris Wilson1c255952010-09-26 11:03:27 +01002947 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002948
2949 if (seqno == 0)
2950 return 0;
2951
2952 ret = 0;
Chris Wilson78501ea2010-10-27 12:18:21 +01002953 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002954 /* And wait for the seqno passing without holding any locks and
2955 * causing extra latency for others. This is safe as the irq
2956 * generation is designed to be run atomically and so is
2957 * lockless.
2958 */
Chris Wilsonb13c2b92010-12-13 16:54:50 +00002959 if (ring->irq_get(ring)) {
2960 ret = wait_event_interruptible(ring->irq_queue,
2961 i915_seqno_passed(ring->get_seqno(ring), seqno)
2962 || atomic_read(&dev_priv->mm.wedged));
2963 ring->irq_put(ring);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002964
Chris Wilsonb13c2b92010-12-13 16:54:50 +00002965 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
2966 ret = -EIO;
Eric Anholte959b5d2011-12-22 14:55:01 -08002967 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
2968 seqno) ||
Eric Anholt7ea29b12011-12-22 14:54:59 -08002969 atomic_read(&dev_priv->mm.wedged), 3000)) {
2970 ret = -EBUSY;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00002971 }
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002972 }
2973
2974 if (ret == 0)
2975 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00002976
Eric Anholt673a3942008-07-30 12:06:12 -07002977 return ret;
2978}
2979
Eric Anholt673a3942008-07-30 12:06:12 -07002980int
Chris Wilson05394f32010-11-08 19:18:58 +00002981i915_gem_object_pin(struct drm_i915_gem_object *obj,
2982 uint32_t alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01002983 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07002984{
Chris Wilson05394f32010-11-08 19:18:58 +00002985 struct drm_device *dev = obj->base.dev;
Chris Wilsonf13d3f72010-09-20 17:36:15 +01002986 struct drm_i915_private *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002987 int ret;
2988
Chris Wilson05394f32010-11-08 19:18:58 +00002989 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
Chris Wilson23bc5982010-09-29 16:10:57 +01002990 WARN_ON(i915_verify_lists(dev));
Chris Wilsonac0c6b52010-05-27 13:18:18 +01002991
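	/*
	 * If the object is already bound but at the wrong alignment, or is
	 * outside the mappable aperture when the caller needs it there,
	 * unbind it first so the rebind below can place it correctly.
	 */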
Chris Wilson05394f32010-11-08 19:18:58 +00002992 if (obj->gtt_space != NULL) {
2993 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
2994 (map_and_fenceable && !obj->map_and_fenceable)) {
2995 WARN(obj->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01002996 "bo is already pinned with incorrect alignment:"
Daniel Vetter75e9e912010-11-04 17:11:09 +01002997 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
2998 " obj->map_and_fenceable=%d\n",
Chris Wilson05394f32010-11-08 19:18:58 +00002999 obj->gtt_offset, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003000 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003001 obj->map_and_fenceable);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003002 ret = i915_gem_object_unbind(obj);
3003 if (ret)
3004 return ret;
3005 }
3006 }
3007
Chris Wilson05394f32010-11-08 19:18:58 +00003008 if (obj->gtt_space == NULL) {
Chris Wilsona00b10c2010-09-24 21:15:47 +01003009 ret = i915_gem_object_bind_to_gtt(obj, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003010 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01003011 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003012 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00003013 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003014
Daniel Vetter74898d72012-02-15 23:50:22 +01003015 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3016 i915_gem_gtt_bind_object(obj, obj->cache_level);
3017
Chris Wilson05394f32010-11-08 19:18:58 +00003018 if (obj->pin_count++ == 0) {
Chris Wilson05394f32010-11-08 19:18:58 +00003019 if (!obj->active)
3020 list_move_tail(&obj->mm_list,
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003021 &dev_priv->mm.pinned_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003022 }
Chris Wilson6299f992010-11-24 12:23:44 +00003023 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003024
Chris Wilson23bc5982010-09-29 16:10:57 +01003025 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003026 return 0;
3027}
3028
3029void
Chris Wilson05394f32010-11-08 19:18:58 +00003030i915_gem_object_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003031{
Chris Wilson05394f32010-11-08 19:18:58 +00003032 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003033 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003034
Chris Wilson23bc5982010-09-29 16:10:57 +01003035 WARN_ON(i915_verify_lists(dev));
Chris Wilson05394f32010-11-08 19:18:58 +00003036 BUG_ON(obj->pin_count == 0);
3037 BUG_ON(obj->gtt_space == NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07003038
Chris Wilson05394f32010-11-08 19:18:58 +00003039 if (--obj->pin_count == 0) {
3040 if (!obj->active)
3041 list_move_tail(&obj->mm_list,
Eric Anholt673a3942008-07-30 12:06:12 -07003042 &dev_priv->mm.inactive_list);
Chris Wilson6299f992010-11-24 12:23:44 +00003043 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003044 }
Chris Wilson23bc5982010-09-29 16:10:57 +01003045 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003046}
3047
3048int
3049i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003050 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003051{
3052 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003053 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003054 int ret;
3055
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003056 ret = i915_mutex_lock_interruptible(dev);
3057 if (ret)
3058 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003059
Chris Wilson05394f32010-11-08 19:18:58 +00003060 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003061 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003062 ret = -ENOENT;
3063 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003064 }
Eric Anholt673a3942008-07-30 12:06:12 -07003065
Chris Wilson05394f32010-11-08 19:18:58 +00003066 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003067 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003068 ret = -EINVAL;
3069 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003070 }
3071
Chris Wilson05394f32010-11-08 19:18:58 +00003072 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003073 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3074 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003075 ret = -EINVAL;
3076 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003077 }
3078
Chris Wilson05394f32010-11-08 19:18:58 +00003079 obj->user_pin_count++;
3080 obj->pin_filp = file;
3081 if (obj->user_pin_count == 1) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01003082 ret = i915_gem_object_pin(obj, args->alignment, true);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003083 if (ret)
3084 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003085 }
3086
3087 /* XXX - flush the CPU caches for pinned objects
3088 * as the X server doesn't manage domains yet
3089 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003090 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00003091 args->offset = obj->gtt_offset;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003092out:
Chris Wilson05394f32010-11-08 19:18:58 +00003093 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003094unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003095 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003096 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003097}
3098
3099int
3100i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003101 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003102{
3103 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003104 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003105 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003106
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003107 ret = i915_mutex_lock_interruptible(dev);
3108 if (ret)
3109 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003110
Chris Wilson05394f32010-11-08 19:18:58 +00003111 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003112 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003113 ret = -ENOENT;
3114 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003115 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003116
Chris Wilson05394f32010-11-08 19:18:58 +00003117 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003118 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3119 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003120 ret = -EINVAL;
3121 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003122 }
Chris Wilson05394f32010-11-08 19:18:58 +00003123 obj->user_pin_count--;
3124 if (obj->user_pin_count == 0) {
3125 obj->pin_filp = NULL;
Jesse Barnes79e53942008-11-07 14:24:08 -08003126 i915_gem_object_unpin(obj);
3127 }
Eric Anholt673a3942008-07-30 12:06:12 -07003128
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003129out:
Chris Wilson05394f32010-11-08 19:18:58 +00003130 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003131unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003132 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003133 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003134}
3135
3136int
3137i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003138 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003139{
3140 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003141 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003142 int ret;
3143
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003144 ret = i915_mutex_lock_interruptible(dev);
3145 if (ret)
3146 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003147
Chris Wilson05394f32010-11-08 19:18:58 +00003148 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003149 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003150 ret = -ENOENT;
3151 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003152 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003153
Chris Wilson0be555b2010-08-04 15:36:30 +01003154 /* Count all active objects as busy, even if they are currently not used
3155 * by the gpu. Users of this interface expect objects to eventually
 3156 * become non-busy without any further actions; therefore, emit any
3157 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08003158 */
Chris Wilson05394f32010-11-08 19:18:58 +00003159 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003160 if (args->busy) {
3161 /* Unconditionally flush objects, even when the gpu still uses this
3162 * object. Userspace calling this function indicates that it wants to
 3163 * use this buffer sooner rather than later, so issuing the required
3164 * flush earlier is beneficial.
3165 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003166 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00003167 ret = i915_gem_flush_ring(obj->ring,
Chris Wilson88241782011-01-07 17:09:48 +00003168 0, obj->base.write_domain);
Chris Wilson1a1c6972010-12-07 23:00:20 +00003169 } else if (obj->ring->outstanding_lazy_request ==
3170 obj->last_rendering_seqno) {
3171 struct drm_i915_gem_request *request;
3172
Chris Wilson7a194872010-12-07 10:38:40 +00003173 /* This ring is not being cleared by active usage,
3174 * so emit a request to do so.
3175 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003176 request = kzalloc(sizeof(*request), GFP_KERNEL);
Rakib Mullick457eafc2011-11-16 00:49:28 +06003177 if (request) {
Akshay Joshi0206e352011-08-16 15:34:10 -04003178 ret = i915_add_request(obj->ring, NULL, request);
Rakib Mullick457eafc2011-11-16 00:49:28 +06003179 if (ret)
3180 kfree(request);
3181 } else
Chris Wilson7a194872010-12-07 10:38:40 +00003182 ret = -ENOMEM;
3183 }
Chris Wilson0be555b2010-08-04 15:36:30 +01003184
3185 /* Update the active list for the hardware's current position.
3186 * Otherwise this only updates on a delayed timer or when irqs
3187 * are actually unmasked, and our working set ends up being
3188 * larger than required.
3189 */
Chris Wilsondb53a302011-02-03 11:57:46 +00003190 i915_gem_retire_requests_ring(obj->ring);
Chris Wilson0be555b2010-08-04 15:36:30 +01003191
Chris Wilson05394f32010-11-08 19:18:58 +00003192 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003193 }
Eric Anholt673a3942008-07-30 12:06:12 -07003194
Chris Wilson05394f32010-11-08 19:18:58 +00003195 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003196unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003197 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003198 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003199}
3200
3201int
3202i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3203 struct drm_file *file_priv)
3204{
Akshay Joshi0206e352011-08-16 15:34:10 -04003205 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003206}
3207
Chris Wilson3ef94da2009-09-14 16:50:29 +01003208int
3209i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3210 struct drm_file *file_priv)
3211{
3212 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003213 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003214 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003215
3216 switch (args->madv) {
3217 case I915_MADV_DONTNEED:
3218 case I915_MADV_WILLNEED:
3219 break;
3220 default:
3221 return -EINVAL;
3222 }
3223
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003224 ret = i915_mutex_lock_interruptible(dev);
3225 if (ret)
3226 return ret;
3227
Chris Wilson05394f32010-11-08 19:18:58 +00003228 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003229 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003230 ret = -ENOENT;
3231 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003232 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01003233
Chris Wilson05394f32010-11-08 19:18:58 +00003234 if (obj->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003235 ret = -EINVAL;
3236 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003237 }
3238
Chris Wilson05394f32010-11-08 19:18:58 +00003239 if (obj->madv != __I915_MADV_PURGED)
3240 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003241
Chris Wilson2d7ef392009-09-20 23:13:10 +01003242 /* if the object is no longer bound, discard its backing storage */
Chris Wilson05394f32010-11-08 19:18:58 +00003243 if (i915_gem_object_is_purgeable(obj) &&
3244 obj->gtt_space == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003245 i915_gem_object_truncate(obj);
3246
Chris Wilson05394f32010-11-08 19:18:58 +00003247 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003248
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003249out:
Chris Wilson05394f32010-11-08 19:18:58 +00003250 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003251unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01003252 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003253 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003254}
3255
Chris Wilson05394f32010-11-08 19:18:58 +00003256struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3257 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00003258{
Chris Wilson73aa8082010-09-30 11:46:12 +01003259 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterc397b902010-04-09 19:05:07 +00003260 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07003261 struct address_space *mapping;
Daniel Vetterc397b902010-04-09 19:05:07 +00003262
3263 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3264 if (obj == NULL)
3265 return NULL;
3266
3267 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3268 kfree(obj);
3269 return NULL;
3270 }
3271
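	/*
	 * The object is backed by a shmem file; allow its pages to come from
	 * highmem and group them with other reclaimable allocations.
	 */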
Hugh Dickins5949eac2011-06-27 16:18:18 -07003272 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3273 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3274
Chris Wilson73aa8082010-09-30 11:46:12 +01003275 i915_gem_info_add_obj(dev_priv, size);
3276
Daniel Vetterc397b902010-04-09 19:05:07 +00003277 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3278 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3279
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02003280 if (HAS_LLC(dev)) {
3281 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07003282 * cache) for about a 10% performance improvement
3283 * compared to uncached. Graphics requests other than
3284 * display scanout are coherent with the CPU in
3285 * accessing this cache. This means in this mode we
3286 * don't need to clflush on the CPU side, and on the
3287 * GPU side we only need to flush internal caches to
3288 * get data visible to the CPU.
3289 *
3290 * However, we maintain the display planes as UC, and so
3291 * need to rebind when first used as such.
3292 */
3293 obj->cache_level = I915_CACHE_LLC;
3294 } else
3295 obj->cache_level = I915_CACHE_NONE;
3296
Daniel Vetter62b8b212010-04-09 19:05:08 +00003297 obj->base.driver_private = NULL;
Daniel Vetterc397b902010-04-09 19:05:07 +00003298 obj->fence_reg = I915_FENCE_REG_NONE;
Chris Wilson69dc4982010-10-19 10:36:51 +01003299 INIT_LIST_HEAD(&obj->mm_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003300 INIT_LIST_HEAD(&obj->gtt_list);
Chris Wilson69dc4982010-10-19 10:36:51 +01003301 INIT_LIST_HEAD(&obj->ring_list);
Chris Wilson432e58e2010-11-25 19:32:06 +00003302 INIT_LIST_HEAD(&obj->exec_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003303 INIT_LIST_HEAD(&obj->gpu_write_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003304 obj->madv = I915_MADV_WILLNEED;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003305 /* Avoid an unnecessary call to unbind on the first bind. */
3306 obj->map_and_fenceable = true;
Daniel Vetterc397b902010-04-09 19:05:07 +00003307
Chris Wilson05394f32010-11-08 19:18:58 +00003308 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00003309}
3310
Eric Anholt673a3942008-07-30 12:06:12 -07003311int i915_gem_init_object(struct drm_gem_object *obj)
3312{
Daniel Vetterc397b902010-04-09 19:05:07 +00003313 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08003314
Eric Anholt673a3942008-07-30 12:06:12 -07003315 return 0;
3316}
3317
Chris Wilson05394f32010-11-08 19:18:58 +00003318static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01003319{
Chris Wilson05394f32010-11-08 19:18:58 +00003320 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01003321 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbe726152010-07-23 23:18:50 +01003322 int ret;
3323
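	/*
	 * Unbinding may have to wait on the GPU and can be interrupted by a
	 * signal; in that case park the object on the deferred free list and
	 * let it be destroyed later, once the wait can complete.
	 */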
3324 ret = i915_gem_object_unbind(obj);
3325 if (ret == -ERESTARTSYS) {
Chris Wilson05394f32010-11-08 19:18:58 +00003326 list_move(&obj->mm_list,
Chris Wilsonbe726152010-07-23 23:18:50 +01003327 &dev_priv->mm.deferred_free_list);
3328 return;
3329 }
3330
Chris Wilson26e12f892011-03-20 11:20:19 +00003331 trace_i915_gem_object_destroy(obj);
3332
Chris Wilson05394f32010-11-08 19:18:58 +00003333 if (obj->base.map_list.map)
Rob Clarkb464e9a2011-08-10 08:09:08 -05003334 drm_gem_free_mmap_offset(&obj->base);
Chris Wilsonbe726152010-07-23 23:18:50 +01003335
Chris Wilson05394f32010-11-08 19:18:58 +00003336 drm_gem_object_release(&obj->base);
3337 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01003338
Chris Wilson05394f32010-11-08 19:18:58 +00003339 kfree(obj->bit_17);
3340 kfree(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01003341}
3342
Chris Wilson05394f32010-11-08 19:18:58 +00003343void i915_gem_free_object(struct drm_gem_object *gem_obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003344{
Chris Wilson05394f32010-11-08 19:18:58 +00003345 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3346 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003347
Chris Wilson05394f32010-11-08 19:18:58 +00003348 while (obj->pin_count > 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003349 i915_gem_object_unpin(obj);
3350
Chris Wilson05394f32010-11-08 19:18:58 +00003351 if (obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003352 i915_gem_detach_phys_object(dev, obj);
3353
Chris Wilsonbe726152010-07-23 23:18:50 +01003354 i915_gem_free_object_tail(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003355}
3356
Jesse Barnes5669fca2009-02-17 15:13:31 -08003357int
Eric Anholt673a3942008-07-30 12:06:12 -07003358i915_gem_idle(struct drm_device *dev)
3359{
3360 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00003361 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003362
Keith Packard6dbe2772008-10-14 21:41:13 -07003363 mutex_lock(&dev->struct_mutex);
3364
Chris Wilson87acb0a2010-10-19 10:13:00 +01003365 if (dev_priv->mm.suspended) {
Keith Packard6dbe2772008-10-14 21:41:13 -07003366 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003367 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07003368 }
Eric Anholt673a3942008-07-30 12:06:12 -07003369
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08003370 ret = i915_gpu_idle(dev, true);
Keith Packard6dbe2772008-10-14 21:41:13 -07003371 if (ret) {
3372 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003373 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07003374 }
Eric Anholt673a3942008-07-30 12:06:12 -07003375
Chris Wilson29105cc2010-01-07 10:39:13 +00003376 /* Under UMS, be paranoid and evict. */
3377 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
Chris Wilson5eac3ab2010-10-31 08:49:47 +00003378 ret = i915_gem_evict_inactive(dev, false);
Chris Wilson29105cc2010-01-07 10:39:13 +00003379 if (ret) {
3380 mutex_unlock(&dev->struct_mutex);
3381 return ret;
3382 }
3383 }
3384
Chris Wilson312817a2010-11-22 11:50:11 +00003385 i915_gem_reset_fences(dev);
3386
Chris Wilson29105cc2010-01-07 10:39:13 +00003387 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3388 * We need to replace this with a semaphore, or something.
3389 * And not confound mm.suspended!
3390 */
3391 dev_priv->mm.suspended = 1;
Daniel Vetterbc0c7f12010-08-20 18:18:48 +02003392 del_timer_sync(&dev_priv->hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00003393
3394 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003395 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00003396
Keith Packard6dbe2772008-10-14 21:41:13 -07003397 mutex_unlock(&dev->struct_mutex);
3398
Chris Wilson29105cc2010-01-07 10:39:13 +00003399 /* Cancel the retire work handler, which should be idle now. */
3400 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3401
Eric Anholt673a3942008-07-30 12:06:12 -07003402 return 0;
3403}
3404
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003405void i915_gem_init_swizzling(struct drm_device *dev)
3406{
3407 drm_i915_private_t *dev_priv = dev->dev_private;
3408
Daniel Vetter11782b02012-01-31 16:47:55 +01003409 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003410 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3411 return;
3412
3413 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3414 DISP_TILE_SURFACE_SWIZZLING);
3415
Daniel Vetter11782b02012-01-31 16:47:55 +01003416 if (IS_GEN5(dev))
3417 return;
3418
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003419 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3420 if (IS_GEN6(dev))
3421 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
3422 else
3423 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
3424}
Daniel Vettere21af882012-02-09 20:53:27 +01003425
3426void i915_gem_init_ppgtt(struct drm_device *dev)
3427{
3428 drm_i915_private_t *dev_priv = dev->dev_private;
3429 uint32_t pd_offset;
3430 struct intel_ring_buffer *ring;
3431 int i;
3432
3433 if (!dev_priv->mm.aliasing_ppgtt)
3434 return;
3435
3436 pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
 3437 pd_offset /= 64; /* in cachelines */
3438 pd_offset <<= 16;
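	/*
	 * pd_offset is now the page directory offset in cachelines, packed
	 * into the upper 16 bits, as expected by the RING_PP_DIR_BASE writes
	 * below.
	 */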
3439
3440 if (INTEL_INFO(dev)->gen == 6) {
3441 uint32_t ecochk = I915_READ(GAM_ECOCHK);
3442 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3443 ECOCHK_PPGTT_CACHE64B);
3444 I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
3445 } else if (INTEL_INFO(dev)->gen >= 7) {
3446 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3447 /* GFX_MODE is per-ring on gen7+ */
3448 }
3449
3450 for (i = 0; i < I915_NUM_RINGS; i++) {
3451 ring = &dev_priv->ring[i];
3452
3453 if (INTEL_INFO(dev)->gen >= 7)
3454 I915_WRITE(RING_MODE_GEN7(ring),
3455 GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
3456
3457 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3458 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
3459 }
3460}
3461
Eric Anholt673a3942008-07-30 12:06:12 -07003462int
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003463i915_gem_init_hw(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003464{
3465 drm_i915_private_t *dev_priv = dev->dev_private;
3466 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003467
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003468 i915_gem_init_swizzling(dev);
3469
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003470 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003471 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00003472 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003473
3474 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003475 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003476 if (ret)
3477 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08003478 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01003479
Chris Wilson549f7362010-10-19 11:19:32 +01003480 if (HAS_BLT(dev)) {
3481 ret = intel_init_blt_ring_buffer(dev);
3482 if (ret)
3483 goto cleanup_bsd_ring;
3484 }
3485
Chris Wilson6f392d5482010-08-07 11:01:22 +01003486 dev_priv->next_seqno = 1;
3487
Daniel Vettere21af882012-02-09 20:53:27 +01003488 i915_gem_init_ppgtt(dev);
3489
Chris Wilson68f95ba2010-05-27 13:18:22 +01003490 return 0;
3491
Chris Wilson549f7362010-10-19 11:19:32 +01003492cleanup_bsd_ring:
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003493 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003494cleanup_render_ring:
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003495 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003496 return ret;
3497}
3498
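/* Tear down every ring initialised by i915_gem_init_hw(). */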
3499void
3500i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3501{
3502 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003503 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003504
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003505 for (i = 0; i < I915_NUM_RINGS; i++)
3506 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003507}
3508
3509int
Eric Anholt673a3942008-07-30 12:06:12 -07003510i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3511 struct drm_file *file_priv)
3512{
3513 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003514 int ret, i;
Eric Anholt673a3942008-07-30 12:06:12 -07003515
Jesse Barnes79e53942008-11-07 14:24:08 -08003516 if (drm_core_check_feature(dev, DRIVER_MODESET))
3517 return 0;
3518
Ben Gamariba1234d2009-09-14 17:48:47 -04003519 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003520 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04003521 atomic_set(&dev_priv->mm.wedged, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003522 }
3523
Eric Anholt673a3942008-07-30 12:06:12 -07003524 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003525 dev_priv->mm.suspended = 0;
3526
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003527 ret = i915_gem_init_hw(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08003528 if (ret != 0) {
3529 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003530 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08003531 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003532
Chris Wilson69dc4982010-10-19 10:36:51 +01003533 BUG_ON(!list_empty(&dev_priv->mm.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07003534 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3535 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003536 for (i = 0; i < I915_NUM_RINGS; i++) {
3537 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3538 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3539 }
Eric Anholt673a3942008-07-30 12:06:12 -07003540 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003541
Chris Wilson5f353082010-06-07 14:03:03 +01003542 ret = drm_irq_install(dev);
3543 if (ret)
3544 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003545
Eric Anholt673a3942008-07-30 12:06:12 -07003546 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01003547
3548cleanup_ringbuffer:
3549 mutex_lock(&dev->struct_mutex);
3550 i915_gem_cleanup_ringbuffer(dev);
3551 dev_priv->mm.suspended = 1;
3552 mutex_unlock(&dev->struct_mutex);
3553
3554 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003555}
3556
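/*
 * Called when userspace releases the VT (UMS only): remove the interrupt
 * handler and idle the GPU.
 */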
3557int
3558i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3559 struct drm_file *file_priv)
3560{
Jesse Barnes79e53942008-11-07 14:24:08 -08003561 if (drm_core_check_feature(dev, DRIVER_MODESET))
3562 return 0;
3563
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003564 drm_irq_uninstall(dev);
Linus Torvaldse6890f62009-09-08 17:09:24 -07003565 return i915_gem_idle(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003566}
3567
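/* Idle the hardware when the last DRM file handle is closed (UMS only). */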
3568void
3569i915_gem_lastclose(struct drm_device *dev)
3570{
3571 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003572
Eric Anholte806b492009-01-22 09:56:58 -08003573 if (drm_core_check_feature(dev, DRIVER_MODESET))
3574 return;
3575
Keith Packard6dbe2772008-10-14 21:41:13 -07003576 ret = i915_gem_idle(dev);
3577 if (ret)
3578 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003579}
3580
Chris Wilson64193402010-10-24 12:38:05 +01003581static void
3582init_ring_lists(struct intel_ring_buffer *ring)
3583{
3584 INIT_LIST_HEAD(&ring->active_list);
3585 INIT_LIST_HEAD(&ring->request_list);
3586 INIT_LIST_HEAD(&ring->gpu_write_list);
3587}
3588
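/*
 * Driver load-time GEM initialisation: set up the object, request and fence
 * lists, the retire work handler and the shrinker, apply the gen3 ARB C3
 * workaround and detect the bit-6 swizzling mode.
 */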
Eric Anholt673a3942008-07-30 12:06:12 -07003589void
3590i915_gem_load(struct drm_device *dev)
3591{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003592 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07003593 drm_i915_private_t *dev_priv = dev->dev_private;
3594
Chris Wilson69dc4982010-10-19 10:36:51 +01003595 INIT_LIST_HEAD(&dev_priv->mm.active_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003596 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3597 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003598 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07003599 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilsonbe726152010-07-23 23:18:50 +01003600 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003601 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003602 for (i = 0; i < I915_NUM_RINGS; i++)
3603 init_ring_lists(&dev_priv->ring[i]);
Daniel Vetter4b9de732011-10-09 21:52:02 +02003604 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
Daniel Vetter007cc8a2010-04-28 11:02:31 +02003605 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003606 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3607 i915_gem_retire_work_handler);
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003608 init_completion(&dev_priv->error_completion);
Chris Wilson31169712009-09-14 16:50:28 +01003609
Dave Airlie94400122010-07-20 13:15:31 +10003610 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3611 if (IS_GEN3(dev)) {
3612 u32 tmp = I915_READ(MI_ARB_STATE);
3613 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3614 /* arb state is a masked write, so set bit + bit in mask */
3615 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3616 I915_WRITE(MI_ARB_STATE, tmp);
3617 }
3618 }
3619
Chris Wilson72bfa192010-12-19 11:42:05 +00003620 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3621
Jesse Barnesde151cf2008-11-12 10:03:55 -08003622 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08003623 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3624 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003625
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003626 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08003627 dev_priv->num_fence_regs = 16;
3628 else
3629 dev_priv->num_fence_regs = 8;
3630
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003631 /* Initialize fence registers to zero */
Eric Anholt10ed13e2011-05-06 13:53:49 -07003632	for (i = 0; i < dev_priv->num_fence_regs; i++)
 3633		i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
Eric Anholt10ed13e2011-05-06 13:53:49 -07003635
Eric Anholt673a3942008-07-30 12:06:12 -07003636 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003637 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01003638
Chris Wilsonce453d82011-02-21 14:43:56 +00003639 dev_priv->mm.interruptible = true;
3640
Chris Wilson17250b72010-10-28 12:51:39 +01003641 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3642 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3643 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07003644}
Dave Airlie71acb5e2008-12-30 20:31:46 +10003645
3646/*
 3647 * Create a physically contiguous memory object of the given id and size,
 3648 * e.g. for cursor and overlay registers.
3649 */
Chris Wilson995b6762010-08-20 13:23:26 +01003650static int i915_gem_init_phys_object(struct drm_device *dev,
3651 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003652{
3653 drm_i915_private_t *dev_priv = dev->dev_private;
3654 struct drm_i915_gem_phys_object *phys_obj;
3655 int ret;
3656
3657 if (dev_priv->mm.phys_objs[id - 1] || !size)
3658 return 0;
3659
Eric Anholt9a298b22009-03-24 12:23:04 -07003660 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003661 if (!phys_obj)
3662 return -ENOMEM;
3663
3664 phys_obj->id = id;
3665
Chris Wilson6eeefaf2010-08-07 11:01:39 +01003666 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003667 if (!phys_obj->handle) {
3668 ret = -ENOMEM;
3669 goto kfree_obj;
3670 }
3671#ifdef CONFIG_X86
3672 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3673#endif
3674
3675 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3676
3677 return 0;
3678kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07003679 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003680 return ret;
3681}
3682
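/* Detach any current user and release one phys object's backing memory. */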
Chris Wilson995b6762010-08-20 13:23:26 +01003683static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003684{
3685 drm_i915_private_t *dev_priv = dev->dev_private;
3686 struct drm_i915_gem_phys_object *phys_obj;
3687
3688 if (!dev_priv->mm.phys_objs[id - 1])
3689 return;
3690
3691 phys_obj = dev_priv->mm.phys_objs[id - 1];
 3692	if (phys_obj->cur_obj)
 3693		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3695
3696#ifdef CONFIG_X86
3697 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3698#endif
3699 drm_pci_free(dev, phys_obj->handle);
3700 kfree(phys_obj);
3701 dev_priv->mm.phys_objs[id - 1] = NULL;
3702}
3703
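/* Release every allocated phys object slot. */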
3704void i915_gem_free_all_phys_object(struct drm_device *dev)
3705{
3706 int i;
3707
Dave Airlie260883c2009-01-22 17:58:49 +10003708 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003709 i915_gem_free_phys_object(dev, i);
3710}
3711
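/*
 * Copy the phys object contents back into the GEM object's shmem pages,
 * flush caches and the chipset write buffer, and drop the association.
 */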
3712void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00003713 struct drm_i915_gem_object *obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003714{
Chris Wilson05394f32010-11-08 19:18:58 +00003715 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Chris Wilsone5281cc2010-10-28 13:45:36 +01003716 char *vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003717 int i;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003718 int page_count;
3719
Chris Wilson05394f32010-11-08 19:18:58 +00003720 if (!obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003721 return;
Chris Wilson05394f32010-11-08 19:18:58 +00003722 vaddr = obj->phys_obj->handle->vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003723
Chris Wilson05394f32010-11-08 19:18:58 +00003724 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003725 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07003726 struct page *page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01003727 if (!IS_ERR(page)) {
3728 char *dst = kmap_atomic(page);
3729 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
3730 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003731
Chris Wilsone5281cc2010-10-28 13:45:36 +01003732 drm_clflush_pages(&page, 1);
3733
3734 set_page_dirty(page);
3735 mark_page_accessed(page);
3736 page_cache_release(page);
3737 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10003738 }
Daniel Vetter40ce6572010-11-05 18:12:18 +01003739 intel_gtt_chipset_flush();
Chris Wilsond78b47b2009-06-17 21:52:49 +01003740
Chris Wilson05394f32010-11-08 19:18:58 +00003741 obj->phys_obj->cur_obj = NULL;
3742 obj->phys_obj = NULL;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003743}
3744
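/*
 * Bind a GEM object to a contiguous phys object, allocating the phys object
 * on first use and copying the current shmem-backed contents into it.
 */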
3745int
3746i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00003747 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01003748 int id,
3749 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003750{
Chris Wilson05394f32010-11-08 19:18:58 +00003751 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003752 drm_i915_private_t *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003753 int ret = 0;
3754 int page_count;
3755 int i;
3756
3757 if (id > I915_MAX_PHYS_OBJECT)
3758 return -EINVAL;
3759
Chris Wilson05394f32010-11-08 19:18:58 +00003760 if (obj->phys_obj) {
3761 if (obj->phys_obj->id == id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003762 return 0;
3763 i915_gem_detach_phys_object(dev, obj);
3764 }
3765
Dave Airlie71acb5e2008-12-30 20:31:46 +10003766 /* create a new object */
3767 if (!dev_priv->mm.phys_objs[id - 1]) {
3768 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson05394f32010-11-08 19:18:58 +00003769 obj->base.size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003770 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00003771 DRM_ERROR("failed to init phys object %d size: %zu\n",
3772 id, obj->base.size);
Chris Wilsone5281cc2010-10-28 13:45:36 +01003773 return ret;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003774 }
3775 }
3776
3777 /* bind to the object */
Chris Wilson05394f32010-11-08 19:18:58 +00003778 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3779 obj->phys_obj->cur_obj = obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003780
Chris Wilson05394f32010-11-08 19:18:58 +00003781 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003782
3783 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01003784 struct page *page;
3785 char *dst, *src;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003786
Hugh Dickins5949eac2011-06-27 16:18:18 -07003787 page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01003788 if (IS_ERR(page))
3789 return PTR_ERR(page);
3790
Chris Wilsonff75b9b2010-10-30 22:52:31 +01003791 src = kmap_atomic(page);
Chris Wilson05394f32010-11-08 19:18:58 +00003792 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003793 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07003794 kunmap_atomic(src);
Chris Wilsone5281cc2010-10-28 13:45:36 +01003795
3796 mark_page_accessed(page);
3797 page_cache_release(page);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003798 }
3799
3800 return 0;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003801}
3802
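/*
 * pwrite fast path for objects backed by a phys object: copy directly from
 * userspace into the contiguous allocation, dropping struct_mutex only if
 * the atomic copy faults.
 */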
3803static int
Chris Wilson05394f32010-11-08 19:18:58 +00003804i915_gem_phys_pwrite(struct drm_device *dev,
3805 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +10003806 struct drm_i915_gem_pwrite *args,
3807 struct drm_file *file_priv)
3808{
Chris Wilson05394f32010-11-08 19:18:58 +00003809 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
Chris Wilsonb47b30c2010-11-08 01:12:29 +00003810 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003811
Chris Wilsonb47b30c2010-11-08 01:12:29 +00003812 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
3813 unsigned long unwritten;
3814
3815 /* The physical object once assigned is fixed for the lifetime
3816 * of the obj, so we can safely drop the lock and continue
3817 * to access vaddr.
3818 */
3819 mutex_unlock(&dev->struct_mutex);
3820 unwritten = copy_from_user(vaddr, user_data, args->size);
3821 mutex_lock(&dev->struct_mutex);
3822 if (unwritten)
3823 return -EFAULT;
3824 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10003825
Daniel Vetter40ce6572010-11-05 18:12:18 +01003826 intel_gtt_chipset_flush();
Dave Airlie71acb5e2008-12-30 20:31:46 +10003827 return 0;
3828}
Eric Anholtb9624422009-06-03 07:27:35 +00003829
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003830void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00003831{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003832 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003833
3834 /* Clean up our request list when the client is going away, so that
3835 * later retire_requests won't dereference our soon-to-be-gone
3836 * file_priv.
3837 */
Chris Wilson1c255952010-09-26 11:03:27 +01003838 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003839 while (!list_empty(&file_priv->mm.request_list)) {
3840 struct drm_i915_gem_request *request;
3841
3842 request = list_first_entry(&file_priv->mm.request_list,
3843 struct drm_i915_gem_request,
3844 client_list);
3845 list_del(&request->client_list);
3846 request->file_priv = NULL;
3847 }
Chris Wilson1c255952010-09-26 11:03:27 +01003848 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00003849}
Chris Wilson31169712009-09-14 16:50:28 +01003850
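/* The GPU counts as active while the active or flushing lists are non-empty. */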
Chris Wilson31169712009-09-14 16:50:28 +01003851static int
Chris Wilson1637ef42010-04-20 17:10:35 +01003852i915_gpu_is_active(struct drm_device *dev)
3853{
3854 drm_i915_private_t *dev_priv = dev->dev_private;
3855 int lists_empty;
3856
Chris Wilson1637ef42010-04-20 17:10:35 +01003857 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
Chris Wilson17250b72010-10-28 12:51:39 +01003858 list_empty(&dev_priv->mm.active_list);
Chris Wilson1637ef42010-04-20 17:10:35 +01003859
3860 return !lists_empty;
3861}
3862
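/*
 * Shrinker callback: with nr_to_scan == 0 just report the number of inactive
 * objects; otherwise retire requests, unbind purgeable objects first, then
 * other inactive objects, and as a last resort idle the GPU and rescan.
 */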
3863static int
Ying Han1495f232011-05-24 17:12:27 -07003864i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
Chris Wilson31169712009-09-14 16:50:28 +01003865{
Chris Wilson17250b72010-10-28 12:51:39 +01003866 struct drm_i915_private *dev_priv =
3867 container_of(shrinker,
3868 struct drm_i915_private,
3869 mm.inactive_shrinker);
3870 struct drm_device *dev = dev_priv->dev;
3871 struct drm_i915_gem_object *obj, *next;
Ying Han1495f232011-05-24 17:12:27 -07003872 int nr_to_scan = sc->nr_to_scan;
Chris Wilson17250b72010-10-28 12:51:39 +01003873 int cnt;
3874
3875 if (!mutex_trylock(&dev->struct_mutex))
Chris Wilsonbbe2e112010-10-28 22:35:07 +01003876 return 0;
Chris Wilson31169712009-09-14 16:50:28 +01003877
3878 /* "fast-path" to count number of available objects */
3879 if (nr_to_scan == 0) {
Chris Wilson17250b72010-10-28 12:51:39 +01003880 cnt = 0;
3881 list_for_each_entry(obj,
3882 &dev_priv->mm.inactive_list,
3883 mm_list)
3884 cnt++;
3885 mutex_unlock(&dev->struct_mutex);
3886 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01003887 }
3888
Chris Wilson1637ef42010-04-20 17:10:35 +01003889rescan:
Chris Wilson31169712009-09-14 16:50:28 +01003890 /* first scan for clean buffers */
Chris Wilson17250b72010-10-28 12:51:39 +01003891 i915_gem_retire_requests(dev);
Chris Wilson31169712009-09-14 16:50:28 +01003892
Chris Wilson17250b72010-10-28 12:51:39 +01003893 list_for_each_entry_safe(obj, next,
3894 &dev_priv->mm.inactive_list,
3895 mm_list) {
3896 if (i915_gem_object_is_purgeable(obj)) {
Chris Wilson20217462010-11-23 15:26:33 +00003897 if (i915_gem_object_unbind(obj) == 0 &&
3898 --nr_to_scan == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01003899 break;
Chris Wilson31169712009-09-14 16:50:28 +01003900 }
Chris Wilson31169712009-09-14 16:50:28 +01003901 }
3902
3903 /* second pass, evict/count anything still on the inactive list */
Chris Wilson17250b72010-10-28 12:51:39 +01003904 cnt = 0;
3905 list_for_each_entry_safe(obj, next,
3906 &dev_priv->mm.inactive_list,
3907 mm_list) {
Chris Wilson20217462010-11-23 15:26:33 +00003908 if (nr_to_scan &&
3909 i915_gem_object_unbind(obj) == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01003910 nr_to_scan--;
Chris Wilson20217462010-11-23 15:26:33 +00003911 else
Chris Wilson17250b72010-10-28 12:51:39 +01003912 cnt++;
Chris Wilson31169712009-09-14 16:50:28 +01003913 }
3914
Chris Wilson17250b72010-10-28 12:51:39 +01003915 if (nr_to_scan && i915_gpu_is_active(dev)) {
Chris Wilson1637ef42010-04-20 17:10:35 +01003916 /*
3917 * We are desperate for pages, so as a last resort, wait
3918 * for the GPU to finish and discard whatever we can.
 3919	 * This dramatically reduces the number of OOM-killer
 3920	 * events whilst running the GPU aggressively.
3921 */
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08003922 if (i915_gpu_idle(dev, true) == 0)
Chris Wilson1637ef42010-04-20 17:10:35 +01003923 goto rescan;
3924 }
Chris Wilson17250b72010-10-28 12:51:39 +01003925 mutex_unlock(&dev->struct_mutex);
3926 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01003927}