/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
								   uint64_t offset,
								   uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						     unsigned alignment,
						     bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
				     struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

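/* An object is "inactive" when it is bound into the GTT, is not pinned and
 * the GPU is no longer reading from or writing to it.
 */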
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	i915_gem_init_global_gtt(dev, args->gtt_start,
				 args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
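	/* For example (illustrative only): a 1024x768 request at 32 bpp gives
	 * (32 + 7) / 8 = 4 bytes per pixel and 1024 * 4 = 4096 bytes per row,
	 * which is already a multiple of the 64-byte pitch alignment below.
	 */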
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

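/* Objects whose bit-6 swizzle mode depends on physical address bit 17
 * (I915_BIT_6_SWIZZLE_9_10_17) cannot be detiled correctly by userspace,
 * since bit 17 is not visible in the shmem page offset. The CPU pread/pwrite
 * slow paths below compensate on a per-page basis instead.
 */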
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page);
		ret = __copy_to_user_inatomic(user_data,
					      vaddr + page_offset,
					      page_length);
		kunmap_atomic(vaddr);

		mark_page_accessed(page);
		page_cache_release(page);
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

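/* Helpers for copying to/from a bit-17 swizzled object: the data is walked
 * one 64-byte cacheline at a time, and for pages that need it the GPU-side
 * offset has bit 6 flipped (gpu_offset ^ 64) to undo the swizzle.
 */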
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
			  const char *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pread path, which copies the data to user space
 * with the struct_mutex released, so we don't take page faults on the user's
 * address while holding the lock.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;

	mutex_unlock(&dev->struct_mutex);

	while (remain > 0) {
		struct page *page;
		char *vaddr;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		vaddr = kmap(page);
		if (page_do_bit17_swizzling)
			ret = __copy_to_user_swizzled(user_data,
						      vaddr, shmem_page_offset,
						      page_length);
		else
			ret = __copy_to_user(user_data,
					     vaddr + shmem_page_offset,
					     page_length);
		kunmap(page);

		mark_page_accessed(page);
		page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	mutex_lock(&dev->struct_mutex);
	/* Fixup: Kill any reinstated backing storage pages */
	if (obj->madv == __I915_MADV_PURGED)
		i915_gem_object_truncate(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	ret = -EFAULT;
	if (!i915_gem_object_needs_bit17_swizzle(obj))
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

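/* Map a single aperture page write-combined in atomic context and copy the
 * user data into it without faulting; any bytes left uncopied are returned
 * so the caller can fall back to the sleeping slow path.
 */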
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin_pages;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin_pages;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page);
		ret = __copy_from_user_inatomic(vaddr + page_offset,
						user_data,
						page_length);
		kunmap_atomic(vaddr);

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pwrite path, which maps each backing page with
 * kmap and copies from user space with the struct_mutex released.
 *
 * This avoids taking mmap_sem for a fault on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;
	obj->dirty = 1;

	mutex_unlock(&dev->struct_mutex);

	while (remain > 0) {
		struct page *page;
		char *vaddr;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		vaddr = kmap(page);
		if (page_do_bit17_swizzling)
			ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
							user_data,
							page_length);
		else
			ret = __copy_from_user(vaddr + shmem_page_offset,
					       user_data,
					       page_length);
		kunmap(page);

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	mutex_lock(&dev->struct_mutex);
	/* Fixup: Kill any reinstated backing storage pages */
	if (obj->madv == __I915_MADV_PURGED)
		i915_gem_object_truncate(obj);
	/* and flush dirty cachelines in case the object isn't in the cpu write
	 * domain anymore. */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		i915_gem_clflush_object(obj);
		intel_gtt_chipset_flush();
	}

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->gtt_space &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_object_pin(obj, 0, true);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			goto out_unpin;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
		i915_gem_object_unpin(obj);

		if (ret != -EFAULT)
			goto out;
		/* Fall through to the shmfs paths because the gtt paths might
		 * fail with non-page-backed user pointers (e.g. gtt mappings
		 * when moving data between textures). */
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		goto out;

	ret = -EFAULT;
	if (!i915_gem_object_needs_bit17_swizzle(obj))
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Now bind it into the GTT if needed */
	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	if (!obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	if (obj->tiling_mode == I915_TILING_NONE)
		ret = i915_gem_object_put_fence(obj);
	else
		ret = i915_gem_object_get_fence(obj, NULL);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
1192 * mapping will then trigger a page fault on the next user access, allowing
1193 * fixup by i915_gem_fault().
1194 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001195void
Chris Wilson05394f32010-11-08 19:18:58 +00001196i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001197{
Chris Wilson6299f992010-11-24 12:23:44 +00001198 if (!obj->fault_mappable)
1199 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001200
Chris Wilsonf6e47882011-03-20 21:09:12 +00001201 if (obj->base.dev->dev_mapping)
1202 unmap_mapping_range(obj->base.dev->dev_mapping,
1203 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1204 obj->base.size, 1);
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001205
Chris Wilson6299f992010-11-24 12:23:44 +00001206 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001207}
1208
Chris Wilson92b88ae2010-11-09 11:47:32 +00001209static uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001210i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001211{
Chris Wilsone28f8712011-07-18 13:11:49 -07001212 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001213
1214 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001215 tiling_mode == I915_TILING_NONE)
1216 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001217
1218 /* Previous chips need a power-of-two fence region when tiling */
1219 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001220 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001221 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001222 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001223
Chris Wilsone28f8712011-07-18 13:11:49 -07001224 while (gtt_size < size)
1225 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001226
Chris Wilsone28f8712011-07-18 13:11:49 -07001227 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001228}
1229
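/*
 * Worked example for the rounding above (values purely illustrative): a
 * 1280KiB tiled object on gen3 starts from the 1MiB minimum fence region
 * and is doubled once to 2MiB; on gen2 it starts from 512KiB and is doubled
 * twice, also ending at 2MiB. On gen4+ (or for untiled objects) the object
 * size is returned unchanged.
 */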
Jesse Barnesde151cf2008-11-12 10:03:55 -08001230/**
1231 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1232 * @obj: object to check
1233 *
1234 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001235 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001236 */
1237static uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001238i915_gem_get_gtt_alignment(struct drm_device *dev,
1239 uint32_t size,
1240 int tiling_mode)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001241{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001242 /*
1243 * Minimum alignment is 4k (GTT page size), but might be greater
1244 * if a fence register is needed for the object.
1245 */
Chris Wilsona00b10c2010-09-24 21:15:47 +01001246 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001247 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001248 return 4096;
1249
1250 /*
1251 * Previous chips need to be aligned to the size of the smallest
1252 * fence register that can contain the object.
1253 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001254 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001255}
1256
Daniel Vetter5e783302010-11-14 22:32:36 +01001257/**
1258 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1259 * unfenced object
Chris Wilsone28f8712011-07-18 13:11:49 -07001260 * @dev: the device
1261 * @size: size of the object
1262 * @tiling_mode: tiling mode of the object
Daniel Vetter5e783302010-11-14 22:32:36 +01001263 *
1264 * Return the required GTT alignment for an object, only taking into account
1265 * unfenced tiled surface requirements.
1266 */
Chris Wilson467cffb2011-03-07 10:42:03 +00001267uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001268i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1269 uint32_t size,
1270 int tiling_mode)
Daniel Vetter5e783302010-11-14 22:32:36 +01001271{
Daniel Vetter5e783302010-11-14 22:32:36 +01001272 /*
1273 * Minimum alignment is 4k (GTT page size) for sane hw.
1274 */
1275 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001276 tiling_mode == I915_TILING_NONE)
Daniel Vetter5e783302010-11-14 22:32:36 +01001277 return 4096;
1278
Chris Wilsone28f8712011-07-18 13:11:49 -07001279 /* Previous hardware however needs to be aligned to a power-of-two
1280 * tile height. The simplest method for determining this is to reuse
 1281	 * the power-of-two fence region size.
Daniel Vetter5e783302010-11-14 22:32:36 +01001282 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001283 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Daniel Vetter5e783302010-11-14 22:32:36 +01001284}
1285
Jesse Barnesde151cf2008-11-12 10:03:55 -08001286int
Dave Airlieff72145b2011-02-07 12:16:14 +10001287i915_gem_mmap_gtt(struct drm_file *file,
1288 struct drm_device *dev,
1289 uint32_t handle,
1290 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001291{
Chris Wilsonda761a62010-10-27 17:37:08 +01001292 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001293 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001294 int ret;
1295
1296 if (!(dev->driver->driver_features & DRIVER_GEM))
1297 return -ENODEV;
1298
Chris Wilson76c1dec2010-09-25 11:22:51 +01001299 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001300 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001301 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001302
Dave Airlieff72145b2011-02-07 12:16:14 +10001303 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001304 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001305 ret = -ENOENT;
1306 goto unlock;
1307 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001308
Chris Wilson05394f32010-11-08 19:18:58 +00001309 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001310 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001311 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001312 }
1313
Chris Wilson05394f32010-11-08 19:18:58 +00001314 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001315 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001316 ret = -EINVAL;
1317 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001318 }
1319
Chris Wilson05394f32010-11-08 19:18:58 +00001320 if (!obj->base.map_list.map) {
Rob Clarkb464e9a2011-08-10 08:09:08 -05001321 ret = drm_gem_create_mmap_offset(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001322 if (ret)
1323 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001324 }
1325
Dave Airlieff72145b2011-02-07 12:16:14 +10001326 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001327
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001328out:
Chris Wilson05394f32010-11-08 19:18:58 +00001329 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001330unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001331 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001332 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001333}
1334
Dave Airlieff72145b2011-02-07 12:16:14 +10001335/**
1336 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1337 * @dev: DRM device
1338 * @data: GTT mapping ioctl data
1339 * @file: GEM object info
1340 *
1341 * Simply returns the fake offset to userspace so it can mmap it.
1342 * The mmap call will end up in drm_gem_mmap(), which will set things
1343 * up so we can get faults in the handler above.
1344 *
1345 * The fault handler will take care of binding the object into the GTT
1346 * (since it may have been evicted to make room for something), allocating
1347 * a fence register, and mapping the appropriate aperture address into
1348 * userspace.
1349 */
1350int
1351i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1352 struct drm_file *file)
1353{
1354 struct drm_i915_gem_mmap_gtt *args = data;
1355
1356 if (!(dev->driver->driver_features & DRIVER_GEM))
1357 return -ENODEV;
1358
1359 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1360}
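
/*
 * Illustrative userspace-side sketch (not part of this file; drmIoctl()
 * and the ioctl/struct names come from libdrm and i915_drm.h, and error
 * handling is omitted):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * Subsequent faults on ptr are then serviced by i915_gem_fault() through
 * the fake offset returned above.
 */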
1361
1362
Chris Wilsone5281cc2010-10-28 13:45:36 +01001363static int
Chris Wilson05394f32010-11-08 19:18:58 +00001364i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
Chris Wilsone5281cc2010-10-28 13:45:36 +01001365 gfp_t gfpmask)
1366{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001367 int page_count, i;
1368 struct address_space *mapping;
1369 struct inode *inode;
1370 struct page *page;
1371
1372 /* Get the list of pages out of our struct file. They'll be pinned
1373 * at this point until we release them.
1374 */
Chris Wilson05394f32010-11-08 19:18:58 +00001375 page_count = obj->base.size / PAGE_SIZE;
1376 BUG_ON(obj->pages != NULL);
1377 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1378 if (obj->pages == NULL)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001379 return -ENOMEM;
1380
Chris Wilson05394f32010-11-08 19:18:58 +00001381 inode = obj->base.filp->f_path.dentry->d_inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001382 mapping = inode->i_mapping;
Hugh Dickins5949eac2011-06-27 16:18:18 -07001383 gfpmask |= mapping_gfp_mask(mapping);
1384
Chris Wilsone5281cc2010-10-28 13:45:36 +01001385 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07001386 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001387 if (IS_ERR(page))
1388 goto err_pages;
1389
Chris Wilson05394f32010-11-08 19:18:58 +00001390 obj->pages[i] = page;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001391 }
1392
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001393 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilsone5281cc2010-10-28 13:45:36 +01001394 i915_gem_object_do_bit_17_swizzle(obj);
1395
1396 return 0;
1397
1398err_pages:
1399 while (i--)
Chris Wilson05394f32010-11-08 19:18:58 +00001400 page_cache_release(obj->pages[i]);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001401
Chris Wilson05394f32010-11-08 19:18:58 +00001402 drm_free_large(obj->pages);
1403 obj->pages = NULL;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001404 return PTR_ERR(page);
1405}
1406
Chris Wilson5cdf5882010-09-27 15:51:07 +01001407static void
Chris Wilson05394f32010-11-08 19:18:58 +00001408i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001409{
Chris Wilson05394f32010-11-08 19:18:58 +00001410 int page_count = obj->base.size / PAGE_SIZE;
Eric Anholt673a3942008-07-30 12:06:12 -07001411 int i;
1412
Chris Wilson05394f32010-11-08 19:18:58 +00001413 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001414
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001415 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001416 i915_gem_object_save_bit_17_swizzle(obj);
1417
Chris Wilson05394f32010-11-08 19:18:58 +00001418 if (obj->madv == I915_MADV_DONTNEED)
1419 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001420
1421 for (i = 0; i < page_count; i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00001422 if (obj->dirty)
1423 set_page_dirty(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001424
Chris Wilson05394f32010-11-08 19:18:58 +00001425 if (obj->madv == I915_MADV_WILLNEED)
1426 mark_page_accessed(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001427
Chris Wilson05394f32010-11-08 19:18:58 +00001428 page_cache_release(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001429 }
Chris Wilson05394f32010-11-08 19:18:58 +00001430 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001431
Chris Wilson05394f32010-11-08 19:18:58 +00001432 drm_free_large(obj->pages);
1433 obj->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001434}
1435
Chris Wilson54cf91d2010-11-25 18:00:26 +00001436void
Chris Wilson05394f32010-11-08 19:18:58 +00001437i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001438 struct intel_ring_buffer *ring,
1439 u32 seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001440{
Chris Wilson05394f32010-11-08 19:18:58 +00001441 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001442 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter617dbe22010-02-11 22:16:02 +01001443
Zou Nan hai852835f2010-05-21 09:08:56 +08001444 BUG_ON(ring == NULL);
Chris Wilson05394f32010-11-08 19:18:58 +00001445 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001446
1447 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001448 if (!obj->active) {
1449 drm_gem_object_reference(&obj->base);
1450 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001451 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001452
Eric Anholt673a3942008-07-30 12:06:12 -07001453 /* Move from whatever list we were on to the tail of execution. */
Chris Wilson05394f32010-11-08 19:18:58 +00001454 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1455 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001456
Chris Wilson05394f32010-11-08 19:18:58 +00001457 obj->last_rendering_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001458 if (obj->fenced_gpu_access) {
1459 struct drm_i915_fence_reg *reg;
1460
1461 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1462
1463 obj->last_fenced_seqno = seqno;
1464 obj->last_fenced_ring = ring;
1465
1466 reg = &dev_priv->fence_regs[obj->fence_reg];
1467 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1468 }
1469}
1470
1471static void
1472i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1473{
1474 list_del_init(&obj->ring_list);
1475 obj->last_rendering_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001476}
1477
Eric Anholtce44b0e2008-11-06 16:00:31 -08001478static void
Chris Wilson05394f32010-11-08 19:18:58 +00001479i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
Eric Anholtce44b0e2008-11-06 16:00:31 -08001480{
Chris Wilson05394f32010-11-08 19:18:58 +00001481 struct drm_device *dev = obj->base.dev;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001482 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001483
Chris Wilson05394f32010-11-08 19:18:58 +00001484 BUG_ON(!obj->active);
1485 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001486
1487 i915_gem_object_move_off_active(obj);
1488}
1489
1490static void
1491i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1492{
1493 struct drm_device *dev = obj->base.dev;
1494 struct drm_i915_private *dev_priv = dev->dev_private;
1495
1496 if (obj->pin_count != 0)
1497 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1498 else
1499 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1500
1501 BUG_ON(!list_empty(&obj->gpu_write_list));
1502 BUG_ON(!obj->active);
1503 obj->ring = NULL;
1504
1505 i915_gem_object_move_off_active(obj);
1506 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001507
1508 obj->active = 0;
Chris Wilson87ca9c82010-12-02 09:42:56 +00001509 obj->pending_gpu_write = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001510 drm_gem_object_unreference(&obj->base);
1511
1512 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08001513}
Eric Anholt673a3942008-07-30 12:06:12 -07001514
Chris Wilson963b4832009-09-20 23:03:54 +01001515/* Immediately discard the backing storage */
1516static void
Chris Wilson05394f32010-11-08 19:18:58 +00001517i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001518{
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001519 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001520
Chris Wilsonae9fed62010-08-07 11:01:30 +01001521 /* Our goal here is to return as much of the memory as
1522 * is possible back to the system as we are called from OOM.
1523 * To do this we must instruct the shmfs to drop all of its
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001524 * backing pages, *now*.
Chris Wilsonae9fed62010-08-07 11:01:30 +01001525 */
Chris Wilson05394f32010-11-08 19:18:58 +00001526 inode = obj->base.filp->f_path.dentry->d_inode;
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001527 shmem_truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001528
Chris Wilsona14917e2012-02-24 21:13:38 +00001529 if (obj->base.map_list.map)
1530 drm_gem_free_mmap_offset(&obj->base);
1531
Chris Wilson05394f32010-11-08 19:18:58 +00001532 obj->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001533}
1534
1535static inline int
Chris Wilson05394f32010-11-08 19:18:58 +00001536i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001537{
Chris Wilson05394f32010-11-08 19:18:58 +00001538 return obj->madv == I915_MADV_DONTNEED;
Chris Wilson963b4832009-09-20 23:03:54 +01001539}
1540
Eric Anholt673a3942008-07-30 12:06:12 -07001541static void
Chris Wilsondb53a302011-02-03 11:57:46 +00001542i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1543 uint32_t flush_domains)
Daniel Vetter63560392010-02-19 11:51:59 +01001544{
Chris Wilson05394f32010-11-08 19:18:58 +00001545 struct drm_i915_gem_object *obj, *next;
Daniel Vetter63560392010-02-19 11:51:59 +01001546
Chris Wilson05394f32010-11-08 19:18:58 +00001547 list_for_each_entry_safe(obj, next,
Chris Wilson64193402010-10-24 12:38:05 +01001548 &ring->gpu_write_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001549 gpu_write_list) {
Chris Wilson05394f32010-11-08 19:18:58 +00001550 if (obj->base.write_domain & flush_domains) {
1551 uint32_t old_write_domain = obj->base.write_domain;
Daniel Vetter63560392010-02-19 11:51:59 +01001552
Chris Wilson05394f32010-11-08 19:18:58 +00001553 obj->base.write_domain = 0;
1554 list_del_init(&obj->gpu_write_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001555 i915_gem_object_move_to_active(obj, ring,
Chris Wilsondb53a302011-02-03 11:57:46 +00001556 i915_gem_next_request_seqno(ring));
Daniel Vetter63560392010-02-19 11:51:59 +01001557
Daniel Vetter63560392010-02-19 11:51:59 +01001558 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00001559 obj->base.read_domains,
Daniel Vetter63560392010-02-19 11:51:59 +01001560 old_write_domain);
1561 }
1562 }
1563}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001564
Daniel Vetter53d227f2012-01-25 16:32:49 +01001565static u32
1566i915_gem_get_seqno(struct drm_device *dev)
1567{
1568 drm_i915_private_t *dev_priv = dev->dev_private;
1569 u32 seqno = dev_priv->next_seqno;
1570
1571 /* reserve 0 for non-seqno */
1572 if (++dev_priv->next_seqno == 0)
1573 dev_priv->next_seqno = 1;
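	/* e.g. when next_seqno was 0xffffffff we hand that value out and the
	 * counter wraps straight to 1, so 0 is never reported as a real seqno.
	 */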
1574
1575 return seqno;
1576}
1577
1578u32
1579i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1580{
1581 if (ring->outstanding_lazy_request == 0)
1582 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1583
1584 return ring->outstanding_lazy_request;
1585}
1586
Chris Wilson3cce4692010-10-27 16:11:02 +01001587int
Chris Wilsondb53a302011-02-03 11:57:46 +00001588i915_add_request(struct intel_ring_buffer *ring,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001589 struct drm_file *file,
Chris Wilsondb53a302011-02-03 11:57:46 +00001590 struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001591{
Chris Wilsondb53a302011-02-03 11:57:46 +00001592 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001593 uint32_t seqno;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001594 u32 request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001595 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01001596 int ret;
1597
1598 BUG_ON(request == NULL);
Daniel Vetter53d227f2012-01-25 16:32:49 +01001599 seqno = i915_gem_next_request_seqno(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001600
Chris Wilsona71d8d92012-02-15 11:25:36 +00001601 /* Record the position of the start of the request so that
1602 * should we detect the updated seqno part-way through the
1603 * GPU processing the request, we never over-estimate the
1604 * position of the head.
1605 */
1606 request_ring_position = intel_ring_get_tail(ring);
1607
Chris Wilson3cce4692010-10-27 16:11:02 +01001608 ret = ring->add_request(ring, &seqno);
1609 if (ret)
1610 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001611
Chris Wilsondb53a302011-02-03 11:57:46 +00001612 trace_i915_gem_request_add(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001613
1614 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001615 request->ring = ring;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001616 request->tail = request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001617 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001618 was_empty = list_empty(&ring->request_list);
1619 list_add_tail(&request->list, &ring->request_list);
1620
Chris Wilsondb53a302011-02-03 11:57:46 +00001621 if (file) {
1622 struct drm_i915_file_private *file_priv = file->driver_priv;
1623
Chris Wilson1c255952010-09-26 11:03:27 +01001624 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001625 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001626 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001627 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01001628 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00001629 }
Eric Anholt673a3942008-07-30 12:06:12 -07001630
Daniel Vetter5391d0c2012-01-25 14:03:57 +01001631 ring->outstanding_lazy_request = 0;
Chris Wilsondb53a302011-02-03 11:57:46 +00001632
Ben Gamarif65d9422009-09-14 17:48:44 -04001633 if (!dev_priv->mm.suspended) {
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07001634 if (i915_enable_hangcheck) {
1635 mod_timer(&dev_priv->hangcheck_timer,
1636 jiffies +
1637 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1638 }
Ben Gamarif65d9422009-09-14 17:48:44 -04001639 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001640 queue_delayed_work(dev_priv->wq,
1641 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001642 }
Chris Wilson3cce4692010-10-27 16:11:02 +01001643 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001644}
1645
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001646static inline void
1647i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001648{
Chris Wilson1c255952010-09-26 11:03:27 +01001649 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07001650
Chris Wilson1c255952010-09-26 11:03:27 +01001651 if (!file_priv)
1652 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001653
Chris Wilson1c255952010-09-26 11:03:27 +01001654 spin_lock(&file_priv->mm.lock);
Herton Ronaldo Krzesinski09bfa512011-03-17 13:45:12 +00001655 if (request->file_priv) {
1656 list_del(&request->client_list);
1657 request->file_priv = NULL;
1658 }
Chris Wilson1c255952010-09-26 11:03:27 +01001659 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001660}
1661
Chris Wilsondfaae392010-09-22 10:31:52 +01001662static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1663 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001664{
Chris Wilsondfaae392010-09-22 10:31:52 +01001665 while (!list_empty(&ring->request_list)) {
1666 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001667
Chris Wilsondfaae392010-09-22 10:31:52 +01001668 request = list_first_entry(&ring->request_list,
1669 struct drm_i915_gem_request,
1670 list);
1671
1672 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001673 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001674 kfree(request);
1675 }
1676
1677 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001678 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001679
Chris Wilson05394f32010-11-08 19:18:58 +00001680 obj = list_first_entry(&ring->active_list,
1681 struct drm_i915_gem_object,
1682 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001683
Chris Wilson05394f32010-11-08 19:18:58 +00001684 obj->base.write_domain = 0;
1685 list_del_init(&obj->gpu_write_list);
1686 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001687 }
Eric Anholt673a3942008-07-30 12:06:12 -07001688}
1689
Chris Wilson312817a2010-11-22 11:50:11 +00001690static void i915_gem_reset_fences(struct drm_device *dev)
1691{
1692 struct drm_i915_private *dev_priv = dev->dev_private;
1693 int i;
1694
Daniel Vetter4b9de732011-10-09 21:52:02 +02001695 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00001696 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00001697 struct drm_i915_gem_object *obj = reg->obj;
1698
1699 if (!obj)
1700 continue;
1701
1702 if (obj->tiling_mode)
1703 i915_gem_release_mmap(obj);
1704
Chris Wilsond9e86c02010-11-10 16:40:20 +00001705 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1706 reg->obj->fenced_gpu_access = false;
1707 reg->obj->last_fenced_seqno = 0;
1708 reg->obj->last_fenced_ring = NULL;
1709 i915_gem_clear_fence_reg(dev, reg);
Chris Wilson312817a2010-11-22 11:50:11 +00001710 }
1711}
1712
Chris Wilson069efc12010-09-30 16:53:18 +01001713void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07001714{
Chris Wilsondfaae392010-09-22 10:31:52 +01001715 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001716 struct drm_i915_gem_object *obj;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001717 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001718
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001719 for (i = 0; i < I915_NUM_RINGS; i++)
1720 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
Chris Wilsondfaae392010-09-22 10:31:52 +01001721
1722 /* Remove anything from the flushing lists. The GPU cache is likely
1723 * to be lost on reset along with the data, so simply move the
1724 * lost bo to the inactive list.
1725 */
1726 while (!list_empty(&dev_priv->mm.flushing_list)) {
Akshay Joshi0206e352011-08-16 15:34:10 -04001727 obj = list_first_entry(&dev_priv->mm.flushing_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001728 struct drm_i915_gem_object,
1729 mm_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001730
Chris Wilson05394f32010-11-08 19:18:58 +00001731 obj->base.write_domain = 0;
1732 list_del_init(&obj->gpu_write_list);
1733 i915_gem_object_move_to_inactive(obj);
Chris Wilson9375e442010-09-19 12:21:28 +01001734 }
Chris Wilson9375e442010-09-19 12:21:28 +01001735
Chris Wilsondfaae392010-09-22 10:31:52 +01001736 /* Move everything out of the GPU domains to ensure we do any
1737 * necessary invalidation upon reuse.
1738 */
Chris Wilson05394f32010-11-08 19:18:58 +00001739 list_for_each_entry(obj,
Chris Wilson77f01232010-09-19 12:31:36 +01001740 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001741 mm_list)
Chris Wilson77f01232010-09-19 12:31:36 +01001742 {
Chris Wilson05394f32010-11-08 19:18:58 +00001743 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilson77f01232010-09-19 12:31:36 +01001744 }
Chris Wilson069efc12010-09-30 16:53:18 +01001745
1746 /* The fence registers are invalidated so clear them out */
Chris Wilson312817a2010-11-22 11:50:11 +00001747 i915_gem_reset_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001748}
1749
1750/**
1751 * This function clears the request list as sequence numbers are passed.
1752 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00001753void
Chris Wilsondb53a302011-02-03 11:57:46 +00001754i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001755{
Eric Anholt673a3942008-07-30 12:06:12 -07001756 uint32_t seqno;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001757 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001758
Chris Wilsondb53a302011-02-03 11:57:46 +00001759 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001760 return;
1761
Chris Wilsondb53a302011-02-03 11:57:46 +00001762 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001763
Chris Wilson78501ea2010-10-27 12:18:21 +01001764 seqno = ring->get_seqno(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001765
Chris Wilson076e2c02011-01-21 10:07:18 +00001766 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001767 if (seqno >= ring->sync_seqno[i])
1768 ring->sync_seqno[i] = 0;
1769
Zou Nan hai852835f2010-05-21 09:08:56 +08001770 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001771 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001772
Zou Nan hai852835f2010-05-21 09:08:56 +08001773 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001774 struct drm_i915_gem_request,
1775 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001776
Chris Wilsondfaae392010-09-22 10:31:52 +01001777 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001778 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001779
Chris Wilsondb53a302011-02-03 11:57:46 +00001780 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00001781 /* We know the GPU must have read the request to have
1782 * sent us the seqno + interrupt, so use the position
 1783	 * of the tail of the request to update the last known position
1784 * of the GPU head.
1785 */
1786 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001787
1788 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001789 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001790 kfree(request);
1791 }
1792
1793 /* Move any buffers on the active list that are no longer referenced
1794 * by the ringbuffer to the flushing/inactive lists as appropriate.
1795 */
1796 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001797 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001798
Akshay Joshi0206e352011-08-16 15:34:10 -04001799 obj = list_first_entry(&ring->active_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001800 struct drm_i915_gem_object,
1801 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001802
Chris Wilson05394f32010-11-08 19:18:58 +00001803 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001804 break;
1805
Chris Wilson05394f32010-11-08 19:18:58 +00001806 if (obj->base.write_domain != 0)
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001807 i915_gem_object_move_to_flushing(obj);
1808 else
1809 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001810 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001811
Chris Wilsondb53a302011-02-03 11:57:46 +00001812 if (unlikely(ring->trace_irq_seqno &&
1813 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001814 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00001815 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001816 }
Chris Wilson23bc5982010-09-29 16:10:57 +01001817
Chris Wilsondb53a302011-02-03 11:57:46 +00001818 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001819}
1820
1821void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001822i915_gem_retire_requests(struct drm_device *dev)
1823{
1824 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001825 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001826
Chris Wilsonbe726152010-07-23 23:18:50 +01001827 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001828 struct drm_i915_gem_object *obj, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01001829
1830 /* We must be careful that during unbind() we do not
1831 * accidentally infinitely recurse into retire requests.
1832 * Currently:
1833 * retire -> free -> unbind -> wait -> retire_ring
1834 */
Chris Wilson05394f32010-11-08 19:18:58 +00001835 list_for_each_entry_safe(obj, next,
Chris Wilsonbe726152010-07-23 23:18:50 +01001836 &dev_priv->mm.deferred_free_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001837 mm_list)
Chris Wilson05394f32010-11-08 19:18:58 +00001838 i915_gem_free_object_tail(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01001839 }
1840
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001841 for (i = 0; i < I915_NUM_RINGS; i++)
Chris Wilsondb53a302011-02-03 11:57:46 +00001842 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001843}
1844
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001845static void
Eric Anholt673a3942008-07-30 12:06:12 -07001846i915_gem_retire_work_handler(struct work_struct *work)
1847{
1848 drm_i915_private_t *dev_priv;
1849 struct drm_device *dev;
Chris Wilson0a587052011-01-09 21:05:44 +00001850 bool idle;
1851 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001852
1853 dev_priv = container_of(work, drm_i915_private_t,
1854 mm.retire_work.work);
1855 dev = dev_priv->dev;
1856
Chris Wilson891b48c2010-09-29 12:26:37 +01001857 /* Come back later if the device is busy... */
1858 if (!mutex_trylock(&dev->struct_mutex)) {
1859 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1860 return;
1861 }
1862
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001863 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001864
Chris Wilson0a587052011-01-09 21:05:44 +00001865 /* Send a periodic flush down the ring so we don't hold onto GEM
1866 * objects indefinitely.
1867 */
1868 idle = true;
1869 for (i = 0; i < I915_NUM_RINGS; i++) {
1870 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1871
1872 if (!list_empty(&ring->gpu_write_list)) {
1873 struct drm_i915_gem_request *request;
1874 int ret;
1875
Chris Wilsondb53a302011-02-03 11:57:46 +00001876 ret = i915_gem_flush_ring(ring,
1877 0, I915_GEM_GPU_DOMAINS);
Chris Wilson0a587052011-01-09 21:05:44 +00001878 request = kzalloc(sizeof(*request), GFP_KERNEL);
1879 if (ret || request == NULL ||
Chris Wilsondb53a302011-02-03 11:57:46 +00001880 i915_add_request(ring, NULL, request))
Chris Wilson0a587052011-01-09 21:05:44 +00001881 kfree(request);
1882 }
1883
1884 idle &= list_empty(&ring->request_list);
1885 }
1886
1887 if (!dev_priv->mm.suspended && !idle)
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001888 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Chris Wilson0a587052011-01-09 21:05:44 +00001889
Eric Anholt673a3942008-07-30 12:06:12 -07001890 mutex_unlock(&dev->struct_mutex);
1891}
1892
Chris Wilsondb53a302011-02-03 11:57:46 +00001893/**
1894 * Waits for a sequence number to be signaled, and cleans up the
1895 * request and object lists appropriately for that event.
1896 */
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001897int
Chris Wilsondb53a302011-02-03 11:57:46 +00001898i915_wait_request(struct intel_ring_buffer *ring,
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001899 uint32_t seqno,
1900 bool do_retire)
Eric Anholt673a3942008-07-30 12:06:12 -07001901{
Chris Wilsondb53a302011-02-03 11:57:46 +00001902 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001903 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001904 int ret = 0;
1905
1906 BUG_ON(seqno == 0);
1907
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001908 if (atomic_read(&dev_priv->mm.wedged)) {
1909 struct completion *x = &dev_priv->error_completion;
1910 bool recovery_complete;
1911 unsigned long flags;
1912
1913 /* Give the error handler a chance to run. */
1914 spin_lock_irqsave(&x->wait.lock, flags);
1915 recovery_complete = x->done > 0;
1916 spin_unlock_irqrestore(&x->wait.lock, flags);
1917
1918 return recovery_complete ? -EIO : -EAGAIN;
1919 }
Ben Gamariffed1d02009-09-14 17:48:41 -04001920
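	/* Waiting on the ring's outstanding lazy seqno means the corresponding
	 * request has not actually been emitted yet; emit it now so that the
	 * GPU has something to signal for us to wait on.
	 */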
Chris Wilson5d97eb62010-11-10 20:40:02 +00001921 if (seqno == ring->outstanding_lazy_request) {
Chris Wilson3cce4692010-10-27 16:11:02 +01001922 struct drm_i915_gem_request *request;
1923
1924 request = kzalloc(sizeof(*request), GFP_KERNEL);
1925 if (request == NULL)
Daniel Vettere35a41d2010-02-11 22:13:59 +01001926 return -ENOMEM;
Chris Wilson3cce4692010-10-27 16:11:02 +01001927
Chris Wilsondb53a302011-02-03 11:57:46 +00001928 ret = i915_add_request(ring, NULL, request);
Chris Wilson3cce4692010-10-27 16:11:02 +01001929 if (ret) {
1930 kfree(request);
1931 return ret;
1932 }
1933
1934 seqno = request->seqno;
Daniel Vettere35a41d2010-02-11 22:13:59 +01001935 }
1936
Chris Wilson78501ea2010-10-27 12:18:21 +01001937 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00001938 if (HAS_PCH_SPLIT(ring->dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001939 ier = I915_READ(DEIER) | I915_READ(GTIER);
1940 else
1941 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001942 if (!ier) {
1943 DRM_ERROR("something (likely vbetool) disabled "
1944 "interrupts, re-enabling\n");
Chris Wilsonf01c22f2011-06-28 11:48:51 +01001945 ring->dev->driver->irq_preinstall(ring->dev);
1946 ring->dev->driver->irq_postinstall(ring->dev);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001947 }
1948
Chris Wilsondb53a302011-02-03 11:57:46 +00001949 trace_i915_gem_request_wait_begin(ring, seqno);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001950
Chris Wilsonb2223492010-10-27 15:27:33 +01001951 ring->waiting_seqno = seqno;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001952 if (ring->irq_get(ring)) {
Chris Wilsonce453d82011-02-21 14:43:56 +00001953 if (dev_priv->mm.interruptible)
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001954 ret = wait_event_interruptible(ring->irq_queue,
1955 i915_seqno_passed(ring->get_seqno(ring), seqno)
1956 || atomic_read(&dev_priv->mm.wedged));
1957 else
1958 wait_event(ring->irq_queue,
1959 i915_seqno_passed(ring->get_seqno(ring), seqno)
1960 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001961
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001962 ring->irq_put(ring);
Eric Anholte959b5d2011-12-22 14:55:01 -08001963 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
1964 seqno) ||
1965 atomic_read(&dev_priv->mm.wedged), 3000))
Chris Wilsonb5ba1772010-12-14 12:17:15 +00001966 ret = -EBUSY;
Chris Wilsonb2223492010-10-27 15:27:33 +01001967 ring->waiting_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001968
Chris Wilsondb53a302011-02-03 11:57:46 +00001969 trace_i915_gem_request_wait_end(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001970 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001971 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001972 ret = -EAGAIN;
Eric Anholt673a3942008-07-30 12:06:12 -07001973
Eric Anholt673a3942008-07-30 12:06:12 -07001974 /* Directly dispatch request retiring. While we have the work queue
1975 * to handle this, the waiter on a request often wants an associated
1976 * buffer to have made it to the inactive list, and we would need
1977 * a separate wait queue to handle that.
1978 */
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001979 if (ret == 0 && do_retire)
Chris Wilsondb53a302011-02-03 11:57:46 +00001980 i915_gem_retire_requests_ring(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001981
1982 return ret;
1983}
1984
Daniel Vetter48764bf2009-09-15 22:57:32 +02001985/**
Eric Anholt673a3942008-07-30 12:06:12 -07001986 * Ensures that all rendering to the object has completed and the object is
1987 * safe to unbind from the GTT or access from the CPU.
1988 */
Chris Wilson54cf91d2010-11-25 18:00:26 +00001989int
Chris Wilsonce453d82011-02-21 14:43:56 +00001990i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001991{
Eric Anholt673a3942008-07-30 12:06:12 -07001992 int ret;
1993
Eric Anholte47c68e2008-11-14 13:35:19 -08001994 /* This function only exists to support waiting for existing rendering,
1995 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07001996 */
Chris Wilson05394f32010-11-08 19:18:58 +00001997 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07001998
1999 /* If there is rendering queued on the buffer being evicted, wait for
2000 * it.
2001 */
Chris Wilson05394f32010-11-08 19:18:58 +00002002 if (obj->active) {
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002003 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
2004 true);
Chris Wilson2cf34d72010-09-14 13:03:28 +01002005 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002006 return ret;
2007 }
2008
2009 return 0;
2010}
2011
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002012static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2013{
2014 u32 old_write_domain, old_read_domains;
2015
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002016	/* Act as a barrier for all accesses through the GTT */
2017 mb();
2018
2019 /* Force a pagefault for domain tracking on next user access */
2020 i915_gem_release_mmap(obj);
2021
Keith Packardb97c3d92011-06-24 21:02:59 -07002022 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2023 return;
2024
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002025 old_read_domains = obj->base.read_domains;
2026 old_write_domain = obj->base.write_domain;
2027
2028 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2029 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2030
2031 trace_i915_gem_object_change_domain(obj,
2032 old_read_domains,
2033 old_write_domain);
2034}
2035
Eric Anholt673a3942008-07-30 12:06:12 -07002036/**
2037 * Unbinds an object from the GTT aperture.
2038 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002039int
Chris Wilson05394f32010-11-08 19:18:58 +00002040i915_gem_object_unbind(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002041{
Daniel Vetter7bddb012012-02-09 17:15:47 +01002042 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002043 int ret = 0;
2044
Chris Wilson05394f32010-11-08 19:18:58 +00002045 if (obj->gtt_space == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002046 return 0;
2047
Chris Wilson05394f32010-11-08 19:18:58 +00002048 if (obj->pin_count != 0) {
Eric Anholt673a3942008-07-30 12:06:12 -07002049 DRM_ERROR("Attempting to unbind pinned buffer\n");
2050 return -EINVAL;
2051 }
2052
Chris Wilsona8198ee2011-04-13 22:04:09 +01002053 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson8dc17752010-07-23 23:18:51 +01002054 if (ret == -ERESTARTSYS)
Eric Anholt673a3942008-07-30 12:06:12 -07002055 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002056	/* Continue on if we fail due to EIO; the GPU is hung, so we
 2057	 * should be safe and we need to clean up or else we might
2058 * cause memory corruption through use-after-free.
2059 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002060
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002061 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002062
2063 /* Move the object to the CPU domain to ensure that
2064 * any possible CPU writes while it's not in the GTT
2065 * are flushed when we go to remap it.
2066 */
2067 if (ret == 0)
2068 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2069 if (ret == -ERESTARTSYS)
2070 return ret;
Chris Wilson812ed4922010-09-30 15:08:57 +01002071 if (ret) {
Chris Wilsona8198ee2011-04-13 22:04:09 +01002072 /* In the event of a disaster, abandon all caches and
2073 * hope for the best.
2074 */
Chris Wilson812ed4922010-09-30 15:08:57 +01002075 i915_gem_clflush_object(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002076 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Chris Wilson812ed4922010-09-30 15:08:57 +01002077 }
Eric Anholt673a3942008-07-30 12:06:12 -07002078
Daniel Vetter96b47b62009-12-15 17:50:00 +01002079 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002080 ret = i915_gem_object_put_fence(obj);
2081 if (ret == -ERESTARTSYS)
2082 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002083
Chris Wilsondb53a302011-02-03 11:57:46 +00002084 trace_i915_gem_object_unbind(obj);
2085
Daniel Vetter74898d72012-02-15 23:50:22 +01002086 if (obj->has_global_gtt_mapping)
2087 i915_gem_gtt_unbind_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002088 if (obj->has_aliasing_ppgtt_mapping) {
2089 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2090 obj->has_aliasing_ppgtt_mapping = 0;
2091 }
Daniel Vetter74163902012-02-15 23:50:21 +01002092 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002093
Chris Wilsone5281cc2010-10-28 13:45:36 +01002094 i915_gem_object_put_pages_gtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002095
Chris Wilson6299f992010-11-24 12:23:44 +00002096 list_del_init(&obj->gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002097 list_del_init(&obj->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002098 /* Avoid an unnecessary call to unbind on rebind. */
Chris Wilson05394f32010-11-08 19:18:58 +00002099 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002100
Chris Wilson05394f32010-11-08 19:18:58 +00002101 drm_mm_put_block(obj->gtt_space);
2102 obj->gtt_space = NULL;
2103 obj->gtt_offset = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002104
Chris Wilson05394f32010-11-08 19:18:58 +00002105 if (i915_gem_object_is_purgeable(obj))
Chris Wilson963b4832009-09-20 23:03:54 +01002106 i915_gem_object_truncate(obj);
2107
Chris Wilson8dc17752010-07-23 23:18:51 +01002108 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002109}
2110
Chris Wilson88241782011-01-07 17:09:48 +00002111int
Chris Wilsondb53a302011-02-03 11:57:46 +00002112i915_gem_flush_ring(struct intel_ring_buffer *ring,
Chris Wilson54cf91d2010-11-25 18:00:26 +00002113 uint32_t invalidate_domains,
2114 uint32_t flush_domains)
2115{
Chris Wilson88241782011-01-07 17:09:48 +00002116 int ret;
2117
Chris Wilson36d527d2011-03-19 22:26:49 +00002118 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2119 return 0;
2120
Chris Wilsondb53a302011-02-03 11:57:46 +00002121 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2122
Chris Wilson88241782011-01-07 17:09:48 +00002123 ret = ring->flush(ring, invalidate_domains, flush_domains);
2124 if (ret)
2125 return ret;
2126
Chris Wilson36d527d2011-03-19 22:26:49 +00002127 if (flush_domains & I915_GEM_GPU_DOMAINS)
2128 i915_gem_process_flushing_list(ring, flush_domains);
2129
Chris Wilson88241782011-01-07 17:09:48 +00002130 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002131}
2132
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002133static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
Chris Wilsona56ba562010-09-28 10:07:56 +01002134{
Chris Wilson88241782011-01-07 17:09:48 +00002135 int ret;
2136
Chris Wilson395b70b2010-10-28 21:28:46 +01002137 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
Chris Wilson64193402010-10-24 12:38:05 +01002138 return 0;
2139
Chris Wilson88241782011-01-07 17:09:48 +00002140 if (!list_empty(&ring->gpu_write_list)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002141 ret = i915_gem_flush_ring(ring,
Chris Wilson0ac74c62010-12-06 14:36:02 +00002142 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
Chris Wilson88241782011-01-07 17:09:48 +00002143 if (ret)
2144 return ret;
2145 }
2146
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002147 return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
2148 do_retire);
Chris Wilsona56ba562010-09-28 10:07:56 +01002149}
2150
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002151int i915_gpu_idle(struct drm_device *dev, bool do_retire)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002152{
2153 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002154 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002155
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002156 /* Flush everything onto the inactive list. */
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002157 for (i = 0; i < I915_NUM_RINGS; i++) {
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002158 ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002159 if (ret)
2160 return ret;
2161 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002162
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002163 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002164}
2165
Daniel Vetterc6642782010-11-12 13:46:18 +00002166static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2167 struct intel_ring_buffer *pipelined)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002168{
Chris Wilson05394f32010-11-08 19:18:58 +00002169 struct drm_device *dev = obj->base.dev;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002170 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002171 u32 size = obj->gtt_space->size;
2172 int regnum = obj->fence_reg;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002173 uint64_t val;
2174
Chris Wilson05394f32010-11-08 19:18:58 +00002175 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Daniel Vetterc6642782010-11-12 13:46:18 +00002176 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00002177 val |= obj->gtt_offset & 0xfffff000;
2178 val |= (uint64_t)((obj->stride / 128) - 1) <<
Eric Anholt4e901fd2009-10-26 16:44:17 -07002179 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2180
Chris Wilson05394f32010-11-08 19:18:58 +00002181 if (obj->tiling_mode == I915_TILING_Y)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002182 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2183 val |= I965_FENCE_REG_VALID;
2184
Daniel Vetterc6642782010-11-12 13:46:18 +00002185 if (pipelined) {
2186 int ret = intel_ring_begin(pipelined, 6);
2187 if (ret)
2188 return ret;
2189
2190 intel_ring_emit(pipelined, MI_NOOP);
2191 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2192 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2193 intel_ring_emit(pipelined, (u32)val);
2194 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2195 intel_ring_emit(pipelined, (u32)(val >> 32));
2196 intel_ring_advance(pipelined);
2197 } else
2198 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2199
2200 return 0;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002201}
2202
Daniel Vetterc6642782010-11-12 13:46:18 +00002203static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2204 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002205{
Chris Wilson05394f32010-11-08 19:18:58 +00002206 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002207 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002208 u32 size = obj->gtt_space->size;
2209 int regnum = obj->fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002210 uint64_t val;
2211
Chris Wilson05394f32010-11-08 19:18:58 +00002212 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Jesse Barnesde151cf2008-11-12 10:03:55 -08002213 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00002214 val |= obj->gtt_offset & 0xfffff000;
2215 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2216 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002217 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2218 val |= I965_FENCE_REG_VALID;
2219
Daniel Vetterc6642782010-11-12 13:46:18 +00002220 if (pipelined) {
2221 int ret = intel_ring_begin(pipelined, 6);
2222 if (ret)
2223 return ret;
2224
2225 intel_ring_emit(pipelined, MI_NOOP);
2226 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2227 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2228 intel_ring_emit(pipelined, (u32)val);
2229 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2230 intel_ring_emit(pipelined, (u32)(val >> 32));
2231 intel_ring_advance(pipelined);
2232 } else
2233 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2234
2235 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002236}
2237
Daniel Vetterc6642782010-11-12 13:46:18 +00002238static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2239 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002240{
Chris Wilson05394f32010-11-08 19:18:58 +00002241 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002242 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002243 u32 size = obj->gtt_space->size;
Daniel Vetterc6642782010-11-12 13:46:18 +00002244 u32 fence_reg, val, pitch_val;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002245 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002246
Daniel Vetterc6642782010-11-12 13:46:18 +00002247 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2248 (size & -size) != size ||
2249 (obj->gtt_offset & (size - 1)),
2250 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2251 obj->gtt_offset, obj->map_and_fenceable, size))
2252 return -EINVAL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002253
Daniel Vetterc6642782010-11-12 13:46:18 +00002254 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
Jesse Barnes0f973f22009-01-26 17:10:45 -08002255 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002256 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002257 tile_width = 512;
2258
2259 /* Note: pitch better be a power of two tile widths */
Chris Wilson05394f32010-11-08 19:18:58 +00002260 pitch_val = obj->stride / tile_width;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002261 pitch_val = ffs(pitch_val) - 1;
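	/* pitch_val now holds log2 of the stride in tiles, e.g. a 2048-byte
	 * stride with 512-byte tiles gives 4 tiles -> ffs(4) - 1 = 2.
	 */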
Jesse Barnesde151cf2008-11-12 10:03:55 -08002262
Chris Wilson05394f32010-11-08 19:18:58 +00002263 val = obj->gtt_offset;
2264 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002265 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002266 val |= I915_FENCE_SIZE_BITS(size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002267 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2268 val |= I830_FENCE_REG_VALID;
2269
Chris Wilson05394f32010-11-08 19:18:58 +00002270 fence_reg = obj->fence_reg;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002271 if (fence_reg < 8)
2272 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002273 else
Chris Wilsona00b10c2010-09-24 21:15:47 +01002274 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
Daniel Vetterc6642782010-11-12 13:46:18 +00002275
2276 if (pipelined) {
2277 int ret = intel_ring_begin(pipelined, 4);
2278 if (ret)
2279 return ret;
2280
2281 intel_ring_emit(pipelined, MI_NOOP);
2282 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2283 intel_ring_emit(pipelined, fence_reg);
2284 intel_ring_emit(pipelined, val);
2285 intel_ring_advance(pipelined);
2286 } else
2287 I915_WRITE(fence_reg, val);
2288
2289 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002290}
2291
Daniel Vetterc6642782010-11-12 13:46:18 +00002292static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2293 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002294{
Chris Wilson05394f32010-11-08 19:18:58 +00002295 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002296 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002297 u32 size = obj->gtt_space->size;
2298 int regnum = obj->fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002299 uint32_t val;
2300 uint32_t pitch_val;
2301
Daniel Vetterc6642782010-11-12 13:46:18 +00002302 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2303 (size & -size) != size ||
2304 (obj->gtt_offset & (size - 1)),
2305 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2306 obj->gtt_offset, size))
2307 return -EINVAL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002308
Chris Wilson05394f32010-11-08 19:18:58 +00002309 pitch_val = obj->stride / 128;
Eric Anholte76a16d2009-05-26 17:44:56 -07002310 pitch_val = ffs(pitch_val) - 1;
Eric Anholte76a16d2009-05-26 17:44:56 -07002311
Chris Wilson05394f32010-11-08 19:18:58 +00002312 val = obj->gtt_offset;
2313 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002314 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetterc6642782010-11-12 13:46:18 +00002315 val |= I830_FENCE_SIZE_BITS(size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002316 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2317 val |= I830_FENCE_REG_VALID;
2318
Daniel Vetterc6642782010-11-12 13:46:18 +00002319 if (pipelined) {
2320 int ret = intel_ring_begin(pipelined, 4);
2321 if (ret)
2322 return ret;
2323
2324 intel_ring_emit(pipelined, MI_NOOP);
2325 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2326 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2327 intel_ring_emit(pipelined, val);
2328 intel_ring_advance(pipelined);
2329 } else
2330 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2331
2332 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002333}
2334
Chris Wilsond9e86c02010-11-10 16:40:20 +00002335static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2336{
2337 return i915_seqno_passed(ring->get_seqno(ring), seqno);
2338}
2339
2340static int
2341i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
Chris Wilsonce453d82011-02-21 14:43:56 +00002342 struct intel_ring_buffer *pipelined)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002343{
2344 int ret;
2345
2346 if (obj->fenced_gpu_access) {
Chris Wilson88241782011-01-07 17:09:48 +00002347 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002348 ret = i915_gem_flush_ring(obj->last_fenced_ring,
Chris Wilson88241782011-01-07 17:09:48 +00002349 0, obj->base.write_domain);
2350 if (ret)
2351 return ret;
2352 }
Chris Wilsond9e86c02010-11-10 16:40:20 +00002353
2354 obj->fenced_gpu_access = false;
2355 }
2356
2357 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2358 if (!ring_passed_seqno(obj->last_fenced_ring,
2359 obj->last_fenced_seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002360 ret = i915_wait_request(obj->last_fenced_ring,
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002361 obj->last_fenced_seqno,
2362 true);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002363 if (ret)
2364 return ret;
2365 }
2366
2367 obj->last_fenced_seqno = 0;
2368 obj->last_fenced_ring = NULL;
2369 }
2370
Chris Wilson63256ec2011-01-04 18:42:07 +00002371 /* Ensure that all CPU reads are completed before installing a fence
2372 * and all writes before removing the fence.
2373 */
2374 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2375 mb();
2376
Chris Wilsond9e86c02010-11-10 16:40:20 +00002377 return 0;
2378}
2379
2380int
2381i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2382{
2383 int ret;
2384
2385 if (obj->tiling_mode)
2386 i915_gem_release_mmap(obj);
2387
Chris Wilsonce453d82011-02-21 14:43:56 +00002388 ret = i915_gem_object_flush_fence(obj, NULL);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002389 if (ret)
2390 return ret;
2391
2392 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2393 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002394
2395 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002396 i915_gem_clear_fence_reg(obj->base.dev,
2397 &dev_priv->fence_regs[obj->fence_reg]);
2398
2399 obj->fence_reg = I915_FENCE_REG_NONE;
2400 }
2401
2402 return 0;
2403}
2404
2405static struct drm_i915_fence_reg *
2406i915_find_fence_reg(struct drm_device *dev,
2407 struct intel_ring_buffer *pipelined)
Daniel Vetterae3db242010-02-19 11:51:58 +01002408{
Daniel Vetterae3db242010-02-19 11:51:58 +01002409 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002410 struct drm_i915_fence_reg *reg, *first, *avail;
2411 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01002412
2413 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002414 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002415 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2416 reg = &dev_priv->fence_regs[i];
2417 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002418 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002419
Chris Wilson1690e1e2011-12-14 13:57:08 +01002420 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002421 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002422 }
2423
Chris Wilsond9e86c02010-11-10 16:40:20 +00002424 if (avail == NULL)
2425 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002426
2427 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002428 avail = first = NULL;
2429 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01002430 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01002431 continue;
2432
Chris Wilsond9e86c02010-11-10 16:40:20 +00002433 if (first == NULL)
2434 first = reg;
2435
2436 if (!pipelined ||
2437 !reg->obj->last_fenced_ring ||
2438 reg->obj->last_fenced_ring == pipelined) {
2439 avail = reg;
2440 break;
2441 }
Daniel Vetterae3db242010-02-19 11:51:58 +01002442 }
2443
Chris Wilsond9e86c02010-11-10 16:40:20 +00002444 if (avail == NULL)
2445 avail = first;
Daniel Vetterae3db242010-02-19 11:51:58 +01002446
Chris Wilsona00b10c2010-09-24 21:15:47 +01002447 return avail;
Daniel Vetterae3db242010-02-19 11:51:58 +01002448}
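/*
 * Selection policy, in summary (a restatement of the walk above rather
 * than new behaviour): an entirely free register is always preferred;
 * failing that, the oldest unpinned register on the fence LRU is stolen,
 * with a bias towards one whose previous owner was last fenced on the
 * same ring as @pipelined.  If every register is pinned, NULL is
 * returned and the caller falls back to reporting -EDEADLK.
 */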
2449
Jesse Barnesde151cf2008-11-12 10:03:55 -08002450/**
Chris Wilsond9e86c02010-11-10 16:40:20 +00002451 * i915_gem_object_get_fence - set up a fence reg for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08002452 * @obj: object to map through a fence reg
Chris Wilsond9e86c02010-11-10 16:40:20 +00002453 * @pipelined: ring on which to queue the change, or NULL for CPU access
Jesse Barnesde151cf2008-11-12 10:03:55 -08002455 *
2456 * When mapping objects through the GTT, userspace wants to be able to write
2457 * to them without having to worry about swizzling if the object is tiled.
2458 *
2459 * This function walks the fence regs looking for a free one for @obj,
2460 * stealing one if it can't find any.
2461 *
2462 * It then sets up the reg based on the object's properties: address, pitch
2463 * and tiling format.
2464 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002465int
Chris Wilsond9e86c02010-11-10 16:40:20 +00002466i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
Chris Wilsonce453d82011-02-21 14:43:56 +00002467 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002468{
Chris Wilson05394f32010-11-08 19:18:58 +00002469 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002470 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002471 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002472 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002473
Chris Wilson6bda10d2010-12-05 21:04:18 +00002474 /* XXX disable pipelining. There are bugs. Shocking. */
2475 pipelined = NULL;
2476
Chris Wilsond9e86c02010-11-10 16:40:20 +00002477 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00002478 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2479 reg = &dev_priv->fence_regs[obj->fence_reg];
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002480 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002481
Chris Wilson29c5a582011-03-17 15:23:22 +00002482 if (obj->tiling_changed) {
2483 ret = i915_gem_object_flush_fence(obj, pipelined);
2484 if (ret)
2485 return ret;
2486
2487 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2488 pipelined = NULL;
2489
2490 if (pipelined) {
2491 reg->setup_seqno =
2492 i915_gem_next_request_seqno(pipelined);
2493 obj->last_fenced_seqno = reg->setup_seqno;
2494 obj->last_fenced_ring = pipelined;
2495 }
2496
2497 goto update;
2498 }
Chris Wilsond9e86c02010-11-10 16:40:20 +00002499
2500 if (!pipelined) {
2501 if (reg->setup_seqno) {
2502 if (!ring_passed_seqno(obj->last_fenced_ring,
2503 reg->setup_seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002504 ret = i915_wait_request(obj->last_fenced_ring,
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002505 reg->setup_seqno,
2506 true);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002507 if (ret)
2508 return ret;
2509 }
2510
2511 reg->setup_seqno = 0;
2512 }
2513 } else if (obj->last_fenced_ring &&
2514 obj->last_fenced_ring != pipelined) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002515 ret = i915_gem_object_flush_fence(obj, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002516 if (ret)
2517 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002518 }
2519
Eric Anholta09ba7f2009-08-29 12:49:51 -07002520 return 0;
2521 }
2522
Chris Wilsond9e86c02010-11-10 16:40:20 +00002523 reg = i915_find_fence_reg(dev, pipelined);
2524 if (reg == NULL)
Daniel Vetter39965b32011-12-14 13:57:09 +01002525 return -EDEADLK;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002526
Chris Wilsonce453d82011-02-21 14:43:56 +00002527 ret = i915_gem_object_flush_fence(obj, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002528 if (ret)
Daniel Vetterae3db242010-02-19 11:51:58 +01002529 return ret;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002530
Chris Wilsond9e86c02010-11-10 16:40:20 +00002531 if (reg->obj) {
2532 struct drm_i915_gem_object *old = reg->obj;
2533
2534 drm_gem_object_reference(&old->base);
2535
2536 if (old->tiling_mode)
2537 i915_gem_release_mmap(old);
2538
Chris Wilsonce453d82011-02-21 14:43:56 +00002539 ret = i915_gem_object_flush_fence(old, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002540 if (ret) {
2541 drm_gem_object_unreference(&old->base);
2542 return ret;
2543 }
2544
2545 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2546 pipelined = NULL;
2547
2548 old->fence_reg = I915_FENCE_REG_NONE;
2549 old->last_fenced_ring = pipelined;
2550 old->last_fenced_seqno =
Chris Wilsondb53a302011-02-03 11:57:46 +00002551 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002552
2553 drm_gem_object_unreference(&old->base);
2554 } else if (obj->last_fenced_seqno == 0)
2555 pipelined = NULL;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002556
Jesse Barnesde151cf2008-11-12 10:03:55 -08002557 reg->obj = obj;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002558 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2559 obj->fence_reg = reg - dev_priv->fence_regs;
2560 obj->last_fenced_ring = pipelined;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002561
Chris Wilsond9e86c02010-11-10 16:40:20 +00002562 reg->setup_seqno =
Chris Wilsondb53a302011-02-03 11:57:46 +00002563 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002564 obj->last_fenced_seqno = reg->setup_seqno;
2565
2566update:
2567 obj->tiling_changed = false;
Chris Wilsone259bef2010-09-17 00:32:02 +01002568 switch (INTEL_INFO(dev)->gen) {
Eric Anholt25aebfc32011-05-06 13:55:53 -07002569 case 7:
Chris Wilsone259bef2010-09-17 00:32:02 +01002570 case 6:
Daniel Vetterc6642782010-11-12 13:46:18 +00002571 ret = sandybridge_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002572 break;
2573 case 5:
2574 case 4:
Daniel Vetterc6642782010-11-12 13:46:18 +00002575 ret = i965_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002576 break;
2577 case 3:
Daniel Vetterc6642782010-11-12 13:46:18 +00002578 ret = i915_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002579 break;
2580 case 2:
Daniel Vetterc6642782010-11-12 13:46:18 +00002581 ret = i830_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002582 break;
2583 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002584
Daniel Vetterc6642782010-11-12 13:46:18 +00002585 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002586}
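/*
 * Typical usage (a sketch, not lifted verbatim from a caller): code that
 * wants detiled CPU access through the GTT aperture pins the object and
 * then asks for a fence while holding struct_mutex:
 *
 *	ret = i915_gem_object_pin(obj, 0, true);
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj, NULL);
 *
 * A NULL ring takes the MMIO path in the helpers above; once the fence
 * is no longer needed it is released with i915_gem_object_put_fence(obj).
 */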
2587
2588/**
2589 * i915_gem_clear_fence_reg - clear out fence register info
2590 * @reg: fence register to clear
2591 *
2592 * Zeroes out the fence register itself and clears out the associated
Chris Wilson05394f32010-11-08 19:18:58 +00002593 * data structures in dev_priv and obj.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002594 */
2595static void
Chris Wilsond9e86c02010-11-10 16:40:20 +00002596i915_gem_clear_fence_reg(struct drm_device *dev,
2597 struct drm_i915_fence_reg *reg)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002598{
Jesse Barnes79e53942008-11-07 14:24:08 -08002599 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002600 uint32_t fence_reg = reg - dev_priv->fence_regs;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002601
Chris Wilsone259bef2010-09-17 00:32:02 +01002602 switch (INTEL_INFO(dev)->gen) {
Eric Anholt25aebfc32011-05-06 13:55:53 -07002603 case 7:
Chris Wilsone259bef2010-09-17 00:32:02 +01002604 case 6:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002605 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002606 break;
2607 case 5:
2608 case 4:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002609 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002610 break;
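	/* Note the unusual construct below: on gen3 registers 8-15 live in
	 * the 945 high bank, while registers 0-7, and all of gen2, share
	 * the 830 register file; the case label inside the else branch lets
	 * both generations reuse the same 32-bit register write.
	 */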
2611 case 3:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002612 if (fence_reg >= 8)
2613 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002614 else
Chris Wilsone259bef2010-09-17 00:32:02 +01002615 case 2:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002616 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002617
2618 I915_WRITE(fence_reg, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002619 break;
Eric Anholtdc529a42009-03-10 22:34:49 -07002620 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002621
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002622 list_del_init(&reg->lru_list);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002623 reg->obj = NULL;
2624 reg->setup_seqno = 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002625 reg->pin_count = 0;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002626}
2627
2628/**
Eric Anholt673a3942008-07-30 12:06:12 -07002629 * Finds free space in the GTT aperture and binds the object there.
2630 */
2631static int
Chris Wilson05394f32010-11-08 19:18:58 +00002632i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
Daniel Vetter920afa72010-09-16 17:54:23 +02002633 unsigned alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01002634 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07002635{
Chris Wilson05394f32010-11-08 19:18:58 +00002636 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07002637 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002638 struct drm_mm_node *free_space;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002639 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Daniel Vetter5e783302010-11-14 22:32:36 +01002640 u32 size, fence_size, fence_alignment, unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002641 bool mappable, fenceable;
Chris Wilson07f73f62009-09-14 16:50:30 +01002642 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002643
Chris Wilson05394f32010-11-08 19:18:58 +00002644 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002645 DRM_ERROR("Attempting to bind a purgeable object\n");
2646 return -EINVAL;
2647 }
2648
Chris Wilsone28f8712011-07-18 13:11:49 -07002649 fence_size = i915_gem_get_gtt_size(dev,
2650 obj->base.size,
2651 obj->tiling_mode);
2652 fence_alignment = i915_gem_get_gtt_alignment(dev,
2653 obj->base.size,
2654 obj->tiling_mode);
2655 unfenced_alignment =
2656 i915_gem_get_unfenced_gtt_alignment(dev,
2657 obj->base.size,
2658 obj->tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002659
Eric Anholt673a3942008-07-30 12:06:12 -07002660 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01002661 alignment = map_and_fenceable ? fence_alignment :
2662 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002663 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002664 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2665 return -EINVAL;
2666 }
2667
Chris Wilson05394f32010-11-08 19:18:58 +00002668 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002669
Chris Wilson654fc602010-05-27 13:18:21 +01002670 /* If the object is bigger than the entire aperture, reject it early
2671 * before evicting everything in a vain attempt to find space.
2672 */
Chris Wilson05394f32010-11-08 19:18:58 +00002673 if (obj->base.size >
Daniel Vetter75e9e912010-11-04 17:11:09 +01002674 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
Chris Wilson654fc602010-05-27 13:18:21 +01002675 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2676 return -E2BIG;
2677 }
2678
Eric Anholt673a3942008-07-30 12:06:12 -07002679 search_free:
Daniel Vetter75e9e912010-11-04 17:11:09 +01002680 if (map_and_fenceable)
Daniel Vetter920afa72010-09-16 17:54:23 +02002681 free_space =
2682 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002683 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002684 dev_priv->mm.gtt_mappable_end,
2685 0);
2686 else
2687 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002688 size, alignment, 0);
Daniel Vetter920afa72010-09-16 17:54:23 +02002689
2690 if (free_space != NULL) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01002691 if (map_and_fenceable)
Chris Wilson05394f32010-11-08 19:18:58 +00002692 obj->gtt_space =
Daniel Vetter920afa72010-09-16 17:54:23 +02002693 drm_mm_get_block_range_generic(free_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002694 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002695 dev_priv->mm.gtt_mappable_end,
2696 0);
2697 else
Chris Wilson05394f32010-11-08 19:18:58 +00002698 obj->gtt_space =
Chris Wilsona00b10c2010-09-24 21:15:47 +01002699 drm_mm_get_block(free_space, size, alignment);
Daniel Vetter920afa72010-09-16 17:54:23 +02002700 }
Chris Wilson05394f32010-11-08 19:18:58 +00002701 if (obj->gtt_space == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07002702 /* If the gtt is empty and we're still having trouble
2703 * fitting our object in, we're out of memory.
2704 */
Daniel Vetter75e9e912010-11-04 17:11:09 +01002705 ret = i915_gem_evict_something(dev, size, alignment,
2706 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01002707 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002708 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002709
Eric Anholt673a3942008-07-30 12:06:12 -07002710 goto search_free;
2711 }
2712
Chris Wilsone5281cc2010-10-28 13:45:36 +01002713 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002714 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00002715 drm_mm_put_block(obj->gtt_space);
2716 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002717
2718 if (ret == -ENOMEM) {
Chris Wilson809b6332011-01-10 17:33:15 +00002719 /* first try to reclaim some memory by clearing the GTT */
2720 ret = i915_gem_evict_everything(dev, false);
Chris Wilson07f73f62009-09-14 16:50:30 +01002721 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002722 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002723 if (gfpmask) {
2724 gfpmask = 0;
2725 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002726 }
2727
Chris Wilson809b6332011-01-10 17:33:15 +00002728 return -ENOMEM;
Chris Wilson07f73f62009-09-14 16:50:30 +01002729 }
2730
2731 goto search_free;
2732 }
2733
Eric Anholt673a3942008-07-30 12:06:12 -07002734 return ret;
2735 }
2736
Daniel Vetter74163902012-02-15 23:50:21 +01002737 ret = i915_gem_gtt_prepare_object(obj);
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002738 if (ret) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01002739 i915_gem_object_put_pages_gtt(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002740 drm_mm_put_block(obj->gtt_space);
2741 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002742
Chris Wilson809b6332011-01-10 17:33:15 +00002743 if (i915_gem_evict_everything(dev, false))
Chris Wilson07f73f62009-09-14 16:50:30 +01002744 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002745
2746 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002747 }
Daniel Vetter0ebb9822012-02-15 23:50:24 +01002748
2749 if (!dev_priv->mm.aliasing_ppgtt)
2750 i915_gem_gtt_bind_object(obj, obj->cache_level);
Eric Anholt673a3942008-07-30 12:06:12 -07002751
Chris Wilson6299f992010-11-24 12:23:44 +00002752 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002753 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002754
Eric Anholt673a3942008-07-30 12:06:12 -07002755 /* Assert that the object is not currently in any GPU domain. As it
2756 * wasn't in the GTT, there shouldn't be any way it could have been in
2757 * a GPU cache
2758 */
Chris Wilson05394f32010-11-08 19:18:58 +00002759 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2760 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002761
Chris Wilson6299f992010-11-24 12:23:44 +00002762 obj->gtt_offset = obj->gtt_space->start;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002763
Daniel Vetter75e9e912010-11-04 17:11:09 +01002764 fenceable =
Chris Wilson05394f32010-11-08 19:18:58 +00002765 obj->gtt_space->size == fence_size &&
Akshay Joshi0206e352011-08-16 15:34:10 -04002766 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002767
Daniel Vetter75e9e912010-11-04 17:11:09 +01002768 mappable =
Chris Wilson05394f32010-11-08 19:18:58 +00002769 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002770
Chris Wilson05394f32010-11-08 19:18:58 +00002771 obj->map_and_fenceable = mappable && fenceable;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002772
Chris Wilsondb53a302011-02-03 11:57:46 +00002773 trace_i915_gem_object_bind(obj, map_and_fenceable);
Eric Anholt673a3942008-07-30 12:06:12 -07002774 return 0;
2775}
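/*
 * Binding flow in outline (an informal summary of the function above,
 * not additional behaviour):
 *
 *	i915_gem_object_pin(obj, 0, true)
 *	  -> i915_gem_object_bind_to_gtt(obj, 0, true)
 *	       - search only [0, gtt_mappable_end) for a hole that fits
 *	         the fenced size and alignment
 *	       - on failure, evict and retry, eventually dropping the
 *	         __GFP_NORETRY | __GFP_NOWARN allocation flags
 *	       - allocate backing pages, prepare the GTT mapping and
 *	         record whether the placement is mappable and fenceable
 *
 * With map_and_fenceable == false the search spans the whole GTT and
 * only the unfenced alignment is enforced.
 */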
2776
2777void
Chris Wilson05394f32010-11-08 19:18:58 +00002778i915_gem_clflush_object(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002779{
Eric Anholt673a3942008-07-30 12:06:12 -07002780 /* If we don't have a page list set up, then we're not pinned
2781 * to GPU, and we can ignore the cache flush because it'll happen
2782 * again at bind time.
2783 */
Chris Wilson05394f32010-11-08 19:18:58 +00002784 if (obj->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002785 return;
2786
Chris Wilson9c23f7f2011-03-29 16:59:52 -07002787 /* If the GPU is snooping the contents of the CPU cache,
2788 * we do not need to manually clear the CPU cache lines. However,
2789 * the caches are only snooped when the render cache is
2790 * flushed/invalidated. As we always have to emit invalidations
2791 * and flushes when moving into and out of the RENDER domain, correct
2792 * snooping behaviour occurs naturally as the result of our domain
2793 * tracking.
2794 */
2795 if (obj->cache_level != I915_CACHE_NONE)
2796 return;
2797
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002798 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002799
Chris Wilson05394f32010-11-08 19:18:58 +00002800 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002801}
2802
Eric Anholte47c68e2008-11-14 13:35:19 -08002803/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson88241782011-01-07 17:09:48 +00002804static int
Chris Wilson3619df02010-11-28 15:37:17 +00002805i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002806{
Chris Wilson05394f32010-11-08 19:18:58 +00002807 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson88241782011-01-07 17:09:48 +00002808 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002809
2810 /* Queue the GPU write cache flushing we need. */
Chris Wilsondb53a302011-02-03 11:57:46 +00002811 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002812}
2813
2814/** Flushes the GTT write domain for the object if it's dirty. */
2815static void
Chris Wilson05394f32010-11-08 19:18:58 +00002816i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002817{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002818 uint32_t old_write_domain;
2819
Chris Wilson05394f32010-11-08 19:18:58 +00002820 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08002821 return;
2822
Chris Wilson63256ec2011-01-04 18:42:07 +00002823 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08002824 * to it immediately go to main memory as far as we know, so there's
2825 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00002826 *
2827 * However, we do have to enforce the order so that all writes through
2828 * the GTT land before any writes to the device, such as updates to
2829 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08002830 */
Chris Wilson63256ec2011-01-04 18:42:07 +00002831 wmb();
2832
Chris Wilson05394f32010-11-08 19:18:58 +00002833 old_write_domain = obj->base.write_domain;
2834 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002835
2836 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002837 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002838 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002839}
2840
2841/** Flushes the CPU write domain for the object if it's dirty. */
2842static void
Chris Wilson05394f32010-11-08 19:18:58 +00002843i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002844{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002845 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002846
Chris Wilson05394f32010-11-08 19:18:58 +00002847 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08002848 return;
2849
2850 i915_gem_clflush_object(obj);
Daniel Vetter40ce6572010-11-05 18:12:18 +01002851 intel_gtt_chipset_flush();
Chris Wilson05394f32010-11-08 19:18:58 +00002852 old_write_domain = obj->base.write_domain;
2853 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002854
2855 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002856 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002857 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002858}
2859
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002860/**
2861 * Moves a single object to the GTT read, and possibly write domain.
2862 *
2863 * This function returns when the move is complete, including waiting on
2864 * flushes to occur.
2865 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002866int
Chris Wilson20217462010-11-23 15:26:33 +00002867i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002868{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002869 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002870 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002871
Eric Anholt02354392008-11-26 13:58:13 -08002872 /* Not valid to be called on unbound objects. */
Chris Wilson05394f32010-11-08 19:18:58 +00002873 if (obj->gtt_space == NULL)
Eric Anholt02354392008-11-26 13:58:13 -08002874 return -EINVAL;
2875
Chris Wilson8d7e3de2011-02-07 15:23:02 +00002876 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2877 return 0;
2878
Chris Wilson88241782011-01-07 17:09:48 +00002879 ret = i915_gem_object_flush_gpu_write_domain(obj);
2880 if (ret)
2881 return ret;
2882
Chris Wilson87ca9c82010-12-02 09:42:56 +00002883 if (obj->pending_gpu_write || write) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002884 ret = i915_gem_object_wait_rendering(obj);
Chris Wilson87ca9c82010-12-02 09:42:56 +00002885 if (ret)
2886 return ret;
2887 }
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002888
Chris Wilson72133422010-09-13 23:56:38 +01002889 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002890
Chris Wilson05394f32010-11-08 19:18:58 +00002891 old_write_domain = obj->base.write_domain;
2892 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002893
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002894 /* It should now be out of any other write domains, and we can update
2895 * the domain values for our changes.
2896 */
Chris Wilson05394f32010-11-08 19:18:58 +00002897 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2898 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002899 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002900 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2901 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2902 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08002903 }
2904
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002905 trace_i915_gem_object_change_domain(obj,
2906 old_read_domains,
2907 old_write_domain);
2908
Eric Anholte47c68e2008-11-14 13:35:19 -08002909 return 0;
2910}
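/*
 * Illustrative caller (a sketch only): before scribbling on a bound
 * object through the aperture, move it to the GTT write domain so the
 * GPU is done with it and any dirty CPU cachelines have been clflushed:
 *
 *	ret = i915_gem_object_pin(obj, 0, true);
 *	if (ret == 0)
 *		ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *
 * Afterwards only the wmb() in the GTT flush above is needed to order
 * those writes against later device activity, since GTT writes bypass
 * the render cache entirely.
 */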
2911
Chris Wilsone4ffd172011-04-04 09:44:39 +01002912int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2913 enum i915_cache_level cache_level)
2914{
Daniel Vetter7bddb012012-02-09 17:15:47 +01002915 struct drm_device *dev = obj->base.dev;
2916 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsone4ffd172011-04-04 09:44:39 +01002917 int ret;
2918
2919 if (obj->cache_level == cache_level)
2920 return 0;
2921
2922 if (obj->pin_count) {
2923 DRM_DEBUG("can not change the cache level of pinned objects\n");
2924 return -EBUSY;
2925 }
2926
2927 if (obj->gtt_space) {
2928 ret = i915_gem_object_finish_gpu(obj);
2929 if (ret)
2930 return ret;
2931
2932 i915_gem_object_finish_gtt(obj);
2933
2934 /* Before SandyBridge, you could not use tiling or fence
2935 * registers with snooped memory, so relinquish any fences
2936 * currently pointing to our region in the aperture.
2937 */
2938 if (INTEL_INFO(obj->base.dev)->gen < 6) {
2939 ret = i915_gem_object_put_fence(obj);
2940 if (ret)
2941 return ret;
2942 }
2943
Daniel Vetter74898d72012-02-15 23:50:22 +01002944 if (obj->has_global_gtt_mapping)
2945 i915_gem_gtt_bind_object(obj, cache_level);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002946 if (obj->has_aliasing_ppgtt_mapping)
2947 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2948 obj, cache_level);
Chris Wilsone4ffd172011-04-04 09:44:39 +01002949 }
2950
2951 if (cache_level == I915_CACHE_NONE) {
2952 u32 old_read_domains, old_write_domain;
2953
2954 /* If we're coming from LLC cached, then we haven't
2955 * actually been tracking whether the data is in the
2956 * CPU cache or not, since we only allow one bit set
2957 * in obj->write_domain and have been skipping the clflushes.
2958 * Just set it to the CPU cache for now.
2959 */
2960 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2961 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
2962
2963 old_read_domains = obj->base.read_domains;
2964 old_write_domain = obj->base.write_domain;
2965
2966 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2967 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2968
2969 trace_i915_gem_object_change_domain(obj,
2970 old_read_domains,
2971 old_write_domain);
2972 }
2973
2974 obj->cache_level = cache_level;
2975 return 0;
2976}
2977
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002978/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002979 * Prepare buffer for display plane (scanout, cursors, etc).
2980 * Can be called from an uninterruptible phase (modesetting) and allows
2981 * any flushes to be pipelined (for pageflips).
2982 *
2983 * For the display plane, we want to be in the GTT but out of any write
2984 * domains. So in many ways this looks like set_to_gtt_domain() apart from the
2985 * ability to pipeline the waits, pinning and any additional subtleties
2986 * that may differentiate the display plane from ordinary buffers.
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002987 */
2988int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002989i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2990 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00002991 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002992{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002993 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002994 int ret;
2995
Chris Wilson88241782011-01-07 17:09:48 +00002996 ret = i915_gem_object_flush_gpu_write_domain(obj);
2997 if (ret)
2998 return ret;
2999
Chris Wilson0be73282010-12-06 14:36:27 +00003000 if (pipelined != obj->ring) {
Chris Wilsonce453d82011-02-21 14:43:56 +00003001 ret = i915_gem_object_wait_rendering(obj);
Keith Packardf0b69ef2011-07-19 16:21:40 -07003002 if (ret == -ERESTARTSYS)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003003 return ret;
3004 }
3005
Eric Anholta7ef0642011-03-29 16:59:54 -07003006 /* The display engine is not coherent with the LLC cache on gen6. As
3007 * a result, we make sure that the pinning that is about to occur is
3008 * done with uncached PTEs. This is lowest common denominator for all
3009 * chipsets.
3010 *
3011 * However for gen6+, we could do better by using the GFDT bit instead
3012 * of uncaching, which would allow us to flush all the LLC-cached data
3013 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3014 */
3015 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3016 if (ret)
3017 return ret;
3018
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003019 /* As the user may map the buffer once pinned in the display plane
3020 * (e.g. libkms for the bootup splash), we have to ensure that we
3021 * always use map_and_fenceable for all scanout buffers.
3022 */
3023 ret = i915_gem_object_pin(obj, alignment, true);
3024 if (ret)
3025 return ret;
3026
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003027 i915_gem_object_flush_cpu_write_domain(obj);
3028
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003029 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003030 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003031
3032 /* It should now be out of any other write domains, and we can update
3033 * the domain values for our changes.
3034 */
3035 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilson05394f32010-11-08 19:18:58 +00003036 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003037
3038 trace_i915_gem_object_change_domain(obj,
3039 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003040 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003041
3042 return 0;
3043}
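/*
 * Modesetting usage sketch (an assumed call pattern, not copied from
 * intel_display.c): when an object becomes the scanout source it is
 * pinned for display and released again when the framebuffer goes away:
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, 4096, NULL);
 *	...
 *	i915_gem_object_unpin(obj);
 *
 * The switch to I915_CACHE_NONE and the map_and_fenceable pinning above
 * are what make the buffer safe both for the display engine and for a
 * later GTT mmap by userspace.
 */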
3044
Chris Wilson85345512010-11-13 09:49:11 +00003045int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003046i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003047{
Chris Wilson88241782011-01-07 17:09:48 +00003048 int ret;
3049
Chris Wilsona8198ee2011-04-13 22:04:09 +01003050 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003051 return 0;
3052
Chris Wilson88241782011-01-07 17:09:48 +00003053 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00003054 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Chris Wilson88241782011-01-07 17:09:48 +00003055 if (ret)
3056 return ret;
3057 }
Chris Wilson85345512010-11-13 09:49:11 +00003058
Chris Wilsonc501ae72011-12-14 13:57:23 +01003059 ret = i915_gem_object_wait_rendering(obj);
3060 if (ret)
3061 return ret;
3062
Chris Wilsona8198ee2011-04-13 22:04:09 +01003063 /* Ensure that we invalidate the GPU's caches and TLBs. */
3064 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003065 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003066}
3067
Eric Anholte47c68e2008-11-14 13:35:19 -08003068/**
3069 * Moves a single object to the CPU read, and possibly write domain.
3070 *
3071 * This function returns when the move is complete, including waiting on
3072 * flushes to occur.
3073 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003074int
Chris Wilson919926a2010-11-12 13:42:53 +00003075i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003076{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003077 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003078 int ret;
3079
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003080 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3081 return 0;
3082
Chris Wilson88241782011-01-07 17:09:48 +00003083 ret = i915_gem_object_flush_gpu_write_domain(obj);
3084 if (ret)
3085 return ret;
3086
Chris Wilsonce453d82011-02-21 14:43:56 +00003087 ret = i915_gem_object_wait_rendering(obj);
Daniel Vetterde18a292010-11-27 22:30:41 +01003088 if (ret)
Eric Anholte47c68e2008-11-14 13:35:19 -08003089 return ret;
3090
3091 i915_gem_object_flush_gtt_write_domain(obj);
3092
3093 /* If we have a partially-valid cache of the object in the CPU,
3094 * finish invalidating it and free the per-page flags.
3095 */
3096 i915_gem_object_set_to_full_cpu_read_domain(obj);
3097
Chris Wilson05394f32010-11-08 19:18:58 +00003098 old_write_domain = obj->base.write_domain;
3099 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003100
Eric Anholte47c68e2008-11-14 13:35:19 -08003101 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003102 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Eric Anholte47c68e2008-11-14 13:35:19 -08003103 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003104
Chris Wilson05394f32010-11-08 19:18:58 +00003105 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003106 }
3107
3108 /* It should now be out of any other write domains, and we can update
3109 * the domain values for our changes.
3110 */
Chris Wilson05394f32010-11-08 19:18:58 +00003111 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003112
3113 /* If we're writing through the CPU, then the GPU read domains will
3114 * need to be invalidated at next use.
3115 */
3116 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003117 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3118 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003119 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003120
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003121 trace_i915_gem_object_change_domain(obj,
3122 old_read_domains,
3123 old_write_domain);
3124
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003125 return 0;
3126}
3127
Eric Anholt673a3942008-07-30 12:06:12 -07003128/**
Eric Anholte47c68e2008-11-14 13:35:19 -08003129 * Moves the object from a partial CPU read domain to the full CPU read domain.
Eric Anholt673a3942008-07-30 12:06:12 -07003130 *
Eric Anholte47c68e2008-11-14 13:35:19 -08003131 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3132 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3133 */
3134static void
Chris Wilson05394f32010-11-08 19:18:58 +00003135i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003136{
Chris Wilson05394f32010-11-08 19:18:58 +00003137 if (!obj->page_cpu_valid)
Eric Anholte47c68e2008-11-14 13:35:19 -08003138 return;
3139
3140 /* If we're partially in the CPU read domain, finish moving it in.
3141 */
Chris Wilson05394f32010-11-08 19:18:58 +00003142 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
Eric Anholte47c68e2008-11-14 13:35:19 -08003143 int i;
3144
Chris Wilson05394f32010-11-08 19:18:58 +00003145 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3146 if (obj->page_cpu_valid[i])
Eric Anholte47c68e2008-11-14 13:35:19 -08003147 continue;
Chris Wilson05394f32010-11-08 19:18:58 +00003148 drm_clflush_pages(obj->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08003149 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003150 }
3151
3152 /* Free the page_cpu_valid mappings which are now stale, whether
3153 * or not we've got I915_GEM_DOMAIN_CPU.
3154 */
Chris Wilson05394f32010-11-08 19:18:58 +00003155 kfree(obj->page_cpu_valid);
3156 obj->page_cpu_valid = NULL;
Eric Anholte47c68e2008-11-14 13:35:19 -08003157}
3158
3159/**
3160 * Set the CPU read domain on a range of the object.
3161 *
3162 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3163 * not entirely valid. The page_cpu_valid member of the object flags which
3164 * pages have been flushed, and will be respected by
3165 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3166 * of the whole object.
3167 *
3168 * This function returns when the move is complete, including waiting on
3169 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07003170 */
3171static int
Chris Wilson05394f32010-11-08 19:18:58 +00003172i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
Eric Anholte47c68e2008-11-14 13:35:19 -08003173 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07003174{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003175 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003176 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003177
Chris Wilson05394f32010-11-08 19:18:58 +00003178 if (offset == 0 && size == obj->base.size)
Eric Anholte47c68e2008-11-14 13:35:19 -08003179 return i915_gem_object_set_to_cpu_domain(obj, 0);
3180
Chris Wilson88241782011-01-07 17:09:48 +00003181 ret = i915_gem_object_flush_gpu_write_domain(obj);
3182 if (ret)
3183 return ret;
3184
Chris Wilsonce453d82011-02-21 14:43:56 +00003185 ret = i915_gem_object_wait_rendering(obj);
Daniel Vetterde18a292010-11-27 22:30:41 +01003186 if (ret)
Eric Anholte47c68e2008-11-14 13:35:19 -08003187 return ret;
Daniel Vetterde18a292010-11-27 22:30:41 +01003188
Eric Anholte47c68e2008-11-14 13:35:19 -08003189 i915_gem_object_flush_gtt_write_domain(obj);
3190
3191 /* If we're already fully in the CPU read domain, we're done. */
Chris Wilson05394f32010-11-08 19:18:58 +00003192 if (obj->page_cpu_valid == NULL &&
3193 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003194 return 0;
3195
Eric Anholte47c68e2008-11-14 13:35:19 -08003196 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3197 * newly adding I915_GEM_DOMAIN_CPU
3198 */
Chris Wilson05394f32010-11-08 19:18:58 +00003199 if (obj->page_cpu_valid == NULL) {
3200 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3201 GFP_KERNEL);
3202 if (obj->page_cpu_valid == NULL)
Eric Anholte47c68e2008-11-14 13:35:19 -08003203 return -ENOMEM;
Chris Wilson05394f32010-11-08 19:18:58 +00003204 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3205 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003206
3207 /* Flush the cache on any pages that are still invalid from the CPU's
3208 * perspective.
3209 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003210 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3211 i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00003212 if (obj->page_cpu_valid[i])
Eric Anholt673a3942008-07-30 12:06:12 -07003213 continue;
3214
Chris Wilson05394f32010-11-08 19:18:58 +00003215 drm_clflush_pages(obj->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003216
Chris Wilson05394f32010-11-08 19:18:58 +00003217 obj->page_cpu_valid[i] = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07003218 }
3219
Eric Anholte47c68e2008-11-14 13:35:19 -08003220 /* It should now be out of any other write domains, and we can update
3221 * the domain values for our changes.
3222 */
Chris Wilson05394f32010-11-08 19:18:58 +00003223 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003224
Chris Wilson05394f32010-11-08 19:18:58 +00003225 old_read_domains = obj->base.read_domains;
3226 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003227
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003228 trace_i915_gem_object_change_domain(obj,
3229 old_read_domains,
Chris Wilson05394f32010-11-08 19:18:58 +00003230 obj->base.write_domain);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003231
Eric Anholt673a3942008-07-30 12:06:12 -07003232 return 0;
3233}
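/*
 * Worked example (illustrative): for a four page object being read with
 * offset == PAGE_SIZE and size == PAGE_SIZE, the loop above clflushes
 * and marks only page index 1 in page_cpu_valid; a later call covering
 * the whole object (or i915_gem_object_set_to_cpu_domain()) flushes the
 * remaining pages and frees the per-page map again.
 */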
3234
Eric Anholt673a3942008-07-30 12:06:12 -07003235/* Throttle our rendering by waiting until the ring has completed our requests
3236 * emitted over 20 msec ago.
3237 *
Eric Anholtb9624422009-06-03 07:27:35 +00003238 * Note that if we were to use the current jiffies each time around the loop,
3239 * we wouldn't escape the function with any frames outstanding if the time to
3240 * render a frame was over 20ms.
3241 *
Eric Anholt673a3942008-07-30 12:06:12 -07003242 * This should get us reasonable parallelism between CPU and GPU but also
3243 * relatively low latency when blocking on a particular request to finish.
3244 */
3245static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003246i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003247{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003248 struct drm_i915_private *dev_priv = dev->dev_private;
3249 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003250 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003251 struct drm_i915_gem_request *request;
3252 struct intel_ring_buffer *ring = NULL;
3253 u32 seqno = 0;
3254 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003255
Chris Wilsone110e8d2011-01-26 15:39:14 +00003256 if (atomic_read(&dev_priv->mm.wedged))
3257 return -EIO;
3258
Chris Wilson1c255952010-09-26 11:03:27 +01003259 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003260 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003261 if (time_after_eq(request->emitted_jiffies, recent_enough))
3262 break;
3263
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003264 ring = request->ring;
3265 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003266 }
Chris Wilson1c255952010-09-26 11:03:27 +01003267 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003268
3269 if (seqno == 0)
3270 return 0;
3271
3272 ret = 0;
Chris Wilson78501ea2010-10-27 12:18:21 +01003273 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003274 /* And wait for the seqno passing without holding any locks and
3275 * causing extra latency for others. This is safe as the irq
3276 * generation is designed to be run atomically and so is
3277 * lockless.
3278 */
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003279 if (ring->irq_get(ring)) {
3280 ret = wait_event_interruptible(ring->irq_queue,
3281 i915_seqno_passed(ring->get_seqno(ring), seqno)
3282 || atomic_read(&dev_priv->mm.wedged));
3283 ring->irq_put(ring);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003284
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003285 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3286 ret = -EIO;
Eric Anholte959b5d2011-12-22 14:55:01 -08003287 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
3288 seqno) ||
Eric Anholt7ea29b12011-12-22 14:54:59 -08003289 atomic_read(&dev_priv->mm.wedged), 3000)) {
3290 ret = -EBUSY;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003291 }
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003292 }
3293
3294 if (ret == 0)
3295 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003296
Eric Anholt673a3942008-07-30 12:06:12 -07003297 return ret;
3298}
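/*
 * Throttle example (numbers invented for illustration): if a client has
 * requests emitted 40, 25, 15 and 5 ms ago, the loop above stops at the
 * 15 ms request and waits on the 25 ms one, the newest request outside
 * the 20 ms window, so roughly one window's worth of that client's
 * rendering is outstanding while newer frames stay queued.
 */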
3299
Eric Anholt673a3942008-07-30 12:06:12 -07003300int
Chris Wilson05394f32010-11-08 19:18:58 +00003301i915_gem_object_pin(struct drm_i915_gem_object *obj,
3302 uint32_t alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003303 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07003304{
Chris Wilson05394f32010-11-08 19:18:58 +00003305 struct drm_device *dev = obj->base.dev;
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003306 struct drm_i915_private *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003307 int ret;
3308
Chris Wilson05394f32010-11-08 19:18:58 +00003309 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
Chris Wilson23bc5982010-09-29 16:10:57 +01003310 WARN_ON(i915_verify_lists(dev));
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003311
Chris Wilson05394f32010-11-08 19:18:58 +00003312 if (obj->gtt_space != NULL) {
3313 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3314 (map_and_fenceable && !obj->map_and_fenceable)) {
3315 WARN(obj->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003316 "bo is already pinned with incorrect alignment:"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003317 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3318 " obj->map_and_fenceable=%d\n",
Chris Wilson05394f32010-11-08 19:18:58 +00003319 obj->gtt_offset, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003320 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003321 obj->map_and_fenceable);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003322 ret = i915_gem_object_unbind(obj);
3323 if (ret)
3324 return ret;
3325 }
3326 }
3327
Chris Wilson05394f32010-11-08 19:18:58 +00003328 if (obj->gtt_space == NULL) {
Chris Wilsona00b10c2010-09-24 21:15:47 +01003329 ret = i915_gem_object_bind_to_gtt(obj, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003330 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01003331 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003332 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00003333 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003334
Daniel Vetter74898d72012-02-15 23:50:22 +01003335 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3336 i915_gem_gtt_bind_object(obj, obj->cache_level);
3337
Chris Wilson05394f32010-11-08 19:18:58 +00003338 if (obj->pin_count++ == 0) {
Chris Wilson05394f32010-11-08 19:18:58 +00003339 if (!obj->active)
3340 list_move_tail(&obj->mm_list,
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003341 &dev_priv->mm.pinned_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003342 }
Chris Wilson6299f992010-11-24 12:23:44 +00003343 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003344
Chris Wilson23bc5982010-09-29 16:10:57 +01003345 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003346 return 0;
3347}
3348
3349void
Chris Wilson05394f32010-11-08 19:18:58 +00003350i915_gem_object_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003351{
Chris Wilson05394f32010-11-08 19:18:58 +00003352 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003353 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003354
Chris Wilson23bc5982010-09-29 16:10:57 +01003355 WARN_ON(i915_verify_lists(dev));
Chris Wilson05394f32010-11-08 19:18:58 +00003356 BUG_ON(obj->pin_count == 0);
3357 BUG_ON(obj->gtt_space == NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07003358
Chris Wilson05394f32010-11-08 19:18:58 +00003359 if (--obj->pin_count == 0) {
3360 if (!obj->active)
3361 list_move_tail(&obj->mm_list,
Eric Anholt673a3942008-07-30 12:06:12 -07003362 &dev_priv->mm.inactive_list);
Chris Wilson6299f992010-11-24 12:23:44 +00003363 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003364 }
Chris Wilson23bc5982010-09-29 16:10:57 +01003365 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003366}
3367
3368int
3369i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003370 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003371{
3372 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003373 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003374 int ret;
3375
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003376 ret = i915_mutex_lock_interruptible(dev);
3377 if (ret)
3378 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003379
Chris Wilson05394f32010-11-08 19:18:58 +00003380 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003381 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003382 ret = -ENOENT;
3383 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003384 }
Eric Anholt673a3942008-07-30 12:06:12 -07003385
Chris Wilson05394f32010-11-08 19:18:58 +00003386 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003387 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003388 ret = -EINVAL;
3389 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003390 }
3391
Chris Wilson05394f32010-11-08 19:18:58 +00003392 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003393 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3394 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003395 ret = -EINVAL;
3396 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003397 }
3398
Chris Wilson05394f32010-11-08 19:18:58 +00003399 obj->user_pin_count++;
3400 obj->pin_filp = file;
3401 if (obj->user_pin_count == 1) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01003402 ret = i915_gem_object_pin(obj, args->alignment, true);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003403 if (ret)
3404 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003405 }
3406
3407 /* XXX - flush the CPU caches for pinned objects
3408 * as the X server doesn't manage domains yet
3409 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003410 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00003411 args->offset = obj->gtt_offset;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003412out:
Chris Wilson05394f32010-11-08 19:18:58 +00003413 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003414unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003415 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003416 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003417}
3418
3419int
3420i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003421 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003422{
3423 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003424 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003425 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003426
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003427 ret = i915_mutex_lock_interruptible(dev);
3428 if (ret)
3429 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003430
Chris Wilson05394f32010-11-08 19:18:58 +00003431 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003432 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003433 ret = -ENOENT;
3434 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003435 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003436
Chris Wilson05394f32010-11-08 19:18:58 +00003437 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003438 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3439 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003440 ret = -EINVAL;
3441 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003442 }
Chris Wilson05394f32010-11-08 19:18:58 +00003443 obj->user_pin_count--;
3444 if (obj->user_pin_count == 0) {
3445 obj->pin_filp = NULL;
Jesse Barnes79e53942008-11-07 14:24:08 -08003446 i915_gem_object_unpin(obj);
3447 }
Eric Anholt673a3942008-07-30 12:06:12 -07003448
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003449out:
Chris Wilson05394f32010-11-08 19:18:58 +00003450 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003451unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003452 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003453 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003454}
3455
3456int
3457i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003458 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003459{
3460 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003461 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003462 int ret;
3463
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003464 ret = i915_mutex_lock_interruptible(dev);
3465 if (ret)
3466 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003467
Chris Wilson05394f32010-11-08 19:18:58 +00003468 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003469 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003470 ret = -ENOENT;
3471 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003472 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003473
Chris Wilson0be555b2010-08-04 15:36:30 +01003474 /* Count all active objects as busy, even if they are currently not used
3475 * by the gpu. Users of this interface expect objects to eventually
3476 * become non-busy without any further actions, therefore emit any
3477 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08003478 */
Chris Wilson05394f32010-11-08 19:18:58 +00003479 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003480 if (args->busy) {
3481 /* Unconditionally flush objects, even when the gpu still uses this
3482 * object. Userspace calling this function indicates that it wants to
3483		 * use this buffer sooner rather than later, so issuing the required
3484 * flush earlier is beneficial.
3485 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003486 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00003487 ret = i915_gem_flush_ring(obj->ring,
Chris Wilson88241782011-01-07 17:09:48 +00003488 0, obj->base.write_domain);
Chris Wilson1a1c6972010-12-07 23:00:20 +00003489 } else if (obj->ring->outstanding_lazy_request ==
3490 obj->last_rendering_seqno) {
3491 struct drm_i915_gem_request *request;
3492
Chris Wilson7a194872010-12-07 10:38:40 +00003493 /* This ring is not being cleared by active usage,
3494 * so emit a request to do so.
3495 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003496 request = kzalloc(sizeof(*request), GFP_KERNEL);
Rakib Mullick457eafc2011-11-16 00:49:28 +06003497 if (request) {
Akshay Joshi0206e352011-08-16 15:34:10 -04003498 ret = i915_add_request(obj->ring, NULL, request);
Rakib Mullick457eafc2011-11-16 00:49:28 +06003499 if (ret)
3500 kfree(request);
3501 } else
Chris Wilson7a194872010-12-07 10:38:40 +00003502 ret = -ENOMEM;
3503 }
Chris Wilson0be555b2010-08-04 15:36:30 +01003504
3505 /* Update the active list for the hardware's current position.
3506 * Otherwise this only updates on a delayed timer or when irqs
3507 * are actually unmasked, and our working set ends up being
3508 * larger than required.
3509 */
Chris Wilsondb53a302011-02-03 11:57:46 +00003510 i915_gem_retire_requests_ring(obj->ring);
Chris Wilson0be555b2010-08-04 15:36:30 +01003511
Chris Wilson05394f32010-11-08 19:18:58 +00003512 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003513 }
Eric Anholt673a3942008-07-30 12:06:12 -07003514
Chris Wilson05394f32010-11-08 19:18:58 +00003515 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003516unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003517 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003518 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003519}
3520
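/* Throttle a client by waiting for its outstanding rendering; a thin wrapper
 * around i915_gem_ring_throttle().
 */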
3521int
3522i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3523 struct drm_file *file_priv)
3524{
Akshay Joshi0206e352011-08-16 15:34:10 -04003525 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003526}
3527
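/* Mark an object's backing storage as needed (WILLNEED) or discardable
 * (DONTNEED). Purgeable objects that are not bound in the GTT have their
 * backing pages truncated immediately.
 */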
Chris Wilson3ef94da2009-09-14 16:50:29 +01003528int
3529i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3530 struct drm_file *file_priv)
3531{
3532 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003533 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003534 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003535
3536 switch (args->madv) {
3537 case I915_MADV_DONTNEED:
3538 case I915_MADV_WILLNEED:
3539 break;
3540 default:
3541 return -EINVAL;
3542 }
3543
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003544 ret = i915_mutex_lock_interruptible(dev);
3545 if (ret)
3546 return ret;
3547
Chris Wilson05394f32010-11-08 19:18:58 +00003548 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003549 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003550 ret = -ENOENT;
3551 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003552 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01003553
Chris Wilson05394f32010-11-08 19:18:58 +00003554 if (obj->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003555 ret = -EINVAL;
3556 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003557 }
3558
Chris Wilson05394f32010-11-08 19:18:58 +00003559 if (obj->madv != __I915_MADV_PURGED)
3560 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003561
Chris Wilson2d7ef392009-09-20 23:13:10 +01003562 /* if the object is no longer bound, discard its backing storage */
Chris Wilson05394f32010-11-08 19:18:58 +00003563 if (i915_gem_object_is_purgeable(obj) &&
3564 obj->gtt_space == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003565 i915_gem_object_truncate(obj);
3566
Chris Wilson05394f32010-11-08 19:18:58 +00003567 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003568
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003569out:
Chris Wilson05394f32010-11-08 19:18:58 +00003570 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003571unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01003572 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003573 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003574}
3575
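/* Allocate a GEM object of the given size, backed by shmemfs, and initialise
 * its CPU domains, cache level and bookkeeping lists.
 */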
Chris Wilson05394f32010-11-08 19:18:58 +00003576struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3577 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00003578{
Chris Wilson73aa8082010-09-30 11:46:12 +01003579 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterc397b902010-04-09 19:05:07 +00003580 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07003581 struct address_space *mapping;
Daniel Vetterc397b902010-04-09 19:05:07 +00003582
3583 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3584 if (obj == NULL)
3585 return NULL;
3586
3587 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3588 kfree(obj);
3589 return NULL;
3590 }
3591
Hugh Dickins5949eac2011-06-27 16:18:18 -07003592 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3593 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3594
Chris Wilson73aa8082010-09-30 11:46:12 +01003595 i915_gem_info_add_obj(dev_priv, size);
3596
Daniel Vetterc397b902010-04-09 19:05:07 +00003597 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3598 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3599
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02003600 if (HAS_LLC(dev)) {
3601 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07003602 * cache) for about a 10% performance improvement
3603 * compared to uncached. Graphics requests other than
3604 * display scanout are coherent with the CPU in
3605 * accessing this cache. This means in this mode we
3606 * don't need to clflush on the CPU side, and on the
3607 * GPU side we only need to flush internal caches to
3608 * get data visible to the CPU.
3609 *
3610 * However, we maintain the display planes as UC, and so
3611 * need to rebind when first used as such.
3612 */
3613 obj->cache_level = I915_CACHE_LLC;
3614 } else
3615 obj->cache_level = I915_CACHE_NONE;
3616
Daniel Vetter62b8b212010-04-09 19:05:08 +00003617 obj->base.driver_private = NULL;
Daniel Vetterc397b902010-04-09 19:05:07 +00003618 obj->fence_reg = I915_FENCE_REG_NONE;
Chris Wilson69dc4982010-10-19 10:36:51 +01003619 INIT_LIST_HEAD(&obj->mm_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003620 INIT_LIST_HEAD(&obj->gtt_list);
Chris Wilson69dc4982010-10-19 10:36:51 +01003621 INIT_LIST_HEAD(&obj->ring_list);
Chris Wilson432e58e2010-11-25 19:32:06 +00003622 INIT_LIST_HEAD(&obj->exec_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003623 INIT_LIST_HEAD(&obj->gpu_write_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003624 obj->madv = I915_MADV_WILLNEED;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003625 /* Avoid an unnecessary call to unbind on the first bind. */
3626 obj->map_and_fenceable = true;
Daniel Vetterc397b902010-04-09 19:05:07 +00003627
Chris Wilson05394f32010-11-08 19:18:58 +00003628 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00003629}
3630
Eric Anholt673a3942008-07-30 12:06:12 -07003631int i915_gem_init_object(struct drm_gem_object *obj)
3632{
Daniel Vetterc397b902010-04-09 19:05:07 +00003633 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08003634
Eric Anholt673a3942008-07-30 12:06:12 -07003635 return 0;
3636}
3637
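/* Final stage of object destruction: unbind from the GTT, release the mmap
 * offset and backing storage, and free the object. If unbinding is
 * interrupted by a signal, the object is parked on the deferred free list
 * and destruction is retried later.
 */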
Chris Wilson05394f32010-11-08 19:18:58 +00003638static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01003639{
Chris Wilson05394f32010-11-08 19:18:58 +00003640 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01003641 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbe726152010-07-23 23:18:50 +01003642 int ret;
3643
3644 ret = i915_gem_object_unbind(obj);
3645 if (ret == -ERESTARTSYS) {
Chris Wilson05394f32010-11-08 19:18:58 +00003646 list_move(&obj->mm_list,
Chris Wilsonbe726152010-07-23 23:18:50 +01003647 &dev_priv->mm.deferred_free_list);
3648 return;
3649 }
3650
Chris Wilson26e12f892011-03-20 11:20:19 +00003651 trace_i915_gem_object_destroy(obj);
3652
Chris Wilson05394f32010-11-08 19:18:58 +00003653 if (obj->base.map_list.map)
Rob Clarkb464e9a2011-08-10 08:09:08 -05003654 drm_gem_free_mmap_offset(&obj->base);
Chris Wilsonbe726152010-07-23 23:18:50 +01003655
Chris Wilson05394f32010-11-08 19:18:58 +00003656 drm_gem_object_release(&obj->base);
3657 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01003658
Chris Wilson05394f32010-11-08 19:18:58 +00003659 kfree(obj->page_cpu_valid);
3660 kfree(obj->bit_17);
3661 kfree(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01003662}
3663
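/* GEM callback run when the last reference to an object is dropped: release
 * any pins and phys backing before tearing the object down.
 */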
Chris Wilson05394f32010-11-08 19:18:58 +00003664void i915_gem_free_object(struct drm_gem_object *gem_obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003665{
Chris Wilson05394f32010-11-08 19:18:58 +00003666 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3667 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003668
Chris Wilson05394f32010-11-08 19:18:58 +00003669 while (obj->pin_count > 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003670 i915_gem_object_unpin(obj);
3671
Chris Wilson05394f32010-11-08 19:18:58 +00003672 if (obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003673 i915_gem_detach_phys_object(dev, obj);
3674
Chris Wilsonbe726152010-07-23 23:18:50 +01003675 i915_gem_free_object_tail(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003676}
3677
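/* Quiesce the GPU for suspend or lastclose: wait for outstanding rendering,
 * evict everything under UMS, reset the fence registers and tear down the
 * rings.
 */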
Jesse Barnes5669fca2009-02-17 15:13:31 -08003678int
Eric Anholt673a3942008-07-30 12:06:12 -07003679i915_gem_idle(struct drm_device *dev)
3680{
3681 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00003682 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003683
Keith Packard6dbe2772008-10-14 21:41:13 -07003684 mutex_lock(&dev->struct_mutex);
3685
Chris Wilson87acb0a2010-10-19 10:13:00 +01003686 if (dev_priv->mm.suspended) {
Keith Packard6dbe2772008-10-14 21:41:13 -07003687 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003688 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07003689 }
Eric Anholt673a3942008-07-30 12:06:12 -07003690
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08003691 ret = i915_gpu_idle(dev, true);
Keith Packard6dbe2772008-10-14 21:41:13 -07003692 if (ret) {
3693 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003694 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07003695 }
Eric Anholt673a3942008-07-30 12:06:12 -07003696
Chris Wilson29105cc2010-01-07 10:39:13 +00003697 /* Under UMS, be paranoid and evict. */
3698 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
Chris Wilson5eac3ab2010-10-31 08:49:47 +00003699 ret = i915_gem_evict_inactive(dev, false);
Chris Wilson29105cc2010-01-07 10:39:13 +00003700 if (ret) {
3701 mutex_unlock(&dev->struct_mutex);
3702 return ret;
3703 }
3704 }
3705
Chris Wilson312817a2010-11-22 11:50:11 +00003706 i915_gem_reset_fences(dev);
3707
Chris Wilson29105cc2010-01-07 10:39:13 +00003708 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3709 * We need to replace this with a semaphore, or something.
3710 * And not confound mm.suspended!
3711 */
3712 dev_priv->mm.suspended = 1;
Daniel Vetterbc0c7f12010-08-20 18:18:48 +02003713 del_timer_sync(&dev_priv->hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00003714
3715 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003716 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00003717
Keith Packard6dbe2772008-10-14 21:41:13 -07003718 mutex_unlock(&dev->struct_mutex);
3719
Chris Wilson29105cc2010-01-07 10:39:13 +00003720 /* Cancel the retire work handler, which should be idle now. */
3721 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3722
Eric Anholt673a3942008-07-30 12:06:12 -07003723 return 0;
3724}
3725
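/* Program the display and GPU surface swizzling controls on gen5+ when bit-6
 * swizzling is in use, so tiled accesses are handled consistently.
 */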
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003726void i915_gem_init_swizzling(struct drm_device *dev)
3727{
3728 drm_i915_private_t *dev_priv = dev->dev_private;
3729
Daniel Vetter11782b02012-01-31 16:47:55 +01003730 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003731 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3732 return;
3733
3734 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3735 DISP_TILE_SURFACE_SWIZZLING);
3736
Daniel Vetter11782b02012-01-31 16:47:55 +01003737 if (IS_GEN5(dev))
3738 return;
3739
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003740 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3741 if (IS_GEN6(dev))
3742 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
3743 else
3744 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
3745}
Daniel Vettere21af882012-02-09 20:53:27 +01003746
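/* Point each ring at the aliasing PPGTT page directory and enable PPGTT
 * address translation (gen6/gen7 only).
 */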
3747void i915_gem_init_ppgtt(struct drm_device *dev)
3748{
3749 drm_i915_private_t *dev_priv = dev->dev_private;
3750 uint32_t pd_offset;
3751 struct intel_ring_buffer *ring;
3752 int i;
3753
3754 if (!dev_priv->mm.aliasing_ppgtt)
3755 return;
3756
3757 pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
3758	pd_offset /= 64; /* in cachelines */
3759 pd_offset <<= 16;
3760
3761 if (INTEL_INFO(dev)->gen == 6) {
3762 uint32_t ecochk = I915_READ(GAM_ECOCHK);
3763 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3764 ECOCHK_PPGTT_CACHE64B);
3765 I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
3766 } else if (INTEL_INFO(dev)->gen >= 7) {
3767 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3768 /* GFX_MODE is per-ring on gen7+ */
3769 }
3770
3771 for (i = 0; i < I915_NUM_RINGS; i++) {
3772 ring = &dev_priv->ring[i];
3773
3774 if (INTEL_INFO(dev)->gen >= 7)
3775 I915_WRITE(RING_MODE_GEN7(ring),
3776 GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
3777
3778 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3779 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
3780 }
3781}
3782
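/* One-time hardware initialisation: set up swizzling, bring up the render,
 * BSD and blitter rings as available, and enable the aliasing PPGTT.
 */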
Eric Anholt673a3942008-07-30 12:06:12 -07003783int
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003784i915_gem_init_hw(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003785{
3786 drm_i915_private_t *dev_priv = dev->dev_private;
3787 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003788
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003789 i915_gem_init_swizzling(dev);
3790
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003791 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003792 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00003793 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003794
3795 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003796 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003797 if (ret)
3798 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08003799 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01003800
Chris Wilson549f7362010-10-19 11:19:32 +01003801 if (HAS_BLT(dev)) {
3802 ret = intel_init_blt_ring_buffer(dev);
3803 if (ret)
3804 goto cleanup_bsd_ring;
3805 }
3806
Chris Wilson6f392d5482010-08-07 11:01:22 +01003807 dev_priv->next_seqno = 1;
3808
Daniel Vettere21af882012-02-09 20:53:27 +01003809 i915_gem_init_ppgtt(dev);
3810
Chris Wilson68f95ba2010-05-27 13:18:22 +01003811 return 0;
3812
Chris Wilson549f7362010-10-19 11:19:32 +01003813cleanup_bsd_ring:
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003814 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003815cleanup_render_ring:
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003816 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003817 return ret;
3818}
3819
3820void
3821i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3822{
3823 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003824 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003825
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003826 for (i = 0; i < I915_NUM_RINGS; i++)
3827 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003828}
3829
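/* Take over the hardware when a UMS client acquires the VT: clear any wedged
 * state, reinitialise the hardware and install the interrupt handler.
 */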
3830int
Eric Anholt673a3942008-07-30 12:06:12 -07003831i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3832 struct drm_file *file_priv)
3833{
3834 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003835 int ret, i;
Eric Anholt673a3942008-07-30 12:06:12 -07003836
Jesse Barnes79e53942008-11-07 14:24:08 -08003837 if (drm_core_check_feature(dev, DRIVER_MODESET))
3838 return 0;
3839
Ben Gamariba1234d2009-09-14 17:48:47 -04003840 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003841 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04003842 atomic_set(&dev_priv->mm.wedged, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003843 }
3844
Eric Anholt673a3942008-07-30 12:06:12 -07003845 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003846 dev_priv->mm.suspended = 0;
3847
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003848 ret = i915_gem_init_hw(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08003849 if (ret != 0) {
3850 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003851 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08003852 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003853
Chris Wilson69dc4982010-10-19 10:36:51 +01003854 BUG_ON(!list_empty(&dev_priv->mm.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07003855 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3856 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003857 for (i = 0; i < I915_NUM_RINGS; i++) {
3858 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3859 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3860 }
Eric Anholt673a3942008-07-30 12:06:12 -07003861 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003862
Chris Wilson5f353082010-06-07 14:03:03 +01003863 ret = drm_irq_install(dev);
3864 if (ret)
3865 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003866
Eric Anholt673a3942008-07-30 12:06:12 -07003867 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01003868
3869cleanup_ringbuffer:
3870 mutex_lock(&dev->struct_mutex);
3871 i915_gem_cleanup_ringbuffer(dev);
3872 dev_priv->mm.suspended = 1;
3873 mutex_unlock(&dev->struct_mutex);
3874
3875 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003876}
3877
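/* Release the hardware on VT switch-away under UMS: remove the interrupt
 * handler and idle the GPU.
 */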
3878int
3879i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3880 struct drm_file *file_priv)
3881{
Jesse Barnes79e53942008-11-07 14:24:08 -08003882 if (drm_core_check_feature(dev, DRIVER_MODESET))
3883 return 0;
3884
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003885 drm_irq_uninstall(dev);
Linus Torvaldse6890f62009-09-08 17:09:24 -07003886 return i915_gem_idle(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003887}
3888
3889void
3890i915_gem_lastclose(struct drm_device *dev)
3891{
3892 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003893
Eric Anholte806b492009-01-22 09:56:58 -08003894 if (drm_core_check_feature(dev, DRIVER_MODESET))
3895 return;
3896
Keith Packard6dbe2772008-10-14 21:41:13 -07003897 ret = i915_gem_idle(dev);
3898 if (ret)
3899 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003900}
3901
Chris Wilson64193402010-10-24 12:38:05 +01003902static void
3903init_ring_lists(struct intel_ring_buffer *ring)
3904{
3905 INIT_LIST_HEAD(&ring->active_list);
3906 INIT_LIST_HEAD(&ring->request_list);
3907 INIT_LIST_HEAD(&ring->gpu_write_list);
3908}
3909
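/* One-time driver load setup: initialise the GEM lists, fence registers,
 * retire work handler and the shrinker for inactive objects.
 */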
Eric Anholt673a3942008-07-30 12:06:12 -07003910void
3911i915_gem_load(struct drm_device *dev)
3912{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003913 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07003914 drm_i915_private_t *dev_priv = dev->dev_private;
3915
Chris Wilson69dc4982010-10-19 10:36:51 +01003916 INIT_LIST_HEAD(&dev_priv->mm.active_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003917 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3918 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003919 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07003920 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilsonbe726152010-07-23 23:18:50 +01003921 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003922 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003923 for (i = 0; i < I915_NUM_RINGS; i++)
3924 init_ring_lists(&dev_priv->ring[i]);
Daniel Vetter4b9de732011-10-09 21:52:02 +02003925 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
Daniel Vetter007cc8a2010-04-28 11:02:31 +02003926 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003927 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3928 i915_gem_retire_work_handler);
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003929 init_completion(&dev_priv->error_completion);
Chris Wilson31169712009-09-14 16:50:28 +01003930
Dave Airlie94400122010-07-20 13:15:31 +10003931 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3932 if (IS_GEN3(dev)) {
3933 u32 tmp = I915_READ(MI_ARB_STATE);
3934 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3935 /* arb state is a masked write, so set bit + bit in mask */
3936 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3937 I915_WRITE(MI_ARB_STATE, tmp);
3938 }
3939 }
3940
Chris Wilson72bfa192010-12-19 11:42:05 +00003941 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3942
Jesse Barnesde151cf2008-11-12 10:03:55 -08003943 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08003944 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3945 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003946
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003947 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08003948 dev_priv->num_fence_regs = 16;
3949 else
3950 dev_priv->num_fence_regs = 8;
3951
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003952 /* Initialize fence registers to zero */
Eric Anholt10ed13e2011-05-06 13:53:49 -07003953 for (i = 0; i < dev_priv->num_fence_regs; i++) {
3954 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003955 }
Eric Anholt10ed13e2011-05-06 13:53:49 -07003956
Eric Anholt673a3942008-07-30 12:06:12 -07003957 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003958 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01003959
Chris Wilsonce453d82011-02-21 14:43:56 +00003960 dev_priv->mm.interruptible = true;
3961
Chris Wilson17250b72010-10-28 12:51:39 +01003962 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3963 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3964 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07003965}
Dave Airlie71acb5e2008-12-30 20:31:46 +10003966
3967/*
3968 * Create a physically contiguous memory object for this object,
3969 * e.g. for cursor + overlay regs
3970 */
Chris Wilson995b6762010-08-20 13:23:26 +01003971static int i915_gem_init_phys_object(struct drm_device *dev,
3972 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003973{
3974 drm_i915_private_t *dev_priv = dev->dev_private;
3975 struct drm_i915_gem_phys_object *phys_obj;
3976 int ret;
3977
3978 if (dev_priv->mm.phys_objs[id - 1] || !size)
3979 return 0;
3980
Eric Anholt9a298b22009-03-24 12:23:04 -07003981 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003982 if (!phys_obj)
3983 return -ENOMEM;
3984
3985 phys_obj->id = id;
3986
Chris Wilson6eeefaf2010-08-07 11:01:39 +01003987 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003988 if (!phys_obj->handle) {
3989 ret = -ENOMEM;
3990 goto kfree_obj;
3991 }
3992#ifdef CONFIG_X86
3993 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3994#endif
3995
3996 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3997
3998 return 0;
3999kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004000 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004001 return ret;
4002}
4003
Chris Wilson995b6762010-08-20 13:23:26 +01004004static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004005{
4006 drm_i915_private_t *dev_priv = dev->dev_private;
4007 struct drm_i915_gem_phys_object *phys_obj;
4008
4009 if (!dev_priv->mm.phys_objs[id - 1])
4010 return;
4011
4012 phys_obj = dev_priv->mm.phys_objs[id - 1];
4013 if (phys_obj->cur_obj) {
4014 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4015 }
4016
4017#ifdef CONFIG_X86
4018 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4019#endif
4020 drm_pci_free(dev, phys_obj->handle);
4021 kfree(phys_obj);
4022 dev_priv->mm.phys_objs[id - 1] = NULL;
4023}
4024
4025void i915_gem_free_all_phys_object(struct drm_device *dev)
4026{
4027 int i;
4028
Dave Airlie260883c2009-01-22 17:58:49 +10004029 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004030 i915_gem_free_phys_object(dev, i);
4031}
4032
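/* Copy the contents of a physically contiguous object back into the shmemfs
 * pages of its GEM object and sever the association.
 */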
4033void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004034 struct drm_i915_gem_object *obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004035{
Chris Wilson05394f32010-11-08 19:18:58 +00004036 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Chris Wilsone5281cc2010-10-28 13:45:36 +01004037 char *vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004038 int i;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004039 int page_count;
4040
Chris Wilson05394f32010-11-08 19:18:58 +00004041 if (!obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004042 return;
Chris Wilson05394f32010-11-08 19:18:58 +00004043 vaddr = obj->phys_obj->handle->vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004044
Chris Wilson05394f32010-11-08 19:18:58 +00004045 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004046 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07004047 struct page *page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004048 if (!IS_ERR(page)) {
4049 char *dst = kmap_atomic(page);
4050 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4051 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004052
Chris Wilsone5281cc2010-10-28 13:45:36 +01004053 drm_clflush_pages(&page, 1);
4054
4055 set_page_dirty(page);
4056 mark_page_accessed(page);
4057 page_cache_release(page);
4058 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004059 }
Daniel Vetter40ce6572010-11-05 18:12:18 +01004060 intel_gtt_chipset_flush();
Chris Wilsond78b47b2009-06-17 21:52:49 +01004061
Chris Wilson05394f32010-11-08 19:18:58 +00004062 obj->phys_obj->cur_obj = NULL;
4063 obj->phys_obj = NULL;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004064}
4065
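/* Back a GEM object with a physically contiguous allocation, copying the
 * current shmemfs contents into it; used for hardware that requires physical
 * addresses, e.g. cursors and overlay registers.
 */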
4066int
4067i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004068 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004069 int id,
4070 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004071{
Chris Wilson05394f32010-11-08 19:18:58 +00004072 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004073 drm_i915_private_t *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004074 int ret = 0;
4075 int page_count;
4076 int i;
4077
4078 if (id > I915_MAX_PHYS_OBJECT)
4079 return -EINVAL;
4080
Chris Wilson05394f32010-11-08 19:18:58 +00004081 if (obj->phys_obj) {
4082 if (obj->phys_obj->id == id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004083 return 0;
4084 i915_gem_detach_phys_object(dev, obj);
4085 }
4086
Dave Airlie71acb5e2008-12-30 20:31:46 +10004087 /* create a new object */
4088 if (!dev_priv->mm.phys_objs[id - 1]) {
4089 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson05394f32010-11-08 19:18:58 +00004090 obj->base.size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004091 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00004092 DRM_ERROR("failed to init phys object %d size: %zu\n",
4093 id, obj->base.size);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004094 return ret;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004095 }
4096 }
4097
4098 /* bind to the object */
Chris Wilson05394f32010-11-08 19:18:58 +00004099 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4100 obj->phys_obj->cur_obj = obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004101
Chris Wilson05394f32010-11-08 19:18:58 +00004102 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004103
4104 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01004105 struct page *page;
4106 char *dst, *src;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004107
Hugh Dickins5949eac2011-06-27 16:18:18 -07004108 page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004109 if (IS_ERR(page))
4110 return PTR_ERR(page);
4111
Chris Wilsonff75b9b2010-10-30 22:52:31 +01004112 src = kmap_atomic(page);
Chris Wilson05394f32010-11-08 19:18:58 +00004113 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004114 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004115 kunmap_atomic(src);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004116
4117 mark_page_accessed(page);
4118 page_cache_release(page);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004119 }
4120
4121 return 0;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004122}
4123
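/* pwrite fast path for phys objects: copy user data directly into the
 * contiguous allocation, dropping struct_mutex for a full copy_from_user()
 * if the atomic copy faults.
 */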
4124static int
Chris Wilson05394f32010-11-08 19:18:58 +00004125i915_gem_phys_pwrite(struct drm_device *dev,
4126 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +10004127 struct drm_i915_gem_pwrite *args,
4128 struct drm_file *file_priv)
4129{
Chris Wilson05394f32010-11-08 19:18:58 +00004130 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004131 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004132
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004133 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4134 unsigned long unwritten;
4135
4136 /* The physical object once assigned is fixed for the lifetime
4137 * of the obj, so we can safely drop the lock and continue
4138 * to access vaddr.
4139 */
4140 mutex_unlock(&dev->struct_mutex);
4141 unwritten = copy_from_user(vaddr, user_data, args->size);
4142 mutex_lock(&dev->struct_mutex);
4143 if (unwritten)
4144 return -EFAULT;
4145 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004146
Daniel Vetter40ce6572010-11-05 18:12:18 +01004147 intel_gtt_chipset_flush();
Dave Airlie71acb5e2008-12-30 20:31:46 +10004148 return 0;
4149}
Eric Anholtb9624422009-06-03 07:27:35 +00004150
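/* Called when a client closes its file descriptor: orphan any requests it
 * still owns so later request retirement does not dereference the stale
 * file_priv.
 */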
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004151void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004152{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004153 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00004154
4155 /* Clean up our request list when the client is going away, so that
4156 * later retire_requests won't dereference our soon-to-be-gone
4157 * file_priv.
4158 */
Chris Wilson1c255952010-09-26 11:03:27 +01004159 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004160 while (!list_empty(&file_priv->mm.request_list)) {
4161 struct drm_i915_gem_request *request;
4162
4163 request = list_first_entry(&file_priv->mm.request_list,
4164 struct drm_i915_gem_request,
4165 client_list);
4166 list_del(&request->client_list);
4167 request->file_priv = NULL;
4168 }
Chris Wilson1c255952010-09-26 11:03:27 +01004169 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00004170}
Chris Wilson31169712009-09-14 16:50:28 +01004171
Chris Wilson31169712009-09-14 16:50:28 +01004172static int
Chris Wilson1637ef42010-04-20 17:10:35 +01004173i915_gpu_is_active(struct drm_device *dev)
4174{
4175 drm_i915_private_t *dev_priv = dev->dev_private;
4176 int lists_empty;
4177
Chris Wilson1637ef42010-04-20 17:10:35 +01004178 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
Chris Wilson17250b72010-10-28 12:51:39 +01004179 list_empty(&dev_priv->mm.active_list);
Chris Wilson1637ef42010-04-20 17:10:35 +01004180
4181 return !lists_empty;
4182}
4183
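/* Shrinker callback: with nr_to_scan == 0 just report the size of the
 * inactive list; otherwise unbind purgeable and then inactive objects,
 * idling the GPU as a last resort when memory is very tight.
 */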
4184static int
Ying Han1495f232011-05-24 17:12:27 -07004185i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
Chris Wilson31169712009-09-14 16:50:28 +01004186{
Chris Wilson17250b72010-10-28 12:51:39 +01004187 struct drm_i915_private *dev_priv =
4188 container_of(shrinker,
4189 struct drm_i915_private,
4190 mm.inactive_shrinker);
4191 struct drm_device *dev = dev_priv->dev;
4192 struct drm_i915_gem_object *obj, *next;
Ying Han1495f232011-05-24 17:12:27 -07004193 int nr_to_scan = sc->nr_to_scan;
Chris Wilson17250b72010-10-28 12:51:39 +01004194 int cnt;
4195
4196 if (!mutex_trylock(&dev->struct_mutex))
Chris Wilsonbbe2e112010-10-28 22:35:07 +01004197 return 0;
Chris Wilson31169712009-09-14 16:50:28 +01004198
4199 /* "fast-path" to count number of available objects */
4200 if (nr_to_scan == 0) {
Chris Wilson17250b72010-10-28 12:51:39 +01004201 cnt = 0;
4202 list_for_each_entry(obj,
4203 &dev_priv->mm.inactive_list,
4204 mm_list)
4205 cnt++;
4206 mutex_unlock(&dev->struct_mutex);
4207 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01004208 }
4209
Chris Wilson1637ef42010-04-20 17:10:35 +01004210rescan:
Chris Wilson31169712009-09-14 16:50:28 +01004211 /* first scan for clean buffers */
Chris Wilson17250b72010-10-28 12:51:39 +01004212 i915_gem_retire_requests(dev);
Chris Wilson31169712009-09-14 16:50:28 +01004213
Chris Wilson17250b72010-10-28 12:51:39 +01004214 list_for_each_entry_safe(obj, next,
4215 &dev_priv->mm.inactive_list,
4216 mm_list) {
4217 if (i915_gem_object_is_purgeable(obj)) {
Chris Wilson20217462010-11-23 15:26:33 +00004218 if (i915_gem_object_unbind(obj) == 0 &&
4219 --nr_to_scan == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01004220 break;
Chris Wilson31169712009-09-14 16:50:28 +01004221 }
Chris Wilson31169712009-09-14 16:50:28 +01004222 }
4223
4224 /* second pass, evict/count anything still on the inactive list */
Chris Wilson17250b72010-10-28 12:51:39 +01004225 cnt = 0;
4226 list_for_each_entry_safe(obj, next,
4227 &dev_priv->mm.inactive_list,
4228 mm_list) {
Chris Wilson20217462010-11-23 15:26:33 +00004229 if (nr_to_scan &&
4230 i915_gem_object_unbind(obj) == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01004231 nr_to_scan--;
Chris Wilson20217462010-11-23 15:26:33 +00004232 else
Chris Wilson17250b72010-10-28 12:51:39 +01004233 cnt++;
Chris Wilson31169712009-09-14 16:50:28 +01004234 }
4235
Chris Wilson17250b72010-10-28 12:51:39 +01004236 if (nr_to_scan && i915_gpu_is_active(dev)) {
Chris Wilson1637ef42010-04-20 17:10:35 +01004237 /*
4238 * We are desperate for pages, so as a last resort, wait
4239 * for the GPU to finish and discard whatever we can.
4240		 * This dramatically reduces the number of OOM-killer
4241		 * events whilst running the GPU aggressively.
4242 */
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08004243 if (i915_gpu_idle(dev, true) == 0)
Chris Wilson1637ef42010-04-20 17:10:35 +01004244 goto rescan;
4245 }
Chris Wilson17250b72010-10-28 12:51:39 +01004246 mutex_unlock(&dev->struct_mutex);
4247 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01004248}