/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
							   bool write);
static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
								   uint64_t offset,
								   uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						     unsigned alignment,
						     bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
				     struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

void i915_gem_do_init(struct drm_device *dev,
		      unsigned long start,
		      unsigned long mappable_end,
		      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* Take over this portion of the GTT */
	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

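/*
 * A minimal userspace sketch of driving the create ioctl above, assuming
 * libdrm's drmIoctl() wrapper and the DRM_IOCTL_I915_GEM_CREATE request and
 * struct drm_i915_gem_create definitions from i915_drm.h; the 4096-byte size
 * and the surrounding error handling are illustrative only:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
 *		return -errno;
 *
 * On success, create.handle names a GEM object whose size has been rounded
 * up to a page multiple by i915_gem_create().
 */
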
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}

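/*
 * A worked example of the swizzle performed above, for a page whose physical
 * address has bit 17 set: the CPU byte at offset 0x48 within the page is
 * found at 0x48 ^ 64 = 0x08 in the GPU's view, and 0x08 maps back to 0x48,
 * so each 128-byte block simply has its two 64-byte halves exchanged.  Pages
 * with bit 17 clear take the early unswizzled slow_shmem_copy() path.
 */
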
/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object into the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page);
		ret = __copy_to_user_inatomic(user_data,
					      vaddr + page_offset,
					      page_length);
		kunmap_atomic(vaddr);

		mark_page_accessed(page);
		page_cache_release(page);
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pread path, which pins the user pages with
 * get_user_pages() before re-taking the struct_mutex, so we can copy out of
 * the object's backing pages while holding the struct mutex without taking
 * page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					page,
					shmem_page_offset,
					page_length);
		}

		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		mark_page_accessed(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	ret = -EFAULT;
	if (!i915_gem_object_needs_bit17_swizzle(obj))
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin_pages;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin_pages;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page, KM_USER0);
		ret = __copy_from_user_inatomic(vaddr + page_offset,
						user_data,
						page_length);
		kunmap_atomic(vaddr, KM_USER0);

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(page,
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
	else if (obj->gtt_space &&
		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_object_pin(obj, 0, true);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			goto out_unpin;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
		i915_gem_object_unpin(obj);
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			goto out;

		ret = -EFAULT;
		if (!i915_gem_object_needs_bit17_swizzle(obj))
			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (obj->size > dev_priv->mm.gtt_mappable_end) {
		drm_gem_object_unreference_unlocked(obj);
		return -E2BIG;
	}

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

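/*
 * A minimal userspace sketch of the mmap ioctl above, assuming libdrm's
 * drmIoctl() and a handle previously returned by DRM_IOCTL_I915_GEM_CREATE;
 * the object size below is illustrative only:
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.size = 4096,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
 *		return -errno;
 *
 * mmap_arg.addr_ptr then holds a CPU address for the shmem backing pages,
 * distinct from the GTT mapping whose faults are serviced by
 * i915_gem_fault() below.
 */
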
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
1189int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1190{
Chris Wilson05394f32010-11-08 19:18:58 +00001191 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1192 struct drm_device *dev = obj->base.dev;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001193 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001194 pgoff_t page_offset;
1195 unsigned long pfn;
1196 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001197 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001198
1199 /* We don't use vmf->pgoff since that has the fake offset */
1200 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1201 PAGE_SHIFT;
1202
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001203 ret = i915_mutex_lock_interruptible(dev);
1204 if (ret)
1205 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001206
Chris Wilsondb53a302011-02-03 11:57:46 +00001207 trace_i915_gem_object_fault(obj, page_offset, true, write);
1208
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001209 /* Now bind it into the GTT if needed */
Chris Wilson919926a2010-11-12 13:42:53 +00001210 if (!obj->map_and_fenceable) {
1211 ret = i915_gem_object_unbind(obj);
1212 if (ret)
1213 goto unlock;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001214 }
Chris Wilson05394f32010-11-08 19:18:58 +00001215 if (!obj->gtt_space) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01001216 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
Chris Wilsonc7150892009-09-23 00:43:56 +01001217 if (ret)
1218 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001219
Eric Anholte92d03b2011-06-14 16:43:09 -07001220 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1221 if (ret)
1222 goto unlock;
1223 }
Chris Wilson4a684a42010-10-28 14:44:08 +01001224
Chris Wilsond9e86c02010-11-10 16:40:20 +00001225 if (obj->tiling_mode == I915_TILING_NONE)
1226 ret = i915_gem_object_put_fence(obj);
1227 else
Chris Wilsonce453d82011-02-21 14:43:56 +00001228 ret = i915_gem_object_get_fence(obj, NULL);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001229 if (ret)
1230 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001231
Chris Wilson05394f32010-11-08 19:18:58 +00001232 if (i915_gem_object_is_inactive(obj))
1233 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilson7d1c4802010-08-07 21:45:03 +01001234
Chris Wilson6299f992010-11-24 12:23:44 +00001235 obj->fault_mappable = true;
1236
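	/* The target PFN is the GTT aperture base plus the object's offset
	 * within the aperture, plus the offset of the faulting page within
	 * the object (all expressed in pages).
	 */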
Chris Wilson05394f32010-11-08 19:18:58 +00001237 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
Jesse Barnesde151cf2008-11-12 10:03:55 -08001238 page_offset;
1239
1240 /* Finally, remap it using the new GTT offset */
1241 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc7150892009-09-23 00:43:56 +01001242unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001243 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001244out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001245 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001246 case -EIO:
Chris Wilson045e7692010-11-07 09:18:22 +00001247 case -EAGAIN:
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001248 /* Give the error handler a chance to run and move the
1249 * objects off the GPU active list. Next time we service the
1250 * fault, we should be able to transition the page into the
1251 * GTT without touching the GPU (and so avoid further
 1252		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1253 * with coherency, just lost writes.
1254 */
Chris Wilson045e7692010-11-07 09:18:22 +00001255 set_need_resched();
Chris Wilsonc7150892009-09-23 00:43:56 +01001256 case 0:
1257 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001258 case -EINTR:
Chris Wilsonc7150892009-09-23 00:43:56 +01001259 return VM_FAULT_NOPAGE;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001260 case -ENOMEM:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001261 return VM_FAULT_OOM;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001262 default:
Chris Wilsonc7150892009-09-23 00:43:56 +01001263 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001264 }
1265}
1266
1267/**
1268 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1269 * @obj: obj in question
1270 *
1271 * GEM memory mapping works by handing back to userspace a fake mmap offset
1272 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1273 * up the object based on the offset and sets up the various memory mapping
1274 * structures.
1275 *
1276 * This routine allocates and attaches a fake offset for @obj.
1277 */
1278static int
Chris Wilson05394f32010-11-08 19:18:58 +00001279i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001280{
Chris Wilson05394f32010-11-08 19:18:58 +00001281 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001282 struct drm_gem_mm *mm = dev->mm_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001283 struct drm_map_list *list;
Benjamin Herrenschmidtf77d3902009-02-02 16:55:46 +11001284 struct drm_local_map *map;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001285 int ret = 0;
1286
1287 /* Set the object up for mmap'ing */
Chris Wilson05394f32010-11-08 19:18:58 +00001288 list = &obj->base.map_list;
Eric Anholt9a298b22009-03-24 12:23:04 -07001289 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001290 if (!list->map)
1291 return -ENOMEM;
1292
1293 map = list->map;
1294 map->type = _DRM_GEM;
Chris Wilson05394f32010-11-08 19:18:58 +00001295 map->size = obj->base.size;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001296 map->handle = obj;
1297
1298 /* Get a DRM GEM mmap offset allocated... */
1299 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
Chris Wilson05394f32010-11-08 19:18:58 +00001300 obj->base.size / PAGE_SIZE,
1301 0, 0);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001302 if (!list->file_offset_node) {
Chris Wilson05394f32010-11-08 19:18:58 +00001303 DRM_ERROR("failed to allocate offset for bo %d\n",
1304 obj->base.name);
Chris Wilson9e0ae5342010-09-21 15:05:24 +01001305 ret = -ENOSPC;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001306 goto out_free_list;
1307 }
1308
1309 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
Chris Wilson05394f32010-11-08 19:18:58 +00001310 obj->base.size / PAGE_SIZE,
1311 0);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001312 if (!list->file_offset_node) {
1313 ret = -ENOMEM;
1314 goto out_free_list;
1315 }
1316
1317 list->hash.key = list->file_offset_node->start;
Chris Wilson9e0ae5342010-09-21 15:05:24 +01001318 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1319 if (ret) {
Jesse Barnesde151cf2008-11-12 10:03:55 -08001320 DRM_ERROR("failed to add to map hash\n");
1321 goto out_free_mm;
1322 }
1323
Jesse Barnesde151cf2008-11-12 10:03:55 -08001324 return 0;
1325
1326out_free_mm:
1327 drm_mm_put_block(list->file_offset_node);
1328out_free_list:
Eric Anholt9a298b22009-03-24 12:23:04 -07001329 kfree(list->map);
Chris Wilson39a01d12010-10-28 13:03:06 +01001330 list->map = NULL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001331
1332 return ret;
1333}
1334
Chris Wilson901782b2009-07-10 08:18:50 +01001335/**
1336 * i915_gem_release_mmap - remove physical page mappings
1337 * @obj: obj in question
1338 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001339 * Preserve the reservation of the mmap offset with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001340 * relinquish ownership of the pages back to the system.
1341 *
1342 * It is vital that we remove the page mapping if we have mapped a tiled
1343 * object through the GTT and then lose the fence register due to
1344 * resource pressure. Similarly if the object has been moved out of the
 1345 * aperture, then pages mapped into userspace must be revoked. Removing the
1346 * mapping will then trigger a page fault on the next user access, allowing
1347 * fixup by i915_gem_fault().
1348 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001349void
Chris Wilson05394f32010-11-08 19:18:58 +00001350i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001351{
Chris Wilson6299f992010-11-24 12:23:44 +00001352 if (!obj->fault_mappable)
1353 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001354
Chris Wilsonf6e47882011-03-20 21:09:12 +00001355 if (obj->base.dev->dev_mapping)
1356 unmap_mapping_range(obj->base.dev->dev_mapping,
1357 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1358 obj->base.size, 1);
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001359
Chris Wilson6299f992010-11-24 12:23:44 +00001360 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001361}
1362
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001363static void
Chris Wilson05394f32010-11-08 19:18:58 +00001364i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001365{
Chris Wilson05394f32010-11-08 19:18:58 +00001366 struct drm_device *dev = obj->base.dev;
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001367 struct drm_gem_mm *mm = dev->mm_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001368 struct drm_map_list *list = &obj->base.map_list;
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001369
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001370 drm_ht_remove_item(&mm->offset_hash, &list->hash);
Chris Wilson39a01d12010-10-28 13:03:06 +01001371 drm_mm_put_block(list->file_offset_node);
1372 kfree(list->map);
1373 list->map = NULL;
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001374}
1375
Chris Wilson92b88ae2010-11-09 11:47:32 +00001376static uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001377i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001378{
Chris Wilsone28f8712011-07-18 13:11:49 -07001379 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001380
1381 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001382 tiling_mode == I915_TILING_NONE)
1383 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001384
1385 /* Previous chips need a power-of-two fence region when tiling */
1386 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001387 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001388 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001389 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001390
Chris Wilsone28f8712011-07-18 13:11:49 -07001391 while (gtt_size < size)
1392 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001393
Chris Wilsone28f8712011-07-18 13:11:49 -07001394 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001395}
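
/* Worked example for i915_gem_get_gtt_size() above (editorial illustration):
 * on gen3 a 700KB tiled object gets a 1MB fence region (the 1MB minimum
 * already covers it) and a 1.5MB object is rounded up to 2MB; on gen2 the
 * minimum is 512KB, so a 600KB tiled object is rounded up to 1MB. On gen4+
 * or for untiled objects the object size is used unchanged.
 */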
1396
Jesse Barnesde151cf2008-11-12 10:03:55 -08001397/**
1398 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 1399 * @dev: the device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
1400 *
1401 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001402 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001403 */
1404static uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001405i915_gem_get_gtt_alignment(struct drm_device *dev,
1406 uint32_t size,
1407 int tiling_mode)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001408{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001409 /*
1410 * Minimum alignment is 4k (GTT page size), but might be greater
1411 * if a fence register is needed for the object.
1412 */
Chris Wilsona00b10c2010-09-24 21:15:47 +01001413 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001414 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001415 return 4096;
1416
1417 /*
1418 * Previous chips need to be aligned to the size of the smallest
1419 * fence register that can contain the object.
1420 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001421 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001422}
1423
Daniel Vetter5e783302010-11-14 22:32:36 +01001424/**
1425 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1426 * unfenced object
Chris Wilsone28f8712011-07-18 13:11:49 -07001427 * @dev: the device
1428 * @size: size of the object
1429 * @tiling_mode: tiling mode of the object
Daniel Vetter5e783302010-11-14 22:32:36 +01001430 *
1431 * Return the required GTT alignment for an object, only taking into account
1432 * unfenced tiled surface requirements.
1433 */
Chris Wilson467cffb2011-03-07 10:42:03 +00001434uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001435i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1436 uint32_t size,
1437 int tiling_mode)
Daniel Vetter5e783302010-11-14 22:32:36 +01001438{
Daniel Vetter5e783302010-11-14 22:32:36 +01001439 /*
1440 * Minimum alignment is 4k (GTT page size) for sane hw.
1441 */
1442 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001443 tiling_mode == I915_TILING_NONE)
Daniel Vetter5e783302010-11-14 22:32:36 +01001444 return 4096;
1445
Chris Wilsone28f8712011-07-18 13:11:49 -07001446	/* Previous hardware, however, needs to be aligned to a power-of-two
 1447	 * tile height. The simplest method for determining this is to reuse
 1448	 * the power-of-two object size.
Daniel Vetter5e783302010-11-14 22:32:36 +01001449 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001450 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Daniel Vetter5e783302010-11-14 22:32:36 +01001451}
1452
Jesse Barnesde151cf2008-11-12 10:03:55 -08001453int
Dave Airlieff72145b2011-02-07 12:16:14 +10001454i915_gem_mmap_gtt(struct drm_file *file,
1455 struct drm_device *dev,
1456 uint32_t handle,
1457 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001458{
Chris Wilsonda761a62010-10-27 17:37:08 +01001459 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001460 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001461 int ret;
1462
1463 if (!(dev->driver->driver_features & DRIVER_GEM))
1464 return -ENODEV;
1465
Chris Wilson76c1dec2010-09-25 11:22:51 +01001466 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001467 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001468 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001469
Dave Airlieff72145b2011-02-07 12:16:14 +10001470 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001471 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001472 ret = -ENOENT;
1473 goto unlock;
1474 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001475
Chris Wilson05394f32010-11-08 19:18:58 +00001476 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001477 ret = -E2BIG;
1478 goto unlock;
1479 }
1480
Chris Wilson05394f32010-11-08 19:18:58 +00001481 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001482 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001483 ret = -EINVAL;
1484 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001485 }
1486
Chris Wilson05394f32010-11-08 19:18:58 +00001487 if (!obj->base.map_list.map) {
Jesse Barnesde151cf2008-11-12 10:03:55 -08001488 ret = i915_gem_create_mmap_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001489 if (ret)
1490 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001491 }
1492
Dave Airlieff72145b2011-02-07 12:16:14 +10001493 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001494
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001495out:
Chris Wilson05394f32010-11-08 19:18:58 +00001496 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001497unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001498 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001499 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001500}
1501
Dave Airlieff72145b2011-02-07 12:16:14 +10001502/**
1503 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1504 * @dev: DRM device
1505 * @data: GTT mapping ioctl data
 1506 * @file: DRM file
1507 *
1508 * Simply returns the fake offset to userspace so it can mmap it.
1509 * The mmap call will end up in drm_gem_mmap(), which will set things
1510 * up so we can get faults in the handler above.
1511 *
1512 * The fault handler will take care of binding the object into the GTT
1513 * (since it may have been evicted to make room for something), allocating
1514 * a fence register, and mapping the appropriate aperture address into
1515 * userspace.
1516 */
1517int
1518i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1519 struct drm_file *file)
1520{
1521 struct drm_i915_gem_mmap_gtt *args = data;
1522
1523 if (!(dev->driver->driver_features & DRIVER_GEM))
1524 return -ENODEV;
1525
1526 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1527}
1528
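/*
 * Editorial sketch of the corresponding userspace call sequence (not part of
 * the original source); the ioctl and field names are the i915_drm.h uAPI,
 * and drmIoctl() is the libdrm wrapper around ioctl(2):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 *
 * Subsequent faults on ptr are then serviced by i915_gem_fault() above.
 */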
1529
Chris Wilsone5281cc2010-10-28 13:45:36 +01001530static int
Chris Wilson05394f32010-11-08 19:18:58 +00001531i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
Chris Wilsone5281cc2010-10-28 13:45:36 +01001532 gfp_t gfpmask)
1533{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001534 int page_count, i;
1535 struct address_space *mapping;
1536 struct inode *inode;
1537 struct page *page;
1538
1539 /* Get the list of pages out of our struct file. They'll be pinned
1540 * at this point until we release them.
1541 */
Chris Wilson05394f32010-11-08 19:18:58 +00001542 page_count = obj->base.size / PAGE_SIZE;
1543 BUG_ON(obj->pages != NULL);
1544 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1545 if (obj->pages == NULL)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001546 return -ENOMEM;
1547
Chris Wilson05394f32010-11-08 19:18:58 +00001548 inode = obj->base.filp->f_path.dentry->d_inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001549 mapping = inode->i_mapping;
Hugh Dickins5949eac2011-06-27 16:18:18 -07001550 gfpmask |= mapping_gfp_mask(mapping);
1551
Chris Wilsone5281cc2010-10-28 13:45:36 +01001552 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07001553 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001554 if (IS_ERR(page))
1555 goto err_pages;
1556
Chris Wilson05394f32010-11-08 19:18:58 +00001557 obj->pages[i] = page;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001558 }
1559
Chris Wilson05394f32010-11-08 19:18:58 +00001560 if (obj->tiling_mode != I915_TILING_NONE)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001561 i915_gem_object_do_bit_17_swizzle(obj);
1562
1563 return 0;
1564
1565err_pages:
1566 while (i--)
Chris Wilson05394f32010-11-08 19:18:58 +00001567 page_cache_release(obj->pages[i]);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001568
Chris Wilson05394f32010-11-08 19:18:58 +00001569 drm_free_large(obj->pages);
1570 obj->pages = NULL;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001571 return PTR_ERR(page);
1572}
1573
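/* Undo i915_gem_object_get_pages_gtt(): save the bit-17 swizzle state for
 * tiled objects, mark the shmem pages dirty/accessed as appropriate and drop
 * the page cache references taken when the pages were pinned.
 */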
Chris Wilson5cdf5882010-09-27 15:51:07 +01001574static void
Chris Wilson05394f32010-11-08 19:18:58 +00001575i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001576{
Chris Wilson05394f32010-11-08 19:18:58 +00001577 int page_count = obj->base.size / PAGE_SIZE;
Eric Anholt673a3942008-07-30 12:06:12 -07001578 int i;
1579
Chris Wilson05394f32010-11-08 19:18:58 +00001580 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001581
Chris Wilson05394f32010-11-08 19:18:58 +00001582 if (obj->tiling_mode != I915_TILING_NONE)
Eric Anholt280b7132009-03-12 16:56:27 -07001583 i915_gem_object_save_bit_17_swizzle(obj);
1584
Chris Wilson05394f32010-11-08 19:18:58 +00001585 if (obj->madv == I915_MADV_DONTNEED)
1586 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001587
1588 for (i = 0; i < page_count; i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00001589 if (obj->dirty)
1590 set_page_dirty(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001591
Chris Wilson05394f32010-11-08 19:18:58 +00001592 if (obj->madv == I915_MADV_WILLNEED)
1593 mark_page_accessed(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001594
Chris Wilson05394f32010-11-08 19:18:58 +00001595 page_cache_release(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001596 }
Chris Wilson05394f32010-11-08 19:18:58 +00001597 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001598
Chris Wilson05394f32010-11-08 19:18:58 +00001599 drm_free_large(obj->pages);
1600 obj->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001601}
1602
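/* Object list lifecycle (summary of the helpers below): an object used by the
 * GPU moves to the active lists stamped with the seqno of the request that
 * uses it; when that request retires it moves to the flushing list if a GPU
 * write domain is still pending, otherwise straight to the inactive list.
 */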
Chris Wilson54cf91d2010-11-25 18:00:26 +00001603void
Chris Wilson05394f32010-11-08 19:18:58 +00001604i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001605 struct intel_ring_buffer *ring,
1606 u32 seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001607{
Chris Wilson05394f32010-11-08 19:18:58 +00001608 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001609 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter617dbe22010-02-11 22:16:02 +01001610
Zou Nan hai852835f2010-05-21 09:08:56 +08001611 BUG_ON(ring == NULL);
Chris Wilson05394f32010-11-08 19:18:58 +00001612 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001613
1614 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001615 if (!obj->active) {
1616 drm_gem_object_reference(&obj->base);
1617 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001618 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001619
Eric Anholt673a3942008-07-30 12:06:12 -07001620 /* Move from whatever list we were on to the tail of execution. */
Chris Wilson05394f32010-11-08 19:18:58 +00001621 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1622 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001623
Chris Wilson05394f32010-11-08 19:18:58 +00001624 obj->last_rendering_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001625 if (obj->fenced_gpu_access) {
1626 struct drm_i915_fence_reg *reg;
1627
1628 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1629
1630 obj->last_fenced_seqno = seqno;
1631 obj->last_fenced_ring = ring;
1632
1633 reg = &dev_priv->fence_regs[obj->fence_reg];
1634 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1635 }
1636}
1637
1638static void
1639i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1640{
1641 list_del_init(&obj->ring_list);
1642 obj->last_rendering_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001643}
1644
Eric Anholtce44b0e2008-11-06 16:00:31 -08001645static void
Chris Wilson05394f32010-11-08 19:18:58 +00001646i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
Eric Anholtce44b0e2008-11-06 16:00:31 -08001647{
Chris Wilson05394f32010-11-08 19:18:58 +00001648 struct drm_device *dev = obj->base.dev;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001649 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001650
Chris Wilson05394f32010-11-08 19:18:58 +00001651 BUG_ON(!obj->active);
1652 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001653
1654 i915_gem_object_move_off_active(obj);
1655}
1656
1657static void
1658i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1659{
1660 struct drm_device *dev = obj->base.dev;
1661 struct drm_i915_private *dev_priv = dev->dev_private;
1662
1663 if (obj->pin_count != 0)
1664 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1665 else
1666 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1667
1668 BUG_ON(!list_empty(&obj->gpu_write_list));
1669 BUG_ON(!obj->active);
1670 obj->ring = NULL;
1671
1672 i915_gem_object_move_off_active(obj);
1673 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001674
1675 obj->active = 0;
Chris Wilson87ca9c82010-12-02 09:42:56 +00001676 obj->pending_gpu_write = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001677 drm_gem_object_unreference(&obj->base);
1678
1679 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08001680}
Eric Anholt673a3942008-07-30 12:06:12 -07001681
Chris Wilson963b4832009-09-20 23:03:54 +01001682/* Immediately discard the backing storage */
1683static void
Chris Wilson05394f32010-11-08 19:18:58 +00001684i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001685{
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001686 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001687
Chris Wilsonae9fed62010-08-07 11:01:30 +01001688	/* Our goal here is to return as much of the memory as
 1689	 * possible back to the system, as we are called from OOM.
1690 * To do this we must instruct the shmfs to drop all of its
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001691 * backing pages, *now*.
Chris Wilsonae9fed62010-08-07 11:01:30 +01001692 */
Chris Wilson05394f32010-11-08 19:18:58 +00001693 inode = obj->base.filp->f_path.dentry->d_inode;
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001694 shmem_truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001695
Chris Wilson05394f32010-11-08 19:18:58 +00001696 obj->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001697}
1698
1699static inline int
Chris Wilson05394f32010-11-08 19:18:58 +00001700i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001701{
Chris Wilson05394f32010-11-08 19:18:58 +00001702 return obj->madv == I915_MADV_DONTNEED;
Chris Wilson963b4832009-09-20 23:03:54 +01001703}
1704
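/* Move every object on the ring's gpu_write_list whose write domain is being
 * flushed onto the active list, stamped with the seqno of the request about
 * to be emitted, and clear its GPU write domain.
 */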
Eric Anholt673a3942008-07-30 12:06:12 -07001705static void
Chris Wilsondb53a302011-02-03 11:57:46 +00001706i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1707 uint32_t flush_domains)
Daniel Vetter63560392010-02-19 11:51:59 +01001708{
Chris Wilson05394f32010-11-08 19:18:58 +00001709 struct drm_i915_gem_object *obj, *next;
Daniel Vetter63560392010-02-19 11:51:59 +01001710
Chris Wilson05394f32010-11-08 19:18:58 +00001711 list_for_each_entry_safe(obj, next,
Chris Wilson64193402010-10-24 12:38:05 +01001712 &ring->gpu_write_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001713 gpu_write_list) {
Chris Wilson05394f32010-11-08 19:18:58 +00001714 if (obj->base.write_domain & flush_domains) {
1715 uint32_t old_write_domain = obj->base.write_domain;
Daniel Vetter63560392010-02-19 11:51:59 +01001716
Chris Wilson05394f32010-11-08 19:18:58 +00001717 obj->base.write_domain = 0;
1718 list_del_init(&obj->gpu_write_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001719 i915_gem_object_move_to_active(obj, ring,
Chris Wilsondb53a302011-02-03 11:57:46 +00001720 i915_gem_next_request_seqno(ring));
Daniel Vetter63560392010-02-19 11:51:59 +01001721
Daniel Vetter63560392010-02-19 11:51:59 +01001722 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00001723 obj->base.read_domains,
Daniel Vetter63560392010-02-19 11:51:59 +01001724 old_write_domain);
1725 }
1726 }
1727}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001728
Chris Wilson3cce4692010-10-27 16:11:02 +01001729int
Chris Wilsondb53a302011-02-03 11:57:46 +00001730i915_add_request(struct intel_ring_buffer *ring,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001731 struct drm_file *file,
Chris Wilsondb53a302011-02-03 11:57:46 +00001732 struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001733{
Chris Wilsondb53a302011-02-03 11:57:46 +00001734 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001735 uint32_t seqno;
1736 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01001737 int ret;
1738
1739 BUG_ON(request == NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07001740
Chris Wilson3cce4692010-10-27 16:11:02 +01001741 ret = ring->add_request(ring, &seqno);
1742 if (ret)
1743 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001744
Chris Wilsondb53a302011-02-03 11:57:46 +00001745 trace_i915_gem_request_add(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001746
1747 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001748 request->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001749 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001750 was_empty = list_empty(&ring->request_list);
1751 list_add_tail(&request->list, &ring->request_list);
1752
Chris Wilsondb53a302011-02-03 11:57:46 +00001753 if (file) {
1754 struct drm_i915_file_private *file_priv = file->driver_priv;
1755
Chris Wilson1c255952010-09-26 11:03:27 +01001756 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001757 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001758 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001759 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01001760 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00001761 }
Eric Anholt673a3942008-07-30 12:06:12 -07001762
Chris Wilsondb53a302011-02-03 11:57:46 +00001763 ring->outstanding_lazy_request = false;
1764
Ben Gamarif65d9422009-09-14 17:48:44 -04001765 if (!dev_priv->mm.suspended) {
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001766 mod_timer(&dev_priv->hangcheck_timer,
1767 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
Ben Gamarif65d9422009-09-14 17:48:44 -04001768 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001769 queue_delayed_work(dev_priv->wq,
1770 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001771 }
Chris Wilson3cce4692010-10-27 16:11:02 +01001772 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001773}
1774
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001775static inline void
1776i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001777{
Chris Wilson1c255952010-09-26 11:03:27 +01001778 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07001779
Chris Wilson1c255952010-09-26 11:03:27 +01001780 if (!file_priv)
1781 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001782
Chris Wilson1c255952010-09-26 11:03:27 +01001783 spin_lock(&file_priv->mm.lock);
Herton Ronaldo Krzesinski09bfa512011-03-17 13:45:12 +00001784 if (request->file_priv) {
1785 list_del(&request->client_list);
1786 request->file_priv = NULL;
1787 }
Chris Wilson1c255952010-09-26 11:03:27 +01001788 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001789}
1790
Chris Wilsondfaae392010-09-22 10:31:52 +01001791static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1792 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001793{
Chris Wilsondfaae392010-09-22 10:31:52 +01001794 while (!list_empty(&ring->request_list)) {
1795 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001796
Chris Wilsondfaae392010-09-22 10:31:52 +01001797 request = list_first_entry(&ring->request_list,
1798 struct drm_i915_gem_request,
1799 list);
1800
1801 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001802 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001803 kfree(request);
1804 }
1805
1806 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001807 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001808
Chris Wilson05394f32010-11-08 19:18:58 +00001809 obj = list_first_entry(&ring->active_list,
1810 struct drm_i915_gem_object,
1811 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001812
Chris Wilson05394f32010-11-08 19:18:58 +00001813 obj->base.write_domain = 0;
1814 list_del_init(&obj->gpu_write_list);
1815 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001816 }
Eric Anholt673a3942008-07-30 12:06:12 -07001817}
1818
Chris Wilson312817a2010-11-22 11:50:11 +00001819static void i915_gem_reset_fences(struct drm_device *dev)
1820{
1821 struct drm_i915_private *dev_priv = dev->dev_private;
1822 int i;
1823
1824 for (i = 0; i < 16; i++) {
1825 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00001826 struct drm_i915_gem_object *obj = reg->obj;
1827
1828 if (!obj)
1829 continue;
1830
1831 if (obj->tiling_mode)
1832 i915_gem_release_mmap(obj);
1833
Chris Wilsond9e86c02010-11-10 16:40:20 +00001834 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1835 reg->obj->fenced_gpu_access = false;
1836 reg->obj->last_fenced_seqno = 0;
1837 reg->obj->last_fenced_ring = NULL;
1838 i915_gem_clear_fence_reg(dev, reg);
Chris Wilson312817a2010-11-22 11:50:11 +00001839 }
1840}
1841
Chris Wilson069efc12010-09-30 16:53:18 +01001842void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07001843{
Chris Wilsondfaae392010-09-22 10:31:52 +01001844 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001845 struct drm_i915_gem_object *obj;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001846 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001847
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001848 for (i = 0; i < I915_NUM_RINGS; i++)
1849 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
Chris Wilsondfaae392010-09-22 10:31:52 +01001850
1851 /* Remove anything from the flushing lists. The GPU cache is likely
1852 * to be lost on reset along with the data, so simply move the
1853 * lost bo to the inactive list.
1854 */
1855 while (!list_empty(&dev_priv->mm.flushing_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001856		obj = list_first_entry(&dev_priv->mm.flushing_list,
1857 struct drm_i915_gem_object,
1858 mm_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001859
Chris Wilson05394f32010-11-08 19:18:58 +00001860 obj->base.write_domain = 0;
1861 list_del_init(&obj->gpu_write_list);
1862 i915_gem_object_move_to_inactive(obj);
Chris Wilson9375e442010-09-19 12:21:28 +01001863 }
Chris Wilson9375e442010-09-19 12:21:28 +01001864
Chris Wilsondfaae392010-09-22 10:31:52 +01001865 /* Move everything out of the GPU domains to ensure we do any
1866 * necessary invalidation upon reuse.
1867 */
Chris Wilson05394f32010-11-08 19:18:58 +00001868 list_for_each_entry(obj,
Chris Wilson77f01232010-09-19 12:31:36 +01001869 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001870 mm_list)
Chris Wilson77f01232010-09-19 12:31:36 +01001871 {
Chris Wilson05394f32010-11-08 19:18:58 +00001872 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilson77f01232010-09-19 12:31:36 +01001873 }
Chris Wilson069efc12010-09-30 16:53:18 +01001874
1875 /* The fence registers are invalidated so clear them out */
Chris Wilson312817a2010-11-22 11:50:11 +00001876 i915_gem_reset_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001877}
1878
1879/**
1880 * This function clears the request list as sequence numbers are passed.
1881 */
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001882static void
Chris Wilsondb53a302011-02-03 11:57:46 +00001883i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001884{
Eric Anholt673a3942008-07-30 12:06:12 -07001885 uint32_t seqno;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001886 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001887
Chris Wilsondb53a302011-02-03 11:57:46 +00001888 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001889 return;
1890
Chris Wilsondb53a302011-02-03 11:57:46 +00001891 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001892
Chris Wilson78501ea2010-10-27 12:18:21 +01001893 seqno = ring->get_seqno(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001894
Chris Wilson076e2c02011-01-21 10:07:18 +00001895 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001896 if (seqno >= ring->sync_seqno[i])
1897 ring->sync_seqno[i] = 0;
1898
Zou Nan hai852835f2010-05-21 09:08:56 +08001899 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001900 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001901
Zou Nan hai852835f2010-05-21 09:08:56 +08001902 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001903 struct drm_i915_gem_request,
1904 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001905
Chris Wilsondfaae392010-09-22 10:31:52 +01001906 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001907 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001908
Chris Wilsondb53a302011-02-03 11:57:46 +00001909 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001910
1911 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001912 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001913 kfree(request);
1914 }
1915
1916 /* Move any buffers on the active list that are no longer referenced
1917 * by the ringbuffer to the flushing/inactive lists as appropriate.
1918 */
1919 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001920 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001921
Chris Wilson05394f32010-11-08 19:18:58 +00001922		obj = list_first_entry(&ring->active_list,
1923 struct drm_i915_gem_object,
1924 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001925
Chris Wilson05394f32010-11-08 19:18:58 +00001926 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001927 break;
1928
Chris Wilson05394f32010-11-08 19:18:58 +00001929 if (obj->base.write_domain != 0)
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001930 i915_gem_object_move_to_flushing(obj);
1931 else
1932 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001933 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001934
Chris Wilsondb53a302011-02-03 11:57:46 +00001935 if (unlikely(ring->trace_irq_seqno &&
1936 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001937 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00001938 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001939 }
Chris Wilson23bc5982010-09-29 16:10:57 +01001940
Chris Wilsondb53a302011-02-03 11:57:46 +00001941 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001942}
1943
1944void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001945i915_gem_retire_requests(struct drm_device *dev)
1946{
1947 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001948 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001949
Chris Wilsonbe726152010-07-23 23:18:50 +01001950 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001951 struct drm_i915_gem_object *obj, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01001952
1953 /* We must be careful that during unbind() we do not
1954 * accidentally infinitely recurse into retire requests.
1955 * Currently:
1956 * retire -> free -> unbind -> wait -> retire_ring
1957 */
Chris Wilson05394f32010-11-08 19:18:58 +00001958 list_for_each_entry_safe(obj, next,
Chris Wilsonbe726152010-07-23 23:18:50 +01001959 &dev_priv->mm.deferred_free_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001960 mm_list)
Chris Wilson05394f32010-11-08 19:18:58 +00001961 i915_gem_free_object_tail(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01001962 }
1963
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001964 for (i = 0; i < I915_NUM_RINGS; i++)
Chris Wilsondb53a302011-02-03 11:57:46 +00001965 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001966}
1967
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001968static void
Eric Anholt673a3942008-07-30 12:06:12 -07001969i915_gem_retire_work_handler(struct work_struct *work)
1970{
1971 drm_i915_private_t *dev_priv;
1972 struct drm_device *dev;
Chris Wilson0a587052011-01-09 21:05:44 +00001973 bool idle;
1974 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001975
1976 dev_priv = container_of(work, drm_i915_private_t,
1977 mm.retire_work.work);
1978 dev = dev_priv->dev;
1979
Chris Wilson891b48c2010-09-29 12:26:37 +01001980 /* Come back later if the device is busy... */
1981 if (!mutex_trylock(&dev->struct_mutex)) {
1982 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1983 return;
1984 }
1985
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001986 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001987
Chris Wilson0a587052011-01-09 21:05:44 +00001988 /* Send a periodic flush down the ring so we don't hold onto GEM
1989 * objects indefinitely.
1990 */
1991 idle = true;
1992 for (i = 0; i < I915_NUM_RINGS; i++) {
1993 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1994
1995 if (!list_empty(&ring->gpu_write_list)) {
1996 struct drm_i915_gem_request *request;
1997 int ret;
1998
Chris Wilsondb53a302011-02-03 11:57:46 +00001999 ret = i915_gem_flush_ring(ring,
2000 0, I915_GEM_GPU_DOMAINS);
Chris Wilson0a587052011-01-09 21:05:44 +00002001 request = kzalloc(sizeof(*request), GFP_KERNEL);
2002 if (ret || request == NULL ||
Chris Wilsondb53a302011-02-03 11:57:46 +00002003 i915_add_request(ring, NULL, request))
Chris Wilson0a587052011-01-09 21:05:44 +00002004 kfree(request);
2005 }
2006
2007 idle &= list_empty(&ring->request_list);
2008 }
2009
2010 if (!dev_priv->mm.suspended && !idle)
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07002011 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Chris Wilson0a587052011-01-09 21:05:44 +00002012
Eric Anholt673a3942008-07-30 12:06:12 -07002013 mutex_unlock(&dev->struct_mutex);
2014}
2015
Chris Wilsondb53a302011-02-03 11:57:46 +00002016/**
2017 * Waits for a sequence number to be signaled, and cleans up the
2018 * request and object lists appropriately for that event.
2019 */
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02002020int
Chris Wilsondb53a302011-02-03 11:57:46 +00002021i915_wait_request(struct intel_ring_buffer *ring,
Chris Wilsonce453d82011-02-21 14:43:56 +00002022 uint32_t seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002023{
Chris Wilsondb53a302011-02-03 11:57:46 +00002024 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07002025 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07002026 int ret = 0;
2027
2028 BUG_ON(seqno == 0);
2029
Chris Wilsond9bc7e92011-02-07 13:09:31 +00002030 if (atomic_read(&dev_priv->mm.wedged)) {
2031 struct completion *x = &dev_priv->error_completion;
2032 bool recovery_complete;
2033 unsigned long flags;
2034
2035 /* Give the error handler a chance to run. */
2036 spin_lock_irqsave(&x->wait.lock, flags);
2037 recovery_complete = x->done > 0;
2038 spin_unlock_irqrestore(&x->wait.lock, flags);
2039
2040 return recovery_complete ? -EIO : -EAGAIN;
2041 }
Ben Gamariffed1d02009-09-14 17:48:41 -04002042
Chris Wilson5d97eb62010-11-10 20:40:02 +00002043 if (seqno == ring->outstanding_lazy_request) {
Chris Wilson3cce4692010-10-27 16:11:02 +01002044 struct drm_i915_gem_request *request;
2045
2046 request = kzalloc(sizeof(*request), GFP_KERNEL);
2047 if (request == NULL)
Daniel Vettere35a41d2010-02-11 22:13:59 +01002048 return -ENOMEM;
Chris Wilson3cce4692010-10-27 16:11:02 +01002049
Chris Wilsondb53a302011-02-03 11:57:46 +00002050 ret = i915_add_request(ring, NULL, request);
Chris Wilson3cce4692010-10-27 16:11:02 +01002051 if (ret) {
2052 kfree(request);
2053 return ret;
2054 }
2055
2056 seqno = request->seqno;
Daniel Vettere35a41d2010-02-11 22:13:59 +01002057 }
2058
Chris Wilson78501ea2010-10-27 12:18:21 +01002059 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002060 if (HAS_PCH_SPLIT(ring->dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002061 ier = I915_READ(DEIER) | I915_READ(GTIER);
2062 else
2063 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07002064 if (!ier) {
2065 DRM_ERROR("something (likely vbetool) disabled "
2066 "interrupts, re-enabling\n");
Chris Wilsonf01c22f2011-06-28 11:48:51 +01002067 ring->dev->driver->irq_preinstall(ring->dev);
2068 ring->dev->driver->irq_postinstall(ring->dev);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07002069 }
2070
Chris Wilsondb53a302011-02-03 11:57:46 +00002071 trace_i915_gem_request_wait_begin(ring, seqno);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002072
Chris Wilsonb2223492010-10-27 15:27:33 +01002073 ring->waiting_seqno = seqno;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00002074 if (ring->irq_get(ring)) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002075 if (dev_priv->mm.interruptible)
Chris Wilsonb13c2b92010-12-13 16:54:50 +00002076 ret = wait_event_interruptible(ring->irq_queue,
2077 i915_seqno_passed(ring->get_seqno(ring), seqno)
2078 || atomic_read(&dev_priv->mm.wedged));
2079 else
2080 wait_event(ring->irq_queue,
2081 i915_seqno_passed(ring->get_seqno(ring), seqno)
2082 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02002083
Chris Wilsonb13c2b92010-12-13 16:54:50 +00002084 ring->irq_put(ring);
Chris Wilsonb5ba1772010-12-14 12:17:15 +00002085 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2086 seqno) ||
2087 atomic_read(&dev_priv->mm.wedged), 3000))
2088 ret = -EBUSY;
Chris Wilsonb2223492010-10-27 15:27:33 +01002089 ring->waiting_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002090
Chris Wilsondb53a302011-02-03 11:57:46 +00002091 trace_i915_gem_request_wait_end(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07002092 }
Ben Gamariba1234d2009-09-14 17:48:47 -04002093 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01002094 ret = -EAGAIN;
Eric Anholt673a3942008-07-30 12:06:12 -07002095
2096 if (ret && ret != -ERESTARTSYS)
Daniel Vetter8bff9172010-02-11 22:19:40 +01002097 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
Chris Wilson78501ea2010-10-27 12:18:21 +01002098 __func__, ret, seqno, ring->get_seqno(ring),
Daniel Vetter8bff9172010-02-11 22:19:40 +01002099 dev_priv->next_seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07002100
2101 /* Directly dispatch request retiring. While we have the work queue
2102 * to handle this, the waiter on a request often wants an associated
2103 * buffer to have made it to the inactive list, and we would need
2104 * a separate wait queue to handle that.
2105 */
2106 if (ret == 0)
Chris Wilsondb53a302011-02-03 11:57:46 +00002107 i915_gem_retire_requests_ring(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07002108
2109 return ret;
2110}
2111
Daniel Vetter48764bf2009-09-15 22:57:32 +02002112/**
Eric Anholt673a3942008-07-30 12:06:12 -07002113 * Ensures that all rendering to the object has completed and the object is
2114 * safe to unbind from the GTT or access from the CPU.
2115 */
Chris Wilson54cf91d2010-11-25 18:00:26 +00002116int
Chris Wilsonce453d82011-02-21 14:43:56 +00002117i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002118{
Eric Anholt673a3942008-07-30 12:06:12 -07002119 int ret;
2120
Eric Anholte47c68e2008-11-14 13:35:19 -08002121 /* This function only exists to support waiting for existing rendering,
2122 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07002123 */
Chris Wilson05394f32010-11-08 19:18:58 +00002124 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07002125
2126 /* If there is rendering queued on the buffer being evicted, wait for
2127 * it.
2128 */
Chris Wilson05394f32010-11-08 19:18:58 +00002129 if (obj->active) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002130 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
Chris Wilson2cf34d72010-09-14 13:03:28 +01002131 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002132 return ret;
2133 }
2134
2135 return 0;
2136}
2137
2138/**
2139 * Unbinds an object from the GTT aperture.
2140 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002141int
Chris Wilson05394f32010-11-08 19:18:58 +00002142i915_gem_object_unbind(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002143{
Eric Anholt673a3942008-07-30 12:06:12 -07002144 int ret = 0;
2145
Chris Wilson05394f32010-11-08 19:18:58 +00002146 if (obj->gtt_space == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002147 return 0;
2148
Chris Wilson05394f32010-11-08 19:18:58 +00002149 if (obj->pin_count != 0) {
Eric Anholt673a3942008-07-30 12:06:12 -07002150 DRM_ERROR("Attempting to unbind pinned buffer\n");
2151 return -EINVAL;
2152 }
2153
Eric Anholt5323fd02009-09-09 11:50:45 -07002154 /* blow away mappings if mapped through GTT */
2155 i915_gem_release_mmap(obj);
2156
Eric Anholt673a3942008-07-30 12:06:12 -07002157 /* Move the object to the CPU domain to ensure that
2158 * any possible CPU writes while it's not in the GTT
2159 * are flushed when we go to remap it. This will
2160 * also ensure that all pending GPU writes are finished
2161 * before we unbind.
2162 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002163 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Chris Wilson8dc17752010-07-23 23:18:51 +01002164 if (ret == -ERESTARTSYS)
Eric Anholt673a3942008-07-30 12:06:12 -07002165 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002166 /* Continue on if we fail due to EIO, the GPU is hung so we
2167 * should be safe and we need to cleanup or else we might
2168 * cause memory corruption through use-after-free.
2169 */
Chris Wilson812ed4922010-09-30 15:08:57 +01002170 if (ret) {
2171 i915_gem_clflush_object(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002172 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Chris Wilson812ed4922010-09-30 15:08:57 +01002173 }
Eric Anholt673a3942008-07-30 12:06:12 -07002174
Daniel Vetter96b47b62009-12-15 17:50:00 +01002175 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002176 ret = i915_gem_object_put_fence(obj);
2177 if (ret == -ERESTARTSYS)
2178 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002179
Chris Wilsondb53a302011-02-03 11:57:46 +00002180 trace_i915_gem_object_unbind(obj);
2181
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002182 i915_gem_gtt_unbind_object(obj);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002183 i915_gem_object_put_pages_gtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002184
Chris Wilson6299f992010-11-24 12:23:44 +00002185 list_del_init(&obj->gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002186 list_del_init(&obj->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002187 /* Avoid an unnecessary call to unbind on rebind. */
Chris Wilson05394f32010-11-08 19:18:58 +00002188 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002189
Chris Wilson05394f32010-11-08 19:18:58 +00002190 drm_mm_put_block(obj->gtt_space);
2191 obj->gtt_space = NULL;
2192 obj->gtt_offset = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002193
Chris Wilson05394f32010-11-08 19:18:58 +00002194 if (i915_gem_object_is_purgeable(obj))
Chris Wilson963b4832009-09-20 23:03:54 +01002195 i915_gem_object_truncate(obj);
2196
Chris Wilson8dc17752010-07-23 23:18:51 +01002197 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002198}
2199
Chris Wilson88241782011-01-07 17:09:48 +00002200int
Chris Wilsondb53a302011-02-03 11:57:46 +00002201i915_gem_flush_ring(struct intel_ring_buffer *ring,
Chris Wilson54cf91d2010-11-25 18:00:26 +00002202 uint32_t invalidate_domains,
2203 uint32_t flush_domains)
2204{
Chris Wilson88241782011-01-07 17:09:48 +00002205 int ret;
2206
Chris Wilson36d527d2011-03-19 22:26:49 +00002207 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2208 return 0;
2209
Chris Wilsondb53a302011-02-03 11:57:46 +00002210 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2211
Chris Wilson88241782011-01-07 17:09:48 +00002212 ret = ring->flush(ring, invalidate_domains, flush_domains);
2213 if (ret)
2214 return ret;
2215
Chris Wilson36d527d2011-03-19 22:26:49 +00002216 if (flush_domains & I915_GEM_GPU_DOMAINS)
2217 i915_gem_process_flushing_list(ring, flush_domains);
2218
Chris Wilson88241782011-01-07 17:09:48 +00002219 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002220}
2221
Chris Wilsondb53a302011-02-03 11:57:46 +00002222static int i915_ring_idle(struct intel_ring_buffer *ring)
Chris Wilsona56ba562010-09-28 10:07:56 +01002223{
Chris Wilson88241782011-01-07 17:09:48 +00002224 int ret;
2225
Chris Wilson395b70b2010-10-28 21:28:46 +01002226 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
Chris Wilson64193402010-10-24 12:38:05 +01002227 return 0;
2228
Chris Wilson88241782011-01-07 17:09:48 +00002229 if (!list_empty(&ring->gpu_write_list)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002230 ret = i915_gem_flush_ring(ring,
Chris Wilson0ac74c62010-12-06 14:36:02 +00002231 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
Chris Wilson88241782011-01-07 17:09:48 +00002232 if (ret)
2233 return ret;
2234 }
2235
Chris Wilsonce453d82011-02-21 14:43:56 +00002236 return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
Chris Wilsona56ba562010-09-28 10:07:56 +01002237}
2238
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01002239int
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002240i915_gpu_idle(struct drm_device *dev)
2241{
2242 drm_i915_private_t *dev_priv = dev->dev_private;
2243 bool lists_empty;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002244 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002245
Zou Nan haid1b851f2010-05-21 09:08:57 +08002246 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
Chris Wilson395b70b2010-10-28 21:28:46 +01002247 list_empty(&dev_priv->mm.active_list));
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002248 if (lists_empty)
2249 return 0;
2250
2251 /* Flush everything onto the inactive list. */
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002252 for (i = 0; i < I915_NUM_RINGS; i++) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002253 ret = i915_ring_idle(&dev_priv->ring[i]);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002254 if (ret)
2255 return ret;
2256 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002257
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002258 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002259}
2260
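/* The *_write_fence_reg() helpers below pack the object's GTT address range,
 * pitch, tiling mode and a valid bit into the generation-specific fence
 * register layout, either writing the register directly or, when @pipelined
 * is given, emitting MI_LOAD_REGISTER_IMM on that ring so the update is
 * ordered behind outstanding rendering.
 */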
Daniel Vetterc6642782010-11-12 13:46:18 +00002261static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2262 struct intel_ring_buffer *pipelined)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002263{
Chris Wilson05394f32010-11-08 19:18:58 +00002264 struct drm_device *dev = obj->base.dev;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002265 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002266 u32 size = obj->gtt_space->size;
2267 int regnum = obj->fence_reg;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002268 uint64_t val;
2269
Chris Wilson05394f32010-11-08 19:18:58 +00002270 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Daniel Vetterc6642782010-11-12 13:46:18 +00002271 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00002272 val |= obj->gtt_offset & 0xfffff000;
2273 val |= (uint64_t)((obj->stride / 128) - 1) <<
Eric Anholt4e901fd2009-10-26 16:44:17 -07002274 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2275
Chris Wilson05394f32010-11-08 19:18:58 +00002276 if (obj->tiling_mode == I915_TILING_Y)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002277 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2278 val |= I965_FENCE_REG_VALID;
2279
Daniel Vetterc6642782010-11-12 13:46:18 +00002280 if (pipelined) {
2281 int ret = intel_ring_begin(pipelined, 6);
2282 if (ret)
2283 return ret;
2284
2285 intel_ring_emit(pipelined, MI_NOOP);
2286 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2287 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2288 intel_ring_emit(pipelined, (u32)val);
2289 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2290 intel_ring_emit(pipelined, (u32)(val >> 32));
2291 intel_ring_advance(pipelined);
2292 } else
2293 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2294
2295 return 0;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002296}
2297
Daniel Vetterc6642782010-11-12 13:46:18 +00002298static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2299 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002300{
Chris Wilson05394f32010-11-08 19:18:58 +00002301 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002302 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002303 u32 size = obj->gtt_space->size;
2304 int regnum = obj->fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002305 uint64_t val;
2306
Chris Wilson05394f32010-11-08 19:18:58 +00002307 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Jesse Barnesde151cf2008-11-12 10:03:55 -08002308 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00002309 val |= obj->gtt_offset & 0xfffff000;
2310 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2311 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002312 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2313 val |= I965_FENCE_REG_VALID;
2314
Daniel Vetterc6642782010-11-12 13:46:18 +00002315 if (pipelined) {
2316 int ret = intel_ring_begin(pipelined, 6);
2317 if (ret)
2318 return ret;
2319
2320 intel_ring_emit(pipelined, MI_NOOP);
2321 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2322 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2323 intel_ring_emit(pipelined, (u32)val);
2324 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2325 intel_ring_emit(pipelined, (u32)(val >> 32));
2326 intel_ring_advance(pipelined);
2327 } else
2328 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2329
2330 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002331}
2332
Daniel Vetterc6642782010-11-12 13:46:18 +00002333static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2334 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002335{
Chris Wilson05394f32010-11-08 19:18:58 +00002336 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002337 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002338 u32 size = obj->gtt_space->size;
Daniel Vetterc6642782010-11-12 13:46:18 +00002339 u32 fence_reg, val, pitch_val;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002340 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002341
Daniel Vetterc6642782010-11-12 13:46:18 +00002342 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2343 (size & -size) != size ||
2344 (obj->gtt_offset & (size - 1)),
2345 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2346 obj->gtt_offset, obj->map_and_fenceable, size))
2347 return -EINVAL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002348
Daniel Vetterc6642782010-11-12 13:46:18 +00002349 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
Jesse Barnes0f973f22009-01-26 17:10:45 -08002350 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002351 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002352 tile_width = 512;
2353
2354 /* Note: pitch better be a power of two tile widths */
Chris Wilson05394f32010-11-08 19:18:58 +00002355 pitch_val = obj->stride / tile_width;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002356 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002357
Chris Wilson05394f32010-11-08 19:18:58 +00002358 val = obj->gtt_offset;
2359 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002360 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002361 val |= I915_FENCE_SIZE_BITS(size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002362 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2363 val |= I830_FENCE_REG_VALID;
2364
Chris Wilson05394f32010-11-08 19:18:58 +00002365 fence_reg = obj->fence_reg;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002366 if (fence_reg < 8)
2367 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002368 else
Chris Wilsona00b10c2010-09-24 21:15:47 +01002369 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
Daniel Vetterc6642782010-11-12 13:46:18 +00002370
2371 if (pipelined) {
2372 int ret = intel_ring_begin(pipelined, 4);
2373 if (ret)
2374 return ret;
2375
2376 intel_ring_emit(pipelined, MI_NOOP);
2377 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2378 intel_ring_emit(pipelined, fence_reg);
2379 intel_ring_emit(pipelined, val);
2380 intel_ring_advance(pipelined);
2381 } else
2382 I915_WRITE(fence_reg, val);
2383
2384 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002385}
2386
Daniel Vetterc6642782010-11-12 13:46:18 +00002387static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2388 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002389{
Chris Wilson05394f32010-11-08 19:18:58 +00002390 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002391 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002392 u32 size = obj->gtt_space->size;
2393 int regnum = obj->fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002394 uint32_t val;
2395 uint32_t pitch_val;
2396
Daniel Vetterc6642782010-11-12 13:46:18 +00002397 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2398 (size & -size) != size ||
2399 (obj->gtt_offset & (size - 1)),
2400 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2401 obj->gtt_offset, size))
2402 return -EINVAL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002403
Chris Wilson05394f32010-11-08 19:18:58 +00002404 pitch_val = obj->stride / 128;
Eric Anholte76a16d2009-05-26 17:44:56 -07002405 pitch_val = ffs(pitch_val) - 1;
Eric Anholte76a16d2009-05-26 17:44:56 -07002406
Chris Wilson05394f32010-11-08 19:18:58 +00002407 val = obj->gtt_offset;
2408 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002409 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetterc6642782010-11-12 13:46:18 +00002410 val |= I830_FENCE_SIZE_BITS(size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002411 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2412 val |= I830_FENCE_REG_VALID;
2413
Daniel Vetterc6642782010-11-12 13:46:18 +00002414 if (pipelined) {
2415 int ret = intel_ring_begin(pipelined, 4);
2416 if (ret)
2417 return ret;
2418
2419 intel_ring_emit(pipelined, MI_NOOP);
2420 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2421 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2422 intel_ring_emit(pipelined, val);
2423 intel_ring_advance(pipelined);
2424 } else
2425 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2426
2427 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002428}
2429
Chris Wilsond9e86c02010-11-10 16:40:20 +00002430static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2431{
2432 return i915_seqno_passed(ring->get_seqno(ring), seqno);
2433}
2434
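/* Quiesce any GPU access still covering the fenced range: flush the
 * outstanding GPU write domain, wait for the last fenced seqno when it was
 * emitted on a different ring than the one we want to pipeline the update
 * on, and finally issue a memory barrier so CPU accesses through the old
 * fence are ordered against the register update.
 */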
2435static int
2436i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
Chris Wilsonce453d82011-02-21 14:43:56 +00002437 struct intel_ring_buffer *pipelined)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002438{
2439 int ret;
2440
2441 if (obj->fenced_gpu_access) {
Chris Wilson88241782011-01-07 17:09:48 +00002442 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002443 ret = i915_gem_flush_ring(obj->last_fenced_ring,
Chris Wilson88241782011-01-07 17:09:48 +00002444 0, obj->base.write_domain);
2445 if (ret)
2446 return ret;
2447 }
Chris Wilsond9e86c02010-11-10 16:40:20 +00002448
2449 obj->fenced_gpu_access = false;
2450 }
2451
2452 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2453 if (!ring_passed_seqno(obj->last_fenced_ring,
2454 obj->last_fenced_seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002455 ret = i915_wait_request(obj->last_fenced_ring,
Chris Wilsonce453d82011-02-21 14:43:56 +00002456 obj->last_fenced_seqno);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002457 if (ret)
2458 return ret;
2459 }
2460
2461 obj->last_fenced_seqno = 0;
2462 obj->last_fenced_ring = NULL;
2463 }
2464
Chris Wilson63256ec2011-01-04 18:42:07 +00002465 /* Ensure that all CPU reads are completed before installing a fence
2466 * and all writes before removing the fence.
2467 */
2468 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2469 mb();
2470
Chris Wilsond9e86c02010-11-10 16:40:20 +00002471 return 0;
2472}
2473
2474int
2475i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2476{
2477 int ret;
2478
2479 if (obj->tiling_mode)
2480 i915_gem_release_mmap(obj);
2481
Chris Wilsonce453d82011-02-21 14:43:56 +00002482 ret = i915_gem_object_flush_fence(obj, NULL);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002483 if (ret)
2484 return ret;
2485
2486 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2487 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2488 i915_gem_clear_fence_reg(obj->base.dev,
2489 &dev_priv->fence_regs[obj->fence_reg]);
2490
2491 obj->fence_reg = I915_FENCE_REG_NONE;
2492 }
2493
2494 return 0;
2495}
2496
2497static struct drm_i915_fence_reg *
2498i915_find_fence_reg(struct drm_device *dev,
2499 struct intel_ring_buffer *pipelined)
Daniel Vetterae3db242010-02-19 11:51:58 +01002500{
Daniel Vetterae3db242010-02-19 11:51:58 +01002501 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002502 struct drm_i915_fence_reg *reg, *first, *avail;
2503 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01002504
2505 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002506 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002507 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2508 reg = &dev_priv->fence_regs[i];
2509 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002510 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002511
Chris Wilson05394f32010-11-08 19:18:58 +00002512 if (!reg->obj->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002513 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002514 }
2515
Chris Wilsond9e86c02010-11-10 16:40:20 +00002516 if (avail == NULL)
2517 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002518
2519 /* None available, try to steal one or wait for a user to finish */
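	/* When stealing, prefer a register whose object was last fenced on
	 * @pipelined (or on no ring at all) so that the update can be queued
	 * without a stall; otherwise fall back to the first unpinned entry.
	 */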
Chris Wilsond9e86c02010-11-10 16:40:20 +00002520 avail = first = NULL;
2521 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2522 if (reg->obj->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01002523 continue;
2524
Chris Wilsond9e86c02010-11-10 16:40:20 +00002525 if (first == NULL)
2526 first = reg;
2527
2528 if (!pipelined ||
2529 !reg->obj->last_fenced_ring ||
2530 reg->obj->last_fenced_ring == pipelined) {
2531 avail = reg;
2532 break;
2533 }
Daniel Vetterae3db242010-02-19 11:51:58 +01002534 }
2535
Chris Wilsond9e86c02010-11-10 16:40:20 +00002536 if (avail == NULL)
2537 avail = first;
Daniel Vetterae3db242010-02-19 11:51:58 +01002538
Chris Wilsona00b10c2010-09-24 21:15:47 +01002539 return avail;
Daniel Vetterae3db242010-02-19 11:51:58 +01002540}
2541
Jesse Barnesde151cf2008-11-12 10:03:55 -08002542/**
Chris Wilsond9e86c02010-11-10 16:40:20 +00002543 * i915_gem_object_get_fence - set up a fence reg for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08002544 * @obj: object to map through a fence reg
Chris Wilsond9e86c02010-11-10 16:40:20 +00002545 * @pipelined: ring on which to queue the change, or NULL for CPU access
Jesse Barnesde151cf2008-11-12 10:03:55 -08002547 *
2548 * When mapping objects through the GTT, userspace wants to be able to write
2549 * to them without having to worry about swizzling if the object is tiled.
2550 *
2551 * This function walks the fence regs looking for a free one for @obj,
2552 * stealing one if it can't find any.
2553 *
2554 * It then sets up the reg based on the object's properties: address, pitch
2555 * and tiling format.
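 *
 * As a usage sketch (not taken verbatim from any caller), the object is
 * typically pinned with a mappable/fenceable placement before the fence is
 * requested:
 *
 *	ret = i915_gem_object_pin(obj, 0, true);
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj, NULL);
 *
 * with NULL passed for @pipelined when the fenced access comes from the CPU.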
2556 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002557int
Chris Wilsond9e86c02010-11-10 16:40:20 +00002558i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
Chris Wilsonce453d82011-02-21 14:43:56 +00002559 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002560{
Chris Wilson05394f32010-11-08 19:18:58 +00002561 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002562 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002563 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002564 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002565
Chris Wilson6bda10d2010-12-05 21:04:18 +00002566 /* XXX disable pipelining. There are bugs. Shocking. */
2567 pipelined = NULL;
2568
Chris Wilsond9e86c02010-11-10 16:40:20 +00002569 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00002570 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2571 reg = &dev_priv->fence_regs[obj->fence_reg];
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002572 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002573
Chris Wilson29c5a582011-03-17 15:23:22 +00002574 if (obj->tiling_changed) {
2575 ret = i915_gem_object_flush_fence(obj, pipelined);
2576 if (ret)
2577 return ret;
2578
2579 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2580 pipelined = NULL;
2581
2582 if (pipelined) {
2583 reg->setup_seqno =
2584 i915_gem_next_request_seqno(pipelined);
2585 obj->last_fenced_seqno = reg->setup_seqno;
2586 obj->last_fenced_ring = pipelined;
2587 }
2588
2589 goto update;
2590 }
Chris Wilsond9e86c02010-11-10 16:40:20 +00002591
2592 if (!pipelined) {
2593 if (reg->setup_seqno) {
2594 if (!ring_passed_seqno(obj->last_fenced_ring,
2595 reg->setup_seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002596 ret = i915_wait_request(obj->last_fenced_ring,
Chris Wilsonce453d82011-02-21 14:43:56 +00002597 reg->setup_seqno);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002598 if (ret)
2599 return ret;
2600 }
2601
2602 reg->setup_seqno = 0;
2603 }
2604 } else if (obj->last_fenced_ring &&
2605 obj->last_fenced_ring != pipelined) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002606 ret = i915_gem_object_flush_fence(obj, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002607 if (ret)
2608 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002609 }
2610
Eric Anholta09ba7f2009-08-29 12:49:51 -07002611 return 0;
2612 }
2613
Chris Wilsond9e86c02010-11-10 16:40:20 +00002614 reg = i915_find_fence_reg(dev, pipelined);
2615 if (reg == NULL)
2616 return -ENOSPC;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002617
Chris Wilsonce453d82011-02-21 14:43:56 +00002618 ret = i915_gem_object_flush_fence(obj, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002619 if (ret)
Daniel Vetterae3db242010-02-19 11:51:58 +01002620 return ret;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002621
Chris Wilsond9e86c02010-11-10 16:40:20 +00002622 if (reg->obj) {
2623 struct drm_i915_gem_object *old = reg->obj;
2624
2625 drm_gem_object_reference(&old->base);
2626
2627 if (old->tiling_mode)
2628 i915_gem_release_mmap(old);
2629
Chris Wilsonce453d82011-02-21 14:43:56 +00002630 ret = i915_gem_object_flush_fence(old, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002631 if (ret) {
2632 drm_gem_object_unreference(&old->base);
2633 return ret;
2634 }
2635
2636 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2637 pipelined = NULL;
2638
2639 old->fence_reg = I915_FENCE_REG_NONE;
2640 old->last_fenced_ring = pipelined;
2641 old->last_fenced_seqno =
Chris Wilsondb53a302011-02-03 11:57:46 +00002642 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002643
2644 drm_gem_object_unreference(&old->base);
2645 } else if (obj->last_fenced_seqno == 0)
2646 pipelined = NULL;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002647
Jesse Barnesde151cf2008-11-12 10:03:55 -08002648 reg->obj = obj;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002649 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2650 obj->fence_reg = reg - dev_priv->fence_regs;
2651 obj->last_fenced_ring = pipelined;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002652
Chris Wilsond9e86c02010-11-10 16:40:20 +00002653 reg->setup_seqno =
Chris Wilsondb53a302011-02-03 11:57:46 +00002654 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002655 obj->last_fenced_seqno = reg->setup_seqno;
2656
2657update:
2658 obj->tiling_changed = false;
Chris Wilsone259bef2010-09-17 00:32:02 +01002659 switch (INTEL_INFO(dev)->gen) {
Eric Anholt25aebfc32011-05-06 13:55:53 -07002660 case 7:
Chris Wilsone259bef2010-09-17 00:32:02 +01002661 case 6:
Daniel Vetterc6642782010-11-12 13:46:18 +00002662 ret = sandybridge_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002663 break;
2664 case 5:
2665 case 4:
Daniel Vetterc6642782010-11-12 13:46:18 +00002666 ret = i965_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002667 break;
2668 case 3:
Daniel Vetterc6642782010-11-12 13:46:18 +00002669 ret = i915_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002670 break;
2671 case 2:
Daniel Vetterc6642782010-11-12 13:46:18 +00002672 ret = i830_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002673 break;
2674 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002675
Daniel Vetterc6642782010-11-12 13:46:18 +00002676 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002677}
2678
2679/**
2680 * i915_gem_clear_fence_reg - clear out fence register info
 2681 * @dev: DRM device
 * @reg: fence register to clear
2682 *
2683 * Zeroes out the fence register itself and clears out the associated
Chris Wilson05394f32010-11-08 19:18:58 +00002684 * data structures in dev_priv and obj.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002685 */
2686static void
Chris Wilsond9e86c02010-11-10 16:40:20 +00002687i915_gem_clear_fence_reg(struct drm_device *dev,
2688 struct drm_i915_fence_reg *reg)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002689{
Jesse Barnes79e53942008-11-07 14:24:08 -08002690 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002691 uint32_t fence_reg = reg - dev_priv->fence_regs;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002692
Chris Wilsone259bef2010-09-17 00:32:02 +01002693 switch (INTEL_INFO(dev)->gen) {
Eric Anholt25aebfc32011-05-06 13:55:53 -07002694 case 7:
Chris Wilsone259bef2010-09-17 00:32:02 +01002695 case 6:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002696 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002697 break;
2698 case 5:
2699 case 4:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002700 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002701 break;
2702 case 3:
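		/* gen3 keeps fence registers 8-15 in the 945 bank; the "else"
		 * below intentionally falls through into "case 2" so that gen2
		 * and the low gen3 registers share the FENCE_REG_830_0
		 * calculation.
		 */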
Chris Wilsond9e86c02010-11-10 16:40:20 +00002703 if (fence_reg >= 8)
2704 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002705 else
Chris Wilsone259bef2010-09-17 00:32:02 +01002706 case 2:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002707 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002708
2709 I915_WRITE(fence_reg, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002710 break;
Eric Anholtdc529a42009-03-10 22:34:49 -07002711 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002712
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002713 list_del_init(&reg->lru_list);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002714 reg->obj = NULL;
2715 reg->setup_seqno = 0;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002716}
2717
2718/**
Eric Anholt673a3942008-07-30 12:06:12 -07002719 * Finds free space in the GTT aperture and binds the object there.
2720 */
2721static int
Chris Wilson05394f32010-11-08 19:18:58 +00002722i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
Daniel Vetter920afa72010-09-16 17:54:23 +02002723 unsigned alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01002724 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07002725{
Chris Wilson05394f32010-11-08 19:18:58 +00002726 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07002727 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002728 struct drm_mm_node *free_space;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002729 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Daniel Vetter5e783302010-11-14 22:32:36 +01002730 u32 size, fence_size, fence_alignment, unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002731 bool mappable, fenceable;
Chris Wilson07f73f62009-09-14 16:50:30 +01002732 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002733
Chris Wilson05394f32010-11-08 19:18:58 +00002734 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002735 DRM_ERROR("Attempting to bind a purgeable object\n");
2736 return -EINVAL;
2737 }
2738
Chris Wilsone28f8712011-07-18 13:11:49 -07002739 fence_size = i915_gem_get_gtt_size(dev,
2740 obj->base.size,
2741 obj->tiling_mode);
2742 fence_alignment = i915_gem_get_gtt_alignment(dev,
2743 obj->base.size,
2744 obj->tiling_mode);
2745 unfenced_alignment =
2746 i915_gem_get_unfenced_gtt_alignment(dev,
2747 obj->base.size,
2748 obj->tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002749
Eric Anholt673a3942008-07-30 12:06:12 -07002750 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01002751 alignment = map_and_fenceable ? fence_alignment :
2752 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002753 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002754 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2755 return -EINVAL;
2756 }
2757
Chris Wilson05394f32010-11-08 19:18:58 +00002758 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002759
Chris Wilson654fc602010-05-27 13:18:21 +01002760 /* If the object is bigger than the entire aperture, reject it early
2761 * before evicting everything in a vain attempt to find space.
2762 */
Chris Wilson05394f32010-11-08 19:18:58 +00002763 if (obj->base.size >
Daniel Vetter75e9e912010-11-04 17:11:09 +01002764 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
Chris Wilson654fc602010-05-27 13:18:21 +01002765 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2766 return -E2BIG;
2767 }
2768
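	/* Binding strategy used below: search for a free GTT range of the
	 * required size; if none is found, evict something and retry.  If the
	 * backing pages cannot be allocated, evict everything from the GTT
	 * and retry; should even that fail, retry once more without the
	 * __GFP_NORETRY | __GFP_NOWARN allocation flags before giving up.
	 */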
Eric Anholt673a3942008-07-30 12:06:12 -07002769 search_free:
Daniel Vetter75e9e912010-11-04 17:11:09 +01002770 if (map_and_fenceable)
Daniel Vetter920afa72010-09-16 17:54:23 +02002771 free_space =
2772 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002773 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002774 dev_priv->mm.gtt_mappable_end,
2775 0);
2776 else
2777 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002778 size, alignment, 0);
Daniel Vetter920afa72010-09-16 17:54:23 +02002779
2780 if (free_space != NULL) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01002781 if (map_and_fenceable)
Chris Wilson05394f32010-11-08 19:18:58 +00002782 obj->gtt_space =
Daniel Vetter920afa72010-09-16 17:54:23 +02002783 drm_mm_get_block_range_generic(free_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002784 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002785 dev_priv->mm.gtt_mappable_end,
2786 0);
2787 else
Chris Wilson05394f32010-11-08 19:18:58 +00002788 obj->gtt_space =
Chris Wilsona00b10c2010-09-24 21:15:47 +01002789 drm_mm_get_block(free_space, size, alignment);
Daniel Vetter920afa72010-09-16 17:54:23 +02002790 }
Chris Wilson05394f32010-11-08 19:18:58 +00002791 if (obj->gtt_space == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07002792		/* No free block of the right size was found; try to make
 2793		 * room by evicting something and then retry the search.
 2794		 */
Daniel Vetter75e9e912010-11-04 17:11:09 +01002795 ret = i915_gem_evict_something(dev, size, alignment,
2796 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01002797 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002798 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002799
Eric Anholt673a3942008-07-30 12:06:12 -07002800 goto search_free;
2801 }
2802
Chris Wilsone5281cc2010-10-28 13:45:36 +01002803 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002804 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00002805 drm_mm_put_block(obj->gtt_space);
2806 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002807
2808 if (ret == -ENOMEM) {
Chris Wilson809b6332011-01-10 17:33:15 +00002809 /* first try to reclaim some memory by clearing the GTT */
2810 ret = i915_gem_evict_everything(dev, false);
Chris Wilson07f73f62009-09-14 16:50:30 +01002811 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002812 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002813 if (gfpmask) {
2814 gfpmask = 0;
2815 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002816 }
2817
Chris Wilson809b6332011-01-10 17:33:15 +00002818 return -ENOMEM;
Chris Wilson07f73f62009-09-14 16:50:30 +01002819 }
2820
2821 goto search_free;
2822 }
2823
Eric Anholt673a3942008-07-30 12:06:12 -07002824 return ret;
2825 }
2826
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002827 ret = i915_gem_gtt_bind_object(obj);
2828 if (ret) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01002829 i915_gem_object_put_pages_gtt(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002830 drm_mm_put_block(obj->gtt_space);
2831 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002832
Chris Wilson809b6332011-01-10 17:33:15 +00002833 if (i915_gem_evict_everything(dev, false))
Chris Wilson07f73f62009-09-14 16:50:30 +01002834 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002835
2836 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002837 }
Eric Anholt673a3942008-07-30 12:06:12 -07002838
Chris Wilson6299f992010-11-24 12:23:44 +00002839 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002840 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002841
Eric Anholt673a3942008-07-30 12:06:12 -07002842 /* Assert that the object is not currently in any GPU domain. As it
2843 * wasn't in the GTT, there shouldn't be any way it could have been in
2844 * a GPU cache
2845 */
Chris Wilson05394f32010-11-08 19:18:58 +00002846 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2847 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002848
Chris Wilson6299f992010-11-24 12:23:44 +00002849 obj->gtt_offset = obj->gtt_space->start;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002850
Daniel Vetter75e9e912010-11-04 17:11:09 +01002851 fenceable =
Chris Wilson05394f32010-11-08 19:18:58 +00002852 obj->gtt_space->size == fence_size &&
 2853		(obj->gtt_space->start & (fence_alignment - 1)) == 0;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002854
Daniel Vetter75e9e912010-11-04 17:11:09 +01002855 mappable =
Chris Wilson05394f32010-11-08 19:18:58 +00002856 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002857
Chris Wilson05394f32010-11-08 19:18:58 +00002858 obj->map_and_fenceable = mappable && fenceable;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002859
Chris Wilsondb53a302011-02-03 11:57:46 +00002860 trace_i915_gem_object_bind(obj, map_and_fenceable);
Eric Anholt673a3942008-07-30 12:06:12 -07002861 return 0;
2862}
2863
2864void
Chris Wilson05394f32010-11-08 19:18:58 +00002865i915_gem_clflush_object(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002866{
Eric Anholt673a3942008-07-30 12:06:12 -07002867 /* If we don't have a page list set up, then we're not pinned
2868 * to GPU, and we can ignore the cache flush because it'll happen
2869 * again at bind time.
2870 */
Chris Wilson05394f32010-11-08 19:18:58 +00002871 if (obj->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002872 return;
2873
Chris Wilson9c23f7f2011-03-29 16:59:52 -07002874 /* If the GPU is snooping the contents of the CPU cache,
2875 * we do not need to manually clear the CPU cache lines. However,
2876 * the caches are only snooped when the render cache is
2877 * flushed/invalidated. As we always have to emit invalidations
2878 * and flushes when moving into and out of the RENDER domain, correct
2879 * snooping behaviour occurs naturally as the result of our domain
2880 * tracking.
2881 */
2882 if (obj->cache_level != I915_CACHE_NONE)
2883 return;
2884
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002885 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002886
Chris Wilson05394f32010-11-08 19:18:58 +00002887 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002888}
2889
Eric Anholte47c68e2008-11-14 13:35:19 -08002890/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson88241782011-01-07 17:09:48 +00002891static int
Chris Wilson3619df02010-11-28 15:37:17 +00002892i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002893{
Chris Wilson05394f32010-11-08 19:18:58 +00002894 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson88241782011-01-07 17:09:48 +00002895 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002896
2897 /* Queue the GPU write cache flushing we need. */
Chris Wilsondb53a302011-02-03 11:57:46 +00002898 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002899}
2900
2901/** Flushes the GTT write domain for the object if it's dirty. */
2902static void
Chris Wilson05394f32010-11-08 19:18:58 +00002903i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002904{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002905 uint32_t old_write_domain;
2906
Chris Wilson05394f32010-11-08 19:18:58 +00002907 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08002908 return;
2909
Chris Wilson63256ec2011-01-04 18:42:07 +00002910 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08002911 * to it immediately go to main memory as far as we know, so there's
2912 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00002913 *
2914 * However, we do have to enforce the order so that all writes through
2915 * the GTT land before any writes to the device, such as updates to
2916 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08002917 */
Chris Wilson63256ec2011-01-04 18:42:07 +00002918 wmb();
2919
Chris Wilson05394f32010-11-08 19:18:58 +00002920 old_write_domain = obj->base.write_domain;
2921 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002922
2923 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002924 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002925 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002926}
2927
2928/** Flushes the CPU write domain for the object if it's dirty. */
2929static void
Chris Wilson05394f32010-11-08 19:18:58 +00002930i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002931{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002932 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002933
Chris Wilson05394f32010-11-08 19:18:58 +00002934 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08002935 return;
2936
2937 i915_gem_clflush_object(obj);
Daniel Vetter40ce6572010-11-05 18:12:18 +01002938 intel_gtt_chipset_flush();
Chris Wilson05394f32010-11-08 19:18:58 +00002939 old_write_domain = obj->base.write_domain;
2940 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002941
2942 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002943 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002944 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002945}
2946
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002947/**
2948 * Moves a single object to the GTT read, and possibly write domain.
2949 *
2950 * This function returns when the move is complete, including waiting on
2951 * flushes to occur.
2952 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002953int
Chris Wilson20217462010-11-23 15:26:33 +00002954i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002955{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002956 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002957 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002958
Eric Anholt02354392008-11-26 13:58:13 -08002959 /* Not valid to be called on unbound objects. */
Chris Wilson05394f32010-11-08 19:18:58 +00002960 if (obj->gtt_space == NULL)
Eric Anholt02354392008-11-26 13:58:13 -08002961 return -EINVAL;
2962
Chris Wilson8d7e3de2011-02-07 15:23:02 +00002963 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2964 return 0;
2965
Chris Wilson88241782011-01-07 17:09:48 +00002966 ret = i915_gem_object_flush_gpu_write_domain(obj);
2967 if (ret)
2968 return ret;
2969
Chris Wilson87ca9c82010-12-02 09:42:56 +00002970 if (obj->pending_gpu_write || write) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002971 ret = i915_gem_object_wait_rendering(obj);
Chris Wilson87ca9c82010-12-02 09:42:56 +00002972 if (ret)
2973 return ret;
2974 }
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002975
Chris Wilson72133422010-09-13 23:56:38 +01002976 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002977
Chris Wilson05394f32010-11-08 19:18:58 +00002978 old_write_domain = obj->base.write_domain;
2979 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002980
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002981 /* It should now be out of any other write domains, and we can update
2982 * the domain values for our changes.
2983 */
Chris Wilson05394f32010-11-08 19:18:58 +00002984 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2985 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002986 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002987 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2988 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2989 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08002990 }
2991
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002992 trace_i915_gem_object_change_domain(obj,
2993 old_read_domains,
2994 old_write_domain);
2995
Eric Anholte47c68e2008-11-14 13:35:19 -08002996 return 0;
2997}
2998
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002999/*
3000 * Prepare buffer for display plane. Use uninterruptible for possible flush
 3001 * wait, as during the modesetting process we're not supposed to be interrupted.
3002 */
3003int
Chris Wilson05394f32010-11-08 19:18:58 +00003004i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
Chris Wilson919926a2010-11-12 13:42:53 +00003005 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003006{
Daniel Vetterba3d8d72010-02-11 22:37:04 +01003007 uint32_t old_read_domains;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003008 int ret;
3009
3010 /* Not valid to be called on unbound objects. */
Chris Wilson05394f32010-11-08 19:18:58 +00003011 if (obj->gtt_space == NULL)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003012 return -EINVAL;
3013
Chris Wilson88241782011-01-07 17:09:48 +00003014 ret = i915_gem_object_flush_gpu_write_domain(obj);
3015 if (ret)
3016 return ret;
3017
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003018
Chris Wilsonced270f2010-09-26 22:47:46 +01003019	/* Currently, we are always called from a non-interruptible context. */
Chris Wilson0be73282010-12-06 14:36:27 +00003020 if (pipelined != obj->ring) {
Chris Wilsonce453d82011-02-21 14:43:56 +00003021 ret = i915_gem_object_wait_rendering(obj);
Chris Wilsonced270f2010-09-26 22:47:46 +01003022 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003023 return ret;
3024 }
3025
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003026 i915_gem_object_flush_cpu_write_domain(obj);
3027
Chris Wilson05394f32010-11-08 19:18:58 +00003028 old_read_domains = obj->base.read_domains;
3029 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003030
3031 trace_i915_gem_object_change_domain(obj,
3032 old_read_domains,
Chris Wilson05394f32010-11-08 19:18:58 +00003033 obj->base.write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003034
3035 return 0;
3036}
3037
Chris Wilson85345512010-11-13 09:49:11 +00003038int
Chris Wilsonce453d82011-02-21 14:43:56 +00003039i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003040{
Chris Wilson88241782011-01-07 17:09:48 +00003041 int ret;
3042
Chris Wilson85345512010-11-13 09:49:11 +00003043 if (!obj->active)
3044 return 0;
3045
Chris Wilson88241782011-01-07 17:09:48 +00003046 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00003047 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Chris Wilson88241782011-01-07 17:09:48 +00003048 if (ret)
3049 return ret;
3050 }
Chris Wilson85345512010-11-13 09:49:11 +00003051
Chris Wilsonce453d82011-02-21 14:43:56 +00003052 return i915_gem_object_wait_rendering(obj);
Chris Wilson85345512010-11-13 09:49:11 +00003053}
3054
Eric Anholte47c68e2008-11-14 13:35:19 -08003055/**
3056 * Moves a single object to the CPU read, and possibly write domain.
3057 *
3058 * This function returns when the move is complete, including waiting on
3059 * flushes to occur.
3060 */
3061static int
Chris Wilson919926a2010-11-12 13:42:53 +00003062i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003063{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003064 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003065 int ret;
3066
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003067 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3068 return 0;
3069
Chris Wilson88241782011-01-07 17:09:48 +00003070 ret = i915_gem_object_flush_gpu_write_domain(obj);
3071 if (ret)
3072 return ret;
3073
Chris Wilsonce453d82011-02-21 14:43:56 +00003074 ret = i915_gem_object_wait_rendering(obj);
Daniel Vetterde18a292010-11-27 22:30:41 +01003075 if (ret)
Eric Anholte47c68e2008-11-14 13:35:19 -08003076 return ret;
3077
3078 i915_gem_object_flush_gtt_write_domain(obj);
3079
3080 /* If we have a partially-valid cache of the object in the CPU,
3081 * finish invalidating it and free the per-page flags.
3082 */
3083 i915_gem_object_set_to_full_cpu_read_domain(obj);
3084
Chris Wilson05394f32010-11-08 19:18:58 +00003085 old_write_domain = obj->base.write_domain;
3086 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003087
Eric Anholte47c68e2008-11-14 13:35:19 -08003088 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003089 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Eric Anholte47c68e2008-11-14 13:35:19 -08003090 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003091
Chris Wilson05394f32010-11-08 19:18:58 +00003092 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003093 }
3094
3095 /* It should now be out of any other write domains, and we can update
3096 * the domain values for our changes.
3097 */
Chris Wilson05394f32010-11-08 19:18:58 +00003098 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003099
3100 /* If we're writing through the CPU, then the GPU read domains will
3101 * need to be invalidated at next use.
3102 */
3103 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003104 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3105 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003106 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003107
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003108 trace_i915_gem_object_change_domain(obj,
3109 old_read_domains,
3110 old_write_domain);
3111
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003112 return 0;
3113}
3114
Eric Anholt673a3942008-07-30 12:06:12 -07003115/**
Eric Anholte47c68e2008-11-14 13:35:19 -08003116 * Moves the object from a partially valid CPU read domain to a fully valid one.
Eric Anholt673a3942008-07-30 12:06:12 -07003117 *
Eric Anholte47c68e2008-11-14 13:35:19 -08003118 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3119 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3120 */
3121static void
Chris Wilson05394f32010-11-08 19:18:58 +00003122i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003123{
Chris Wilson05394f32010-11-08 19:18:58 +00003124 if (!obj->page_cpu_valid)
Eric Anholte47c68e2008-11-14 13:35:19 -08003125 return;
3126
3127 /* If we're partially in the CPU read domain, finish moving it in.
3128 */
Chris Wilson05394f32010-11-08 19:18:58 +00003129 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
Eric Anholte47c68e2008-11-14 13:35:19 -08003130 int i;
3131
Chris Wilson05394f32010-11-08 19:18:58 +00003132 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3133 if (obj->page_cpu_valid[i])
Eric Anholte47c68e2008-11-14 13:35:19 -08003134 continue;
Chris Wilson05394f32010-11-08 19:18:58 +00003135 drm_clflush_pages(obj->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08003136 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003137 }
3138
3139 /* Free the page_cpu_valid mappings which are now stale, whether
3140 * or not we've got I915_GEM_DOMAIN_CPU.
3141 */
Chris Wilson05394f32010-11-08 19:18:58 +00003142 kfree(obj->page_cpu_valid);
3143 obj->page_cpu_valid = NULL;
Eric Anholte47c68e2008-11-14 13:35:19 -08003144}
3145
3146/**
3147 * Set the CPU read domain on a range of the object.
3148 *
3149 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3150 * not entirely valid. The page_cpu_valid member of the object flags which
3151 * pages have been flushed, and will be respected by
3152 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3153 * of the whole object.
3154 *
3155 * This function returns when the move is complete, including waiting on
3156 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07003157 */
3158static int
Chris Wilson05394f32010-11-08 19:18:58 +00003159i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
Eric Anholte47c68e2008-11-14 13:35:19 -08003160 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07003161{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003162 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003163 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003164
Chris Wilson05394f32010-11-08 19:18:58 +00003165 if (offset == 0 && size == obj->base.size)
Eric Anholte47c68e2008-11-14 13:35:19 -08003166 return i915_gem_object_set_to_cpu_domain(obj, 0);
3167
Chris Wilson88241782011-01-07 17:09:48 +00003168 ret = i915_gem_object_flush_gpu_write_domain(obj);
3169 if (ret)
3170 return ret;
3171
Chris Wilsonce453d82011-02-21 14:43:56 +00003172 ret = i915_gem_object_wait_rendering(obj);
Daniel Vetterde18a292010-11-27 22:30:41 +01003173 if (ret)
Eric Anholte47c68e2008-11-14 13:35:19 -08003174 return ret;
Daniel Vetterde18a292010-11-27 22:30:41 +01003175
Eric Anholte47c68e2008-11-14 13:35:19 -08003176 i915_gem_object_flush_gtt_write_domain(obj);
3177
3178 /* If we're already fully in the CPU read domain, we're done. */
Chris Wilson05394f32010-11-08 19:18:58 +00003179 if (obj->page_cpu_valid == NULL &&
3180 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003181 return 0;
3182
Eric Anholte47c68e2008-11-14 13:35:19 -08003183 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3184 * newly adding I915_GEM_DOMAIN_CPU
3185 */
Chris Wilson05394f32010-11-08 19:18:58 +00003186 if (obj->page_cpu_valid == NULL) {
3187 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3188 GFP_KERNEL);
3189 if (obj->page_cpu_valid == NULL)
Eric Anholte47c68e2008-11-14 13:35:19 -08003190 return -ENOMEM;
Chris Wilson05394f32010-11-08 19:18:58 +00003191 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3192 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003193
3194 /* Flush the cache on any pages that are still invalid from the CPU's
3195 * perspective.
3196 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003197 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3198 i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00003199 if (obj->page_cpu_valid[i])
Eric Anholt673a3942008-07-30 12:06:12 -07003200 continue;
3201
Chris Wilson05394f32010-11-08 19:18:58 +00003202 drm_clflush_pages(obj->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003203
Chris Wilson05394f32010-11-08 19:18:58 +00003204 obj->page_cpu_valid[i] = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07003205 }
3206
Eric Anholte47c68e2008-11-14 13:35:19 -08003207 /* It should now be out of any other write domains, and we can update
3208 * the domain values for our changes.
3209 */
Chris Wilson05394f32010-11-08 19:18:58 +00003210 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003211
Chris Wilson05394f32010-11-08 19:18:58 +00003212 old_read_domains = obj->base.read_domains;
3213 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003214
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003215 trace_i915_gem_object_change_domain(obj,
3216 old_read_domains,
Chris Wilson05394f32010-11-08 19:18:58 +00003217 obj->base.write_domain);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003218
Eric Anholt673a3942008-07-30 12:06:12 -07003219 return 0;
3220}
3221
Eric Anholt673a3942008-07-30 12:06:12 -07003222/* Throttle our rendering by waiting until the ring has completed our requests
3223 * emitted over 20 msec ago.
3224 *
Eric Anholtb9624422009-06-03 07:27:35 +00003225 * Note that if we were to use the current jiffies each time around the loop,
3226 * we wouldn't escape the function with any frames outstanding if the time to
3227 * render a frame was over 20ms.
3228 *
Eric Anholt673a3942008-07-30 12:06:12 -07003229 * This should get us reasonable parallelism between CPU and GPU but also
3230 * relatively low latency when blocking on a particular request to finish.
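 *
 * Concretely, the loop below picks the most recent of this client's requests
 * that was emitted more than 20ms ago and waits for its seqno to pass,
 * without taking struct_mutex.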
3231 */
3232static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003233i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003234{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003235 struct drm_i915_private *dev_priv = dev->dev_private;
3236 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003237 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003238 struct drm_i915_gem_request *request;
3239 struct intel_ring_buffer *ring = NULL;
3240 u32 seqno = 0;
3241 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003242
Chris Wilsone110e8d2011-01-26 15:39:14 +00003243 if (atomic_read(&dev_priv->mm.wedged))
3244 return -EIO;
3245
Chris Wilson1c255952010-09-26 11:03:27 +01003246 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003247 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003248 if (time_after_eq(request->emitted_jiffies, recent_enough))
3249 break;
3250
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003251 ring = request->ring;
3252 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003253 }
Chris Wilson1c255952010-09-26 11:03:27 +01003254 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003255
3256 if (seqno == 0)
3257 return 0;
3258
3259 ret = 0;
Chris Wilson78501ea2010-10-27 12:18:21 +01003260 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003261		/* And wait for the seqno to pass without holding any locks or
3262 * causing extra latency for others. This is safe as the irq
3263 * generation is designed to be run atomically and so is
3264 * lockless.
3265 */
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003266 if (ring->irq_get(ring)) {
3267 ret = wait_event_interruptible(ring->irq_queue,
3268 i915_seqno_passed(ring->get_seqno(ring), seqno)
3269 || atomic_read(&dev_priv->mm.wedged));
3270 ring->irq_put(ring);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003271
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003272 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3273 ret = -EIO;
3274 }
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003275 }
3276
3277 if (ret == 0)
3278 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003279
Eric Anholt673a3942008-07-30 12:06:12 -07003280 return ret;
3281}
3282
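/* Pin @obj into the GTT, binding (or rebinding) it first if its current
 * placement does not satisfy @alignment or @map_and_fenceable, and bump the
 * pin count.  Expects struct_mutex to be held; undone by
 * i915_gem_object_unpin().
 */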
Eric Anholt673a3942008-07-30 12:06:12 -07003283int
Chris Wilson05394f32010-11-08 19:18:58 +00003284i915_gem_object_pin(struct drm_i915_gem_object *obj,
3285 uint32_t alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003286 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07003287{
Chris Wilson05394f32010-11-08 19:18:58 +00003288 struct drm_device *dev = obj->base.dev;
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003289 struct drm_i915_private *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003290 int ret;
3291
Chris Wilson05394f32010-11-08 19:18:58 +00003292 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
Chris Wilson23bc5982010-09-29 16:10:57 +01003293 WARN_ON(i915_verify_lists(dev));
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003294
Chris Wilson05394f32010-11-08 19:18:58 +00003295 if (obj->gtt_space != NULL) {
3296 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3297 (map_and_fenceable && !obj->map_and_fenceable)) {
3298 WARN(obj->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003299 "bo is already pinned with incorrect alignment:"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003300 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3301 " obj->map_and_fenceable=%d\n",
Chris Wilson05394f32010-11-08 19:18:58 +00003302 obj->gtt_offset, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003303 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003304 obj->map_and_fenceable);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003305 ret = i915_gem_object_unbind(obj);
3306 if (ret)
3307 return ret;
3308 }
3309 }
3310
Chris Wilson05394f32010-11-08 19:18:58 +00003311 if (obj->gtt_space == NULL) {
Chris Wilsona00b10c2010-09-24 21:15:47 +01003312 ret = i915_gem_object_bind_to_gtt(obj, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003313 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01003314 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003315 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00003316 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003317
Chris Wilson05394f32010-11-08 19:18:58 +00003318 if (obj->pin_count++ == 0) {
Chris Wilson05394f32010-11-08 19:18:58 +00003319 if (!obj->active)
3320 list_move_tail(&obj->mm_list,
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003321 &dev_priv->mm.pinned_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003322 }
Chris Wilson6299f992010-11-24 12:23:44 +00003323 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003324
Chris Wilson23bc5982010-09-29 16:10:57 +01003325 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003326 return 0;
3327}
3328
3329void
Chris Wilson05394f32010-11-08 19:18:58 +00003330i915_gem_object_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003331{
Chris Wilson05394f32010-11-08 19:18:58 +00003332 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003333 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003334
Chris Wilson23bc5982010-09-29 16:10:57 +01003335 WARN_ON(i915_verify_lists(dev));
Chris Wilson05394f32010-11-08 19:18:58 +00003336 BUG_ON(obj->pin_count == 0);
3337 BUG_ON(obj->gtt_space == NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07003338
Chris Wilson05394f32010-11-08 19:18:58 +00003339 if (--obj->pin_count == 0) {
3340 if (!obj->active)
3341 list_move_tail(&obj->mm_list,
Eric Anholt673a3942008-07-30 12:06:12 -07003342 &dev_priv->mm.inactive_list);
Chris Wilson6299f992010-11-24 12:23:44 +00003343 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003344 }
Chris Wilson23bc5982010-09-29 16:10:57 +01003345 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003346}
3347
3348int
3349i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003350 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003351{
3352 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003353 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003354 int ret;
3355
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003356 ret = i915_mutex_lock_interruptible(dev);
3357 if (ret)
3358 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003359
Chris Wilson05394f32010-11-08 19:18:58 +00003360 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003361 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003362 ret = -ENOENT;
3363 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003364 }
Eric Anholt673a3942008-07-30 12:06:12 -07003365
Chris Wilson05394f32010-11-08 19:18:58 +00003366 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003367 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003368 ret = -EINVAL;
3369 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003370 }
3371
Chris Wilson05394f32010-11-08 19:18:58 +00003372 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003373 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3374 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003375 ret = -EINVAL;
3376 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003377 }
3378
Chris Wilson05394f32010-11-08 19:18:58 +00003379 obj->user_pin_count++;
3380 obj->pin_filp = file;
3381 if (obj->user_pin_count == 1) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01003382 ret = i915_gem_object_pin(obj, args->alignment, true);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003383 if (ret)
3384 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003385 }
3386
3387 /* XXX - flush the CPU caches for pinned objects
3388 * as the X server doesn't manage domains yet
3389 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003390 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00003391 args->offset = obj->gtt_offset;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003392out:
Chris Wilson05394f32010-11-08 19:18:58 +00003393 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003394unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003395 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003396 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003397}
3398
3399int
3400i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003401 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003402{
3403 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003404 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003405 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003406
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003407 ret = i915_mutex_lock_interruptible(dev);
3408 if (ret)
3409 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003410
Chris Wilson05394f32010-11-08 19:18:58 +00003411 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003412 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003413 ret = -ENOENT;
3414 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003415 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003416
Chris Wilson05394f32010-11-08 19:18:58 +00003417 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003418 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3419 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003420 ret = -EINVAL;
3421 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003422 }
Chris Wilson05394f32010-11-08 19:18:58 +00003423 obj->user_pin_count--;
3424 if (obj->user_pin_count == 0) {
3425 obj->pin_filp = NULL;
Jesse Barnes79e53942008-11-07 14:24:08 -08003426 i915_gem_object_unpin(obj);
3427 }
Eric Anholt673a3942008-07-30 12:06:12 -07003428
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003429out:
Chris Wilson05394f32010-11-08 19:18:58 +00003430 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003431unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003432 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003433 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003434}
3435
3436int
3437i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003438 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003439{
3440 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003441 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003442 int ret;
3443
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003444 ret = i915_mutex_lock_interruptible(dev);
3445 if (ret)
3446 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003447
Chris Wilson05394f32010-11-08 19:18:58 +00003448 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003449 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003450 ret = -ENOENT;
3451 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003452 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003453
Chris Wilson0be555b2010-08-04 15:36:30 +01003454 /* Count all active objects as busy, even if they are currently not used
3455 * by the gpu. Users of this interface expect objects to eventually
3456 * become non-busy without any further actions, therefore emit any
3457 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08003458 */
Chris Wilson05394f32010-11-08 19:18:58 +00003459 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003460 if (args->busy) {
3461 /* Unconditionally flush objects, even when the gpu still uses this
3462 * object. Userspace calling this function indicates that it wants to
3463 * use this buffer sooner rather than later, so issuing the required
3464 * flush earlier is beneficial.
3465 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003466 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00003467 ret = i915_gem_flush_ring(obj->ring,
Chris Wilson88241782011-01-07 17:09:48 +00003468 0, obj->base.write_domain);
Chris Wilson1a1c6972010-12-07 23:00:20 +00003469 } else if (obj->ring->outstanding_lazy_request ==
3470 obj->last_rendering_seqno) {
3471 struct drm_i915_gem_request *request;
3472
Chris Wilson7a194872010-12-07 10:38:40 +00003473 /* This ring is not being cleared by active usage,
3474 * so emit a request to do so.
3475 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003476 request = kzalloc(sizeof(*request), GFP_KERNEL);
3477 if (request)
Chris Wilsondb53a302011-02-03 11:57:46 +00003478 ret = i915_add_request(obj->ring, NULL, request);
Chris Wilson1a1c6972010-12-07 23:00:20 +00003479 else
Chris Wilson7a194872010-12-07 10:38:40 +00003480 ret = -ENOMEM;
3481 }
Chris Wilson0be555b2010-08-04 15:36:30 +01003482
3483 /* Update the active list for the hardware's current position.
3484 * Otherwise this only updates on a delayed timer or when irqs
3485 * are actually unmasked, and our working set ends up being
3486 * larger than required.
3487 */
Chris Wilsondb53a302011-02-03 11:57:46 +00003488 i915_gem_retire_requests_ring(obj->ring);
Chris Wilson0be555b2010-08-04 15:36:30 +01003489
Chris Wilson05394f32010-11-08 19:18:58 +00003490 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003491 }
Eric Anholt673a3942008-07-30 12:06:12 -07003492
Chris Wilson05394f32010-11-08 19:18:58 +00003493 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003494unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003495 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003496 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003497}
3498
3499int
3500i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3501 struct drm_file *file_priv)
3502{
3503 return i915_gem_ring_throttle(dev, file_priv);
3504}
3505
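/*
 * i915_gem_madvise_ioctl: hint whether an object's backing storage is still
 * needed (I915_MADV_WILLNEED) or may be discarded under memory pressure
 * (I915_MADV_DONTNEED).  Pinned objects are rejected, and an unbound
 * purgeable object is truncated immediately.  On return, args->retained is
 * cleared if the pages had already been purged.
 *
 * Illustrative userspace sketch (assumes a libdrm fd and a valid handle):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 */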
Chris Wilson3ef94da2009-09-14 16:50:29 +01003506int
3507i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3508 struct drm_file *file_priv)
3509{
3510 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003511 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003512 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003513
3514 switch (args->madv) {
3515 case I915_MADV_DONTNEED:
3516 case I915_MADV_WILLNEED:
3517 break;
3518 default:
3519 return -EINVAL;
3520 }
3521
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003522 ret = i915_mutex_lock_interruptible(dev);
3523 if (ret)
3524 return ret;
3525
Chris Wilson05394f32010-11-08 19:18:58 +00003526 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003527 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003528 ret = -ENOENT;
3529 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003530 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01003531
Chris Wilson05394f32010-11-08 19:18:58 +00003532 if (obj->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003533 ret = -EINVAL;
3534 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003535 }
3536
Chris Wilson05394f32010-11-08 19:18:58 +00003537 if (obj->madv != __I915_MADV_PURGED)
3538 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003539
Chris Wilson2d7ef392009-09-20 23:13:10 +01003540 /* if the object is no longer bound, discard its backing storage */
Chris Wilson05394f32010-11-08 19:18:58 +00003541 if (i915_gem_object_is_purgeable(obj) &&
3542 obj->gtt_space == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003543 i915_gem_object_truncate(obj);
3544
Chris Wilson05394f32010-11-08 19:18:58 +00003545 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003546
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003547out:
Chris Wilson05394f32010-11-08 19:18:58 +00003548 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003549unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01003550 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003551 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003552}
3553
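/*
 * i915_gem_alloc_object: allocate a shmem-backed GEM object of the given
 * size.  The new object starts in the CPU read and write domains, uncached,
 * with no fence register, and is marked map_and_fenceable to avoid a
 * needless unbind on its first bind.
 */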
Chris Wilson05394f32010-11-08 19:18:58 +00003554struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3555 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00003556{
Chris Wilson73aa8082010-09-30 11:46:12 +01003557 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterc397b902010-04-09 19:05:07 +00003558 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07003559 struct address_space *mapping;
Daniel Vetterc397b902010-04-09 19:05:07 +00003560
3561 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3562 if (obj == NULL)
3563 return NULL;
3564
3565 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3566 kfree(obj);
3567 return NULL;
3568 }
3569
Hugh Dickins5949eac2011-06-27 16:18:18 -07003570 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3571 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3572
Chris Wilson73aa8082010-09-30 11:46:12 +01003573 i915_gem_info_add_obj(dev_priv, size);
3574
Daniel Vetterc397b902010-04-09 19:05:07 +00003575 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3576 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3577
Chris Wilson93dfb402011-03-29 16:59:50 -07003578 obj->cache_level = I915_CACHE_NONE;
Daniel Vetter62b8b212010-04-09 19:05:08 +00003579 obj->base.driver_private = NULL;
Daniel Vetterc397b902010-04-09 19:05:07 +00003580 obj->fence_reg = I915_FENCE_REG_NONE;
Chris Wilson69dc4982010-10-19 10:36:51 +01003581 INIT_LIST_HEAD(&obj->mm_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003582 INIT_LIST_HEAD(&obj->gtt_list);
Chris Wilson69dc4982010-10-19 10:36:51 +01003583 INIT_LIST_HEAD(&obj->ring_list);
Chris Wilson432e58e2010-11-25 19:32:06 +00003584 INIT_LIST_HEAD(&obj->exec_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003585 INIT_LIST_HEAD(&obj->gpu_write_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003586 obj->madv = I915_MADV_WILLNEED;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003587 /* Avoid an unnecessary call to unbind on the first bind. */
3588 obj->map_and_fenceable = true;
Daniel Vetterc397b902010-04-09 19:05:07 +00003589
Chris Wilson05394f32010-11-08 19:18:58 +00003590 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00003591}
3592
Eric Anholt673a3942008-07-30 12:06:12 -07003593int i915_gem_init_object(struct drm_gem_object *obj)
3594{
Daniel Vetterc397b902010-04-09 19:05:07 +00003595 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08003596
Eric Anholt673a3942008-07-30 12:06:12 -07003597 return 0;
3598}
3599
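/*
 * i915_gem_free_object_tail: final teardown of a GEM object.  If the unbind
 * is interrupted by a signal the object is parked on the deferred_free_list
 * and destruction is retried later.
 */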
Chris Wilson05394f32010-11-08 19:18:58 +00003600static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01003601{
Chris Wilson05394f32010-11-08 19:18:58 +00003602 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01003603 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbe726152010-07-23 23:18:50 +01003604 int ret;
3605
3606 ret = i915_gem_object_unbind(obj);
3607 if (ret == -ERESTARTSYS) {
Chris Wilson05394f32010-11-08 19:18:58 +00003608 list_move(&obj->mm_list,
Chris Wilsonbe726152010-07-23 23:18:50 +01003609 &dev_priv->mm.deferred_free_list);
3610 return;
3611 }
3612
Chris Wilson26e12f892011-03-20 11:20:19 +00003613 trace_i915_gem_object_destroy(obj);
3614
Chris Wilson05394f32010-11-08 19:18:58 +00003615 if (obj->base.map_list.map)
Chris Wilsonbe726152010-07-23 23:18:50 +01003616 i915_gem_free_mmap_offset(obj);
3617
Chris Wilson05394f32010-11-08 19:18:58 +00003618 drm_gem_object_release(&obj->base);
3619 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01003620
Chris Wilson05394f32010-11-08 19:18:58 +00003621 kfree(obj->page_cpu_valid);
3622 kfree(obj->bit_17);
3623 kfree(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01003624}
3625
Chris Wilson05394f32010-11-08 19:18:58 +00003626void i915_gem_free_object(struct drm_gem_object *gem_obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003627{
Chris Wilson05394f32010-11-08 19:18:58 +00003628 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3629 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003630
Chris Wilson05394f32010-11-08 19:18:58 +00003631 while (obj->pin_count > 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003632 i915_gem_object_unpin(obj);
3633
Chris Wilson05394f32010-11-08 19:18:58 +00003634 if (obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003635 i915_gem_detach_phys_object(dev, obj);
3636
Chris Wilsonbe726152010-07-23 23:18:50 +01003637 i915_gem_free_object_tail(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003638}
3639
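/*
 * i915_gem_idle: quiesce the GPU for suspend or lastclose.  Waits for
 * outstanding rendering, evicts inactive objects under UMS, resets the fence
 * registers and stops the hangcheck timer and retire work handler.
 */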
Jesse Barnes5669fca2009-02-17 15:13:31 -08003640int
Eric Anholt673a3942008-07-30 12:06:12 -07003641i915_gem_idle(struct drm_device *dev)
3642{
3643 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00003644 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003645
Keith Packard6dbe2772008-10-14 21:41:13 -07003646 mutex_lock(&dev->struct_mutex);
3647
Chris Wilson87acb0a2010-10-19 10:13:00 +01003648 if (dev_priv->mm.suspended) {
Keith Packard6dbe2772008-10-14 21:41:13 -07003649 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003650 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07003651 }
Eric Anholt673a3942008-07-30 12:06:12 -07003652
Chris Wilson29105cc2010-01-07 10:39:13 +00003653 ret = i915_gpu_idle(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003654 if (ret) {
3655 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003656 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07003657 }
Eric Anholt673a3942008-07-30 12:06:12 -07003658
Chris Wilson29105cc2010-01-07 10:39:13 +00003659 /* Under UMS, be paranoid and evict. */
3660 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
Chris Wilson5eac3ab2010-10-31 08:49:47 +00003661 ret = i915_gem_evict_inactive(dev, false);
Chris Wilson29105cc2010-01-07 10:39:13 +00003662 if (ret) {
3663 mutex_unlock(&dev->struct_mutex);
3664 return ret;
3665 }
3666 }
3667
Chris Wilson312817a2010-11-22 11:50:11 +00003668 i915_gem_reset_fences(dev);
3669
Chris Wilson29105cc2010-01-07 10:39:13 +00003670 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3671 * We need to replace this with a semaphore, or something.
3672 * And not confound mm.suspended!
3673 */
3674 dev_priv->mm.suspended = 1;
Daniel Vetterbc0c7f12010-08-20 18:18:48 +02003675 del_timer_sync(&dev_priv->hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00003676
3677 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003678 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00003679
Keith Packard6dbe2772008-10-14 21:41:13 -07003680 mutex_unlock(&dev->struct_mutex);
3681
Chris Wilson29105cc2010-01-07 10:39:13 +00003682 /* Cancel the retire work handler, which should be idle now. */
3683 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3684
Eric Anholt673a3942008-07-30 12:06:12 -07003685 return 0;
3686}
3687
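/*
 * i915_gem_init_ringbuffer: bring up the render ring, plus the BSD and BLT
 * rings where the hardware has them, unwinding any already-initialized rings
 * on failure.
 */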
Eric Anholt673a3942008-07-30 12:06:12 -07003688int
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003689i915_gem_init_ringbuffer(struct drm_device *dev)
3690{
3691 drm_i915_private_t *dev_priv = dev->dev_private;
3692 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003693
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003694 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003695 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00003696 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003697
3698 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003699 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003700 if (ret)
3701 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08003702 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01003703
Chris Wilson549f7362010-10-19 11:19:32 +01003704 if (HAS_BLT(dev)) {
3705 ret = intel_init_blt_ring_buffer(dev);
3706 if (ret)
3707 goto cleanup_bsd_ring;
3708 }
3709
Chris Wilson6f392d5482010-08-07 11:01:22 +01003710 dev_priv->next_seqno = 1;
3711
Chris Wilson68f95ba2010-05-27 13:18:22 +01003712 return 0;
3713
Chris Wilson549f7362010-10-19 11:19:32 +01003714cleanup_bsd_ring:
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003715 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003716cleanup_render_ring:
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003717 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003718 return ret;
3719}
3720
3721void
3722i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3723{
3724 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003725 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003726
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003727 for (i = 0; i < I915_NUM_RINGS; i++)
3728 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003729}
3730
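/*
 * i915_gem_entervt_ioctl: re-enable GEM after a VT switch under UMS.  Clears
 * any wedged state, reinitializes the rings and reinstalls the interrupt
 * handler; a no-op under KMS.
 */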
3731int
Eric Anholt673a3942008-07-30 12:06:12 -07003732i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3733 struct drm_file *file_priv)
3734{
3735 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003736 int ret, i;
Eric Anholt673a3942008-07-30 12:06:12 -07003737
Jesse Barnes79e53942008-11-07 14:24:08 -08003738 if (drm_core_check_feature(dev, DRIVER_MODESET))
3739 return 0;
3740
Ben Gamariba1234d2009-09-14 17:48:47 -04003741 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003742 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04003743 atomic_set(&dev_priv->mm.wedged, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003744 }
3745
Eric Anholt673a3942008-07-30 12:06:12 -07003746 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003747 dev_priv->mm.suspended = 0;
3748
3749 ret = i915_gem_init_ringbuffer(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08003750 if (ret != 0) {
3751 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003752 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08003753 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003754
Chris Wilson69dc4982010-10-19 10:36:51 +01003755 BUG_ON(!list_empty(&dev_priv->mm.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07003756 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3757 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003758 for (i = 0; i < I915_NUM_RINGS; i++) {
3759 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3760 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3761 }
Eric Anholt673a3942008-07-30 12:06:12 -07003762 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003763
Chris Wilson5f353082010-06-07 14:03:03 +01003764 ret = drm_irq_install(dev);
3765 if (ret)
3766 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003767
Eric Anholt673a3942008-07-30 12:06:12 -07003768 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01003769
3770cleanup_ringbuffer:
3771 mutex_lock(&dev->struct_mutex);
3772 i915_gem_cleanup_ringbuffer(dev);
3773 dev_priv->mm.suspended = 1;
3774 mutex_unlock(&dev->struct_mutex);
3775
3776 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003777}
3778
3779int
3780i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3781 struct drm_file *file_priv)
3782{
Jesse Barnes79e53942008-11-07 14:24:08 -08003783 if (drm_core_check_feature(dev, DRIVER_MODESET))
3784 return 0;
3785
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003786 drm_irq_uninstall(dev);
Linus Torvaldse6890f62009-09-08 17:09:24 -07003787 return i915_gem_idle(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003788}
3789
3790void
3791i915_gem_lastclose(struct drm_device *dev)
3792{
3793 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003794
Eric Anholte806b492009-01-22 09:56:58 -08003795 if (drm_core_check_feature(dev, DRIVER_MODESET))
3796 return;
3797
Keith Packard6dbe2772008-10-14 21:41:13 -07003798 ret = i915_gem_idle(dev);
3799 if (ret)
3800 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003801}
3802
Chris Wilson64193402010-10-24 12:38:05 +01003803static void
3804init_ring_lists(struct intel_ring_buffer *ring)
3805{
3806 INIT_LIST_HEAD(&ring->active_list);
3807 INIT_LIST_HEAD(&ring->request_list);
3808 INIT_LIST_HEAD(&ring->gpu_write_list);
3809}
3810
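/*
 * i915_gem_load: one-time GEM setup at driver load.  Initializes the object,
 * fence and request lists, the retire work handler, the per-generation fence
 * register count, and registers the inactive-list shrinker.
 */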
Eric Anholt673a3942008-07-30 12:06:12 -07003811void
3812i915_gem_load(struct drm_device *dev)
3813{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003814 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07003815 drm_i915_private_t *dev_priv = dev->dev_private;
3816
Chris Wilson69dc4982010-10-19 10:36:51 +01003817 INIT_LIST_HEAD(&dev_priv->mm.active_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003818 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3819 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003820 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07003821 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilsonbe726152010-07-23 23:18:50 +01003822 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003823 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003824 for (i = 0; i < I915_NUM_RINGS; i++)
3825 init_ring_lists(&dev_priv->ring[i]);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02003826 for (i = 0; i < 16; i++)
3827 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003828 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3829 i915_gem_retire_work_handler);
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003830 init_completion(&dev_priv->error_completion);
Chris Wilson31169712009-09-14 16:50:28 +01003831
Dave Airlie94400122010-07-20 13:15:31 +10003832 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3833 if (IS_GEN3(dev)) {
3834 u32 tmp = I915_READ(MI_ARB_STATE);
3835 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3836 /* arb state is a masked write, so set bit + bit in mask */
3837 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3838 I915_WRITE(MI_ARB_STATE, tmp);
3839 }
3840 }
3841
Chris Wilson72bfa192010-12-19 11:42:05 +00003842 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3843
Jesse Barnesde151cf2008-11-12 10:03:55 -08003844 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08003845 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3846 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003847
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003848 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08003849 dev_priv->num_fence_regs = 16;
3850 else
3851 dev_priv->num_fence_regs = 8;
3852
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003853 /* Initialize fence registers to zero */
Eric Anholt10ed13e2011-05-06 13:53:49 -07003854 for (i = 0; i < dev_priv->num_fence_regs; i++) {
3855 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003856 }
Eric Anholt10ed13e2011-05-06 13:53:49 -07003857
Eric Anholt673a3942008-07-30 12:06:12 -07003858 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003859 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01003860
Chris Wilsonce453d82011-02-21 14:43:56 +00003861 dev_priv->mm.interruptible = true;
3862
Chris Wilson17250b72010-10-28 12:51:39 +01003863 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3864 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3865 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07003866}
Dave Airlie71acb5e2008-12-30 20:31:46 +10003867
3868/*
3869 * Create a physically contiguous memory object for this object
3870 * e.g. for cursor + overlay regs
3871 */
Chris Wilson995b6762010-08-20 13:23:26 +01003872static int i915_gem_init_phys_object(struct drm_device *dev,
3873 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003874{
3875 drm_i915_private_t *dev_priv = dev->dev_private;
3876 struct drm_i915_gem_phys_object *phys_obj;
3877 int ret;
3878
3879 if (dev_priv->mm.phys_objs[id - 1] || !size)
3880 return 0;
3881
Eric Anholt9a298b22009-03-24 12:23:04 -07003882 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003883 if (!phys_obj)
3884 return -ENOMEM;
3885
3886 phys_obj->id = id;
3887
Chris Wilson6eeefaf2010-08-07 11:01:39 +01003888 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003889 if (!phys_obj->handle) {
3890 ret = -ENOMEM;
3891 goto kfree_obj;
3892 }
3893#ifdef CONFIG_X86
3894 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3895#endif
3896
3897 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3898
3899 return 0;
3900kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07003901 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003902 return ret;
3903}
3904
Chris Wilson995b6762010-08-20 13:23:26 +01003905static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003906{
3907 drm_i915_private_t *dev_priv = dev->dev_private;
3908 struct drm_i915_gem_phys_object *phys_obj;
3909
3910 if (!dev_priv->mm.phys_objs[id - 1])
3911 return;
3912
3913 phys_obj = dev_priv->mm.phys_objs[id - 1];
3914 if (phys_obj->cur_obj) {
3915 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3916 }
3917
3918#ifdef CONFIG_X86
3919 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3920#endif
3921 drm_pci_free(dev, phys_obj->handle);
3922 kfree(phys_obj);
3923 dev_priv->mm.phys_objs[id - 1] = NULL;
3924}
3925
3926void i915_gem_free_all_phys_object(struct drm_device *dev)
3927{
3928 int i;
3929
Dave Airlie260883c2009-01-22 17:58:49 +10003930 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003931 i915_gem_free_phys_object(dev, i);
3932}
3933
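/*
 * i915_gem_detach_phys_object: copy the contents of the contiguous DMA
 * buffer back into the object's shmem pages and drop the phys binding.
 */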
3934void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00003935 struct drm_i915_gem_object *obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003936{
Chris Wilson05394f32010-11-08 19:18:58 +00003937 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Chris Wilsone5281cc2010-10-28 13:45:36 +01003938 char *vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003939 int i;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003940 int page_count;
3941
Chris Wilson05394f32010-11-08 19:18:58 +00003942 if (!obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003943 return;
Chris Wilson05394f32010-11-08 19:18:58 +00003944 vaddr = obj->phys_obj->handle->vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003945
Chris Wilson05394f32010-11-08 19:18:58 +00003946 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003947 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07003948 struct page *page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01003949 if (!IS_ERR(page)) {
3950 char *dst = kmap_atomic(page);
3951 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
3952 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003953
Chris Wilsone5281cc2010-10-28 13:45:36 +01003954 drm_clflush_pages(&page, 1);
3955
3956 set_page_dirty(page);
3957 mark_page_accessed(page);
3958 page_cache_release(page);
3959 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10003960 }
Daniel Vetter40ce6572010-11-05 18:12:18 +01003961 intel_gtt_chipset_flush();
Chris Wilsond78b47b2009-06-17 21:52:49 +01003962
Chris Wilson05394f32010-11-08 19:18:58 +00003963 obj->phys_obj->cur_obj = NULL;
3964 obj->phys_obj = NULL;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003965}
3966
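/*
 * i915_gem_attach_phys_object: bind an object to a physically contiguous
 * buffer (allocating one on first use for this id) and copy the current
 * shmem contents into it.
 */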
3967int
3968i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00003969 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01003970 int id,
3971 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003972{
Chris Wilson05394f32010-11-08 19:18:58 +00003973 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003974 drm_i915_private_t *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003975 int ret = 0;
3976 int page_count;
3977 int i;
3978
3979 if (id > I915_MAX_PHYS_OBJECT)
3980 return -EINVAL;
3981
Chris Wilson05394f32010-11-08 19:18:58 +00003982 if (obj->phys_obj) {
3983 if (obj->phys_obj->id == id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003984 return 0;
3985 i915_gem_detach_phys_object(dev, obj);
3986 }
3987
Dave Airlie71acb5e2008-12-30 20:31:46 +10003988 /* create a new object */
3989 if (!dev_priv->mm.phys_objs[id - 1]) {
3990 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson05394f32010-11-08 19:18:58 +00003991 obj->base.size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003992 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00003993 DRM_ERROR("failed to init phys object %d size: %zu\n",
3994 id, obj->base.size);
Chris Wilsone5281cc2010-10-28 13:45:36 +01003995 return ret;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003996 }
3997 }
3998
3999 /* bind to the object */
Chris Wilson05394f32010-11-08 19:18:58 +00004000 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4001 obj->phys_obj->cur_obj = obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004002
Chris Wilson05394f32010-11-08 19:18:58 +00004003 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004004
4005 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01004006 struct page *page;
4007 char *dst, *src;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004008
Hugh Dickins5949eac2011-06-27 16:18:18 -07004009 page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004010 if (IS_ERR(page))
4011 return PTR_ERR(page);
4012
Chris Wilsonff75b9b2010-10-30 22:52:31 +01004013 src = kmap_atomic(page);
Chris Wilson05394f32010-11-08 19:18:58 +00004014 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004015 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004016 kunmap_atomic(src);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004017
4018 mark_page_accessed(page);
4019 page_cache_release(page);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004020 }
4021
4022 return 0;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004023}
4024
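/*
 * i915_gem_phys_pwrite: pwrite fast path for phys objects.  User data is
 * copied straight into the contiguous buffer; if the atomic copy faults,
 * struct_mutex is dropped so that a plain copy_from_user() can take page
 * faults safely.
 */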
4025static int
Chris Wilson05394f32010-11-08 19:18:58 +00004026i915_gem_phys_pwrite(struct drm_device *dev,
4027 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +10004028 struct drm_i915_gem_pwrite *args,
4029 struct drm_file *file_priv)
4030{
Chris Wilson05394f32010-11-08 19:18:58 +00004031 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004032 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004033
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004034 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4035 unsigned long unwritten;
4036
4037 /* The physical object once assigned is fixed for the lifetime
4038 * of the obj, so we can safely drop the lock and continue
4039 * to access vaddr.
4040 */
4041 mutex_unlock(&dev->struct_mutex);
4042 unwritten = copy_from_user(vaddr, user_data, args->size);
4043 mutex_lock(&dev->struct_mutex);
4044 if (unwritten)
4045 return -EFAULT;
4046 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004047
Daniel Vetter40ce6572010-11-05 18:12:18 +01004048 intel_gtt_chipset_flush();
Dave Airlie71acb5e2008-12-30 20:31:46 +10004049 return 0;
4050}
Eric Anholtb9624422009-06-03 07:27:35 +00004051
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004052void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004053{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004054 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00004055
4056 /* Clean up our request list when the client is going away, so that
4057 * later retire_requests won't dereference our soon-to-be-gone
4058 * file_priv.
4059 */
Chris Wilson1c255952010-09-26 11:03:27 +01004060 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004061 while (!list_empty(&file_priv->mm.request_list)) {
4062 struct drm_i915_gem_request *request;
4063
4064 request = list_first_entry(&file_priv->mm.request_list,
4065 struct drm_i915_gem_request,
4066 client_list);
4067 list_del(&request->client_list);
4068 request->file_priv = NULL;
4069 }
Chris Wilson1c255952010-09-26 11:03:27 +01004070 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00004071}
Chris Wilson31169712009-09-14 16:50:28 +01004072
Chris Wilson31169712009-09-14 16:50:28 +01004073static int
Chris Wilson1637ef42010-04-20 17:10:35 +01004074i915_gpu_is_active(struct drm_device *dev)
4075{
4076 drm_i915_private_t *dev_priv = dev->dev_private;
4077 int lists_empty;
4078
Chris Wilson1637ef42010-04-20 17:10:35 +01004079 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
Chris Wilson17250b72010-10-28 12:51:39 +01004080 list_empty(&dev_priv->mm.active_list);
Chris Wilson1637ef42010-04-20 17:10:35 +01004081
4082 return !lists_empty;
4083}
4084
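/*
 * i915_gem_inactive_shrink: memory shrinker callback.  With nr_to_scan == 0
 * it only reports a scaled count of inactive objects; otherwise it retires
 * requests, unbinds purgeable and then inactive objects, and as a last
 * resort waits for the GPU to idle before rescanning.
 */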
4085static int
Ying Han1495f232011-05-24 17:12:27 -07004086i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
Chris Wilson31169712009-09-14 16:50:28 +01004087{
Chris Wilson17250b72010-10-28 12:51:39 +01004088 struct drm_i915_private *dev_priv =
4089 container_of(shrinker,
4090 struct drm_i915_private,
4091 mm.inactive_shrinker);
4092 struct drm_device *dev = dev_priv->dev;
4093 struct drm_i915_gem_object *obj, *next;
Ying Han1495f232011-05-24 17:12:27 -07004094 int nr_to_scan = sc->nr_to_scan;
Chris Wilson17250b72010-10-28 12:51:39 +01004095 int cnt;
4096
4097 if (!mutex_trylock(&dev->struct_mutex))
Chris Wilsonbbe2e112010-10-28 22:35:07 +01004098 return 0;
Chris Wilson31169712009-09-14 16:50:28 +01004099
4100 /* "fast-path" to count number of available objects */
4101 if (nr_to_scan == 0) {
Chris Wilson17250b72010-10-28 12:51:39 +01004102 cnt = 0;
4103 list_for_each_entry(obj,
4104 &dev_priv->mm.inactive_list,
4105 mm_list)
4106 cnt++;
4107 mutex_unlock(&dev->struct_mutex);
4108 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01004109 }
4110
Chris Wilson1637ef42010-04-20 17:10:35 +01004111rescan:
Chris Wilson31169712009-09-14 16:50:28 +01004112 /* first scan for clean buffers */
Chris Wilson17250b72010-10-28 12:51:39 +01004113 i915_gem_retire_requests(dev);
Chris Wilson31169712009-09-14 16:50:28 +01004114
Chris Wilson17250b72010-10-28 12:51:39 +01004115 list_for_each_entry_safe(obj, next,
4116 &dev_priv->mm.inactive_list,
4117 mm_list) {
4118 if (i915_gem_object_is_purgeable(obj)) {
Chris Wilson20217462010-11-23 15:26:33 +00004119 if (i915_gem_object_unbind(obj) == 0 &&
4120 --nr_to_scan == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01004121 break;
Chris Wilson31169712009-09-14 16:50:28 +01004122 }
Chris Wilson31169712009-09-14 16:50:28 +01004123 }
4124
4125 /* second pass, evict/count anything still on the inactive list */
Chris Wilson17250b72010-10-28 12:51:39 +01004126 cnt = 0;
4127 list_for_each_entry_safe(obj, next,
4128 &dev_priv->mm.inactive_list,
4129 mm_list) {
Chris Wilson20217462010-11-23 15:26:33 +00004130 if (nr_to_scan &&
4131 i915_gem_object_unbind(obj) == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01004132 nr_to_scan--;
Chris Wilson20217462010-11-23 15:26:33 +00004133 else
Chris Wilson17250b72010-10-28 12:51:39 +01004134 cnt++;
Chris Wilson31169712009-09-14 16:50:28 +01004135 }
4136
Chris Wilson17250b72010-10-28 12:51:39 +01004137 if (nr_to_scan && i915_gpu_is_active(dev)) {
Chris Wilson1637ef42010-04-20 17:10:35 +01004138 /*
4139 * We are desperate for pages, so as a last resort, wait
4140 * for the GPU to finish and discard whatever we can.
4141 * This dramatically reduces the number of
4142 * OOM-killer events whilst running the GPU aggressively.
4143 */
Chris Wilson17250b72010-10-28 12:51:39 +01004144 if (i915_gpu_idle(dev) == 0)
Chris Wilson1637ef42010-04-20 17:10:35 +01004145 goto rescan;
4146 }
Chris Wilson17250b72010-10-28 12:51:39 +01004147 mutex_unlock(&dev->struct_mutex);
4148 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01004149}