/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
							   bool write);
static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
								   uint64_t offset,
								   uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						     unsigned alignment,
						     bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
				     struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    int nr_to_scan,
				    gfp_t gfp_mask);


/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

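/*
 * If a GPU hang has been flagged (mm.wedged), wait for the error-recovery
 * completion to signal before letting the caller proceed, so ioctls do not
 * race with a reset in progress.
 */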
static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}

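/*
 * Take struct_mutex interruptibly, first waiting out any pending GPU error
 * recovery via i915_gem_wait_for_error().
 */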
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

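/*
 * An object is "inactive" when it is bound into the GTT but is neither in
 * use by the GPU nor pinned.
 */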
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

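/*
 * Record the GTT range GEM manages (start, mappable end, end), initialise
 * the range allocator over it and clear any stale PTEs in that range.
 */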
void i915_gem_do_init(struct drm_device *dev,
		      unsigned long start,
		      unsigned long mappable_end,
		      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* Take over this portion of the GTT */
	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	args->handle = handle;
	return 0;
}

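/*
 * Tiled objects need manual bit-17 fixups on CPU access when the platform
 * uses 9/10/17 swizzling, since bit 17 of the physical page address is not
 * visible to userspace.
 */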
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

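/*
 * Copy a span of bytes between two pages via temporary kernel mappings;
 * used by the slow pread/pwrite paths, which must not fault on user memory
 * while struct_mutex is held.
 */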
static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

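/*
 * As slow_shmem_copy(), but for object pages whose physical address has
 * bit 17 set the page offset is XORed with bit 6, compensating for the
 * swizzling that userspace cannot account for itself.
 */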
static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page);
		ret = __copy_to_user_inatomic(user_data,
					      vaddr + page_offset,
					      page_length);
		kunmap_atomic(vaddr);

		mark_page_accessed(page);
		page_cache_release(page);
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pread path, which allocates temporary storage
 * in kernel space to copy_to_user into outside of the struct_mutex, so we
 * can copy out of the object's backing pages while holding the struct mutex
 * and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					page,
					shmem_page_offset,
					page_length);
		}

		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		mark_page_accessed(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	ret = -EFAULT;
	if (!i915_gem_object_needs_bit17_swizzle(obj))
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin_pages;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin_pages;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page, KM_USER0);
		ret = __copy_from_user_inatomic(vaddr + page_offset,
						user_data,
						page_length);
		kunmap_atomic(vaddr, KM_USER0);

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(page,
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
	else if (obj->gtt_space &&
		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_object_pin(obj, 0, true);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			goto out_unpin;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
		i915_gem_object_unpin(obj);
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			goto out;

		ret = -EFAULT;
		if (!i915_gem_object_needs_bit17_swizzle(obj))
			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (obj->size > dev_priv->mm.gtt_mappable_end) {
		drm_gem_object_unreference_unlocked(obj);
		return -E2BIG;
	}

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	/* Now bind it into the GTT if needed */
	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unlock;

	if (obj->tiling_mode == I915_TILING_NONE)
		ret = i915_gem_object_put_fence(obj);
	else
		ret = i915_gem_object_get_fence(obj, NULL, true);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->base.map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->base.size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->base.size / PAGE_SIZE,
						    0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n",
			  obj->base.name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->base.size / PAGE_SIZE,
						  0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}

Chris Wilson901782b2009-07-10 08:18:50 +01001303/**
1304 * i915_gem_release_mmap - remove physical page mappings
1305 * @obj: obj in question
1306 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001307 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001308 * relinquish ownership of the pages back to the system.
1309 *
1310 * It is vital that we remove the page mapping if we have mapped a tiled
1311 * object through the GTT and then lose the fence register due to
1312 * resource pressure. Similarly if the object has been moved out of the
 1313 * aperture, then pages mapped into userspace must be revoked. Removing the
1314 * mapping will then trigger a page fault on the next user access, allowing
1315 * fixup by i915_gem_fault().
1316 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001317void
Chris Wilson05394f32010-11-08 19:18:58 +00001318i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001319{
Chris Wilson6299f992010-11-24 12:23:44 +00001320 if (!obj->fault_mappable)
1321 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001322
Chris Wilson6299f992010-11-24 12:23:44 +00001323 unmap_mapping_range(obj->base.dev->dev_mapping,
1324 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1325 obj->base.size, 1);
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001326
Chris Wilson6299f992010-11-24 12:23:44 +00001327 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001328}
1329
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001330static void
Chris Wilson05394f32010-11-08 19:18:58 +00001331i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001332{
Chris Wilson05394f32010-11-08 19:18:58 +00001333 struct drm_device *dev = obj->base.dev;
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001334 struct drm_gem_mm *mm = dev->mm_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001335 struct drm_map_list *list = &obj->base.map_list;
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001336
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001337 drm_ht_remove_item(&mm->offset_hash, &list->hash);
Chris Wilson39a01d12010-10-28 13:03:06 +01001338 drm_mm_put_block(list->file_offset_node);
1339 kfree(list->map);
1340 list->map = NULL;
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001341}
1342
Chris Wilson92b88ae2010-11-09 11:47:32 +00001343static uint32_t
1344i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
1345{
1346 struct drm_device *dev = obj->base.dev;
1347 uint32_t size;
1348
1349 if (INTEL_INFO(dev)->gen >= 4 ||
1350 obj->tiling_mode == I915_TILING_NONE)
1351 return obj->base.size;
1352
1353 /* Previous chips need a power-of-two fence region when tiling */
1354 if (INTEL_INFO(dev)->gen == 3)
1355 size = 1024*1024;
1356 else
1357 size = 512*1024;
1358
1359 while (size < obj->base.size)
1360 size <<= 1;
1361
1362 return size;
1363}
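/*
 * For example (derived from the code above): a 600KiB tiled object on
 * gen3 already fits the initial 1MiB region, so the fence covers 1MiB;
 * on gen2 the 512KiB start is doubled once to 1MiB. A 3MiB object would
 * need a 4MiB power-of-two fence region on either generation.
 */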
1364
Jesse Barnesde151cf2008-11-12 10:03:55 -08001365/**
1366 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1367 * @obj: object to check
1368 *
1369 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001370 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001371 */
1372static uint32_t
Chris Wilson05394f32010-11-08 19:18:58 +00001373i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001374{
Chris Wilson05394f32010-11-08 19:18:58 +00001375 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001376
1377 /*
1378 * Minimum alignment is 4k (GTT page size), but might be greater
1379 * if a fence register is needed for the object.
1380 */
Chris Wilsona00b10c2010-09-24 21:15:47 +01001381 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilson05394f32010-11-08 19:18:58 +00001382 obj->tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001383 return 4096;
1384
1385 /*
1386 * Previous chips need to be aligned to the size of the smallest
1387 * fence register that can contain the object.
1388 */
Chris Wilson05394f32010-11-08 19:18:58 +00001389 return i915_gem_get_gtt_size(obj);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001390}
1391
Daniel Vetter5e783302010-11-14 22:32:36 +01001392/**
1393 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1394 * unfenced object
1395 * @obj: object to check
1396 *
1397 * Return the required GTT alignment for an object, only taking into account
1398 * unfenced tiled surface requirements.
1399 */
1400static uint32_t
Chris Wilson05394f32010-11-08 19:18:58 +00001401i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
Daniel Vetter5e783302010-11-14 22:32:36 +01001402{
Chris Wilson05394f32010-11-08 19:18:58 +00001403 struct drm_device *dev = obj->base.dev;
Daniel Vetter5e783302010-11-14 22:32:36 +01001404 int tile_height;
1405
1406 /*
1407 * Minimum alignment is 4k (GTT page size) for sane hw.
1408 */
1409 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
Chris Wilson05394f32010-11-08 19:18:58 +00001410 obj->tiling_mode == I915_TILING_NONE)
Daniel Vetter5e783302010-11-14 22:32:36 +01001411 return 4096;
1412
1413 /*
1414 * Older chips need unfenced tiled buffers to be aligned to the left
1415 * edge of an even tile row (where tile rows are counted as if the bo is
1416 * placed in a fenced gtt region).
1417 */
1418 if (IS_GEN2(dev) ||
Chris Wilson05394f32010-11-08 19:18:58 +00001419 (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
Daniel Vetter5e783302010-11-14 22:32:36 +01001420 tile_height = 32;
1421 else
1422 tile_height = 8;
1423
Chris Wilson05394f32010-11-08 19:18:58 +00001424 return tile_height * obj->stride * 2;
Daniel Vetter5e783302010-11-14 22:32:36 +01001425}
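/*
 * For example (derived from the code above): an X-tiled gen2 object with
 * a 2048-byte stride needs 32 * 2048 * 2 = 128KiB alignment, whereas a
 * gen3 X-tiled object with the same stride only needs 8 * 2048 * 2 = 32KiB.
 */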
1426
Jesse Barnesde151cf2008-11-12 10:03:55 -08001427/**
1428 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1429 * @dev: DRM device
1430 * @data: GTT mapping ioctl data
Chris Wilson05394f32010-11-08 19:18:58 +00001431 * @file: GEM object info
Jesse Barnesde151cf2008-11-12 10:03:55 -08001432 *
1433 * Simply returns the fake offset to userspace so it can mmap it.
1434 * The mmap call will end up in drm_gem_mmap(), which will set things
1435 * up so we can get faults in the handler above.
1436 *
1437 * The fault handler will take care of binding the object into the GTT
1438 * (since it may have been evicted to make room for something), allocating
1439 * a fence register, and mapping the appropriate aperture address into
1440 * userspace.
1441 */
1442int
1443i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001444 struct drm_file *file)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001445{
Chris Wilsonda761a62010-10-27 17:37:08 +01001446 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001447 struct drm_i915_gem_mmap_gtt *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001448 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001449 int ret;
1450
1451 if (!(dev->driver->driver_features & DRIVER_GEM))
1452 return -ENODEV;
1453
Chris Wilson76c1dec2010-09-25 11:22:51 +01001454 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001455 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001456 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001457
Chris Wilson05394f32010-11-08 19:18:58 +00001458 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001459 if (obj == NULL) {
1460 ret = -ENOENT;
1461 goto unlock;
1462 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001463
Chris Wilson05394f32010-11-08 19:18:58 +00001464 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001465 ret = -E2BIG;
1466 goto unlock;
1467 }
1468
Chris Wilson05394f32010-11-08 19:18:58 +00001469 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001470 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001471 ret = -EINVAL;
1472 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001473 }
1474
Chris Wilson05394f32010-11-08 19:18:58 +00001475 if (!obj->base.map_list.map) {
Jesse Barnesde151cf2008-11-12 10:03:55 -08001476 ret = i915_gem_create_mmap_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001477 if (ret)
1478 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001479 }
1480
Chris Wilson05394f32010-11-08 19:18:58 +00001481 args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001482
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001483out:
Chris Wilson05394f32010-11-08 19:18:58 +00001484 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001485unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001486 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001487 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001488}
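/*
 * Illustrative userspace flow for the two-step GTT mmap (a sketch only;
 * real code would go through libdrm and check for errors):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * The mmap() lands in drm_gem_mmap(), which resolves the fake offset
 * back to this object and installs i915_gem_fault() as the fault handler.
 */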
1489
Chris Wilsone5281cc2010-10-28 13:45:36 +01001490static int
Chris Wilson05394f32010-11-08 19:18:58 +00001491i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
Chris Wilsone5281cc2010-10-28 13:45:36 +01001492 gfp_t gfpmask)
1493{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001494 int page_count, i;
1495 struct address_space *mapping;
1496 struct inode *inode;
1497 struct page *page;
1498
1499 /* Get the list of pages out of our struct file. They'll be pinned
1500 * at this point until we release them.
1501 */
Chris Wilson05394f32010-11-08 19:18:58 +00001502 page_count = obj->base.size / PAGE_SIZE;
1503 BUG_ON(obj->pages != NULL);
1504 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1505 if (obj->pages == NULL)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001506 return -ENOMEM;
1507
Chris Wilson05394f32010-11-08 19:18:58 +00001508 inode = obj->base.filp->f_path.dentry->d_inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001509 mapping = inode->i_mapping;
1510 for (i = 0; i < page_count; i++) {
1511 page = read_cache_page_gfp(mapping, i,
1512 GFP_HIGHUSER |
1513 __GFP_COLD |
1514 __GFP_RECLAIMABLE |
1515 gfpmask);
1516 if (IS_ERR(page))
1517 goto err_pages;
1518
Chris Wilson05394f32010-11-08 19:18:58 +00001519 obj->pages[i] = page;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001520 }
1521
Chris Wilson05394f32010-11-08 19:18:58 +00001522 if (obj->tiling_mode != I915_TILING_NONE)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001523 i915_gem_object_do_bit_17_swizzle(obj);
1524
1525 return 0;
1526
1527err_pages:
1528 while (i--)
Chris Wilson05394f32010-11-08 19:18:58 +00001529 page_cache_release(obj->pages[i]);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001530
Chris Wilson05394f32010-11-08 19:18:58 +00001531 drm_free_large(obj->pages);
1532 obj->pages = NULL;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001533 return PTR_ERR(page);
1534}
1535
Chris Wilson5cdf5882010-09-27 15:51:07 +01001536static void
Chris Wilson05394f32010-11-08 19:18:58 +00001537i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001538{
Chris Wilson05394f32010-11-08 19:18:58 +00001539 int page_count = obj->base.size / PAGE_SIZE;
Eric Anholt673a3942008-07-30 12:06:12 -07001540 int i;
1541
Chris Wilson05394f32010-11-08 19:18:58 +00001542 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001543
Chris Wilson05394f32010-11-08 19:18:58 +00001544 if (obj->tiling_mode != I915_TILING_NONE)
Eric Anholt280b7132009-03-12 16:56:27 -07001545 i915_gem_object_save_bit_17_swizzle(obj);
1546
Chris Wilson05394f32010-11-08 19:18:58 +00001547 if (obj->madv == I915_MADV_DONTNEED)
1548 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001549
1550 for (i = 0; i < page_count; i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00001551 if (obj->dirty)
1552 set_page_dirty(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001553
Chris Wilson05394f32010-11-08 19:18:58 +00001554 if (obj->madv == I915_MADV_WILLNEED)
1555 mark_page_accessed(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001556
Chris Wilson05394f32010-11-08 19:18:58 +00001557 page_cache_release(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001558 }
Chris Wilson05394f32010-11-08 19:18:58 +00001559 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001560
Chris Wilson05394f32010-11-08 19:18:58 +00001561 drm_free_large(obj->pages);
1562 obj->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001563}
1564
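/*
 * Object lifecycle on the GPU lists: activating an object takes an extra
 * GEM reference and moves it onto the ring's active_list; when retirement
 * finds it still has a pending write domain it moves to the flushing_list,
 * otherwise to the inactive (or pinned) list, at which point the extra
 * reference is dropped again.
 */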
Chris Wilson54cf91d2010-11-25 18:00:26 +00001565void
Chris Wilson05394f32010-11-08 19:18:58 +00001566i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001567 struct intel_ring_buffer *ring,
1568 u32 seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001569{
Chris Wilson05394f32010-11-08 19:18:58 +00001570 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001571 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter617dbe22010-02-11 22:16:02 +01001572
Zou Nan hai852835f2010-05-21 09:08:56 +08001573 BUG_ON(ring == NULL);
Chris Wilson05394f32010-11-08 19:18:58 +00001574 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001575
1576 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001577 if (!obj->active) {
1578 drm_gem_object_reference(&obj->base);
1579 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001580 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001581
Eric Anholt673a3942008-07-30 12:06:12 -07001582 /* Move from whatever list we were on to the tail of execution. */
Chris Wilson05394f32010-11-08 19:18:58 +00001583 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1584 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001585
Chris Wilson05394f32010-11-08 19:18:58 +00001586 obj->last_rendering_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001587 if (obj->fenced_gpu_access) {
1588 struct drm_i915_fence_reg *reg;
1589
1590 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1591
1592 obj->last_fenced_seqno = seqno;
1593 obj->last_fenced_ring = ring;
1594
1595 reg = &dev_priv->fence_regs[obj->fence_reg];
1596 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1597 }
1598}
1599
1600static void
1601i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1602{
1603 list_del_init(&obj->ring_list);
1604 obj->last_rendering_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001605}
1606
Eric Anholtce44b0e2008-11-06 16:00:31 -08001607static void
Chris Wilson05394f32010-11-08 19:18:58 +00001608i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
Eric Anholtce44b0e2008-11-06 16:00:31 -08001609{
Chris Wilson05394f32010-11-08 19:18:58 +00001610 struct drm_device *dev = obj->base.dev;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001611 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001612
Chris Wilson05394f32010-11-08 19:18:58 +00001613 BUG_ON(!obj->active);
1614 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001615
1616 i915_gem_object_move_off_active(obj);
1617}
1618
1619static void
1620i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1621{
1622 struct drm_device *dev = obj->base.dev;
1623 struct drm_i915_private *dev_priv = dev->dev_private;
1624
1625 if (obj->pin_count != 0)
1626 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1627 else
1628 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1629
1630 BUG_ON(!list_empty(&obj->gpu_write_list));
1631 BUG_ON(!obj->active);
1632 obj->ring = NULL;
1633
1634 i915_gem_object_move_off_active(obj);
1635 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001636
1637 obj->active = 0;
Chris Wilson87ca9c82010-12-02 09:42:56 +00001638 obj->pending_gpu_write = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001639 drm_gem_object_unreference(&obj->base);
1640
1641 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08001642}
Eric Anholt673a3942008-07-30 12:06:12 -07001643
Chris Wilson963b4832009-09-20 23:03:54 +01001644/* Immediately discard the backing storage */
1645static void
Chris Wilson05394f32010-11-08 19:18:58 +00001646i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001647{
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001648 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001649
Chris Wilsonae9fed62010-08-07 11:01:30 +01001650 /* Our goal here is to return as much of the memory as
1651 * is possible back to the system as we are called from OOM.
1652 * To do this we must instruct the shmfs to drop all of its
1653 * backing pages, *now*. Here we mirror the actions taken
 1654 * by shmem_delete_inode() to release the backing store.
1655 */
Chris Wilson05394f32010-11-08 19:18:58 +00001656 inode = obj->base.filp->f_path.dentry->d_inode;
Chris Wilsonae9fed62010-08-07 11:01:30 +01001657 truncate_inode_pages(inode->i_mapping, 0);
1658 if (inode->i_op->truncate_range)
1659 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001660
Chris Wilson05394f32010-11-08 19:18:58 +00001661 obj->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001662}
1663
1664static inline int
Chris Wilson05394f32010-11-08 19:18:58 +00001665i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001666{
Chris Wilson05394f32010-11-08 19:18:58 +00001667 return obj->madv == I915_MADV_DONTNEED;
Chris Wilson963b4832009-09-20 23:03:54 +01001668}
1669
Eric Anholt673a3942008-07-30 12:06:12 -07001670static void
Daniel Vetter63560392010-02-19 11:51:59 +01001671i915_gem_process_flushing_list(struct drm_device *dev,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001672 uint32_t flush_domains,
Zou Nan hai852835f2010-05-21 09:08:56 +08001673 struct intel_ring_buffer *ring)
Daniel Vetter63560392010-02-19 11:51:59 +01001674{
Chris Wilson05394f32010-11-08 19:18:58 +00001675 struct drm_i915_gem_object *obj, *next;
Daniel Vetter63560392010-02-19 11:51:59 +01001676
Chris Wilson05394f32010-11-08 19:18:58 +00001677 list_for_each_entry_safe(obj, next,
Chris Wilson64193402010-10-24 12:38:05 +01001678 &ring->gpu_write_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001679 gpu_write_list) {
Chris Wilson05394f32010-11-08 19:18:58 +00001680 if (obj->base.write_domain & flush_domains) {
1681 uint32_t old_write_domain = obj->base.write_domain;
Daniel Vetter63560392010-02-19 11:51:59 +01001682
Chris Wilson05394f32010-11-08 19:18:58 +00001683 obj->base.write_domain = 0;
1684 list_del_init(&obj->gpu_write_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001685 i915_gem_object_move_to_active(obj, ring,
1686 i915_gem_next_request_seqno(dev, ring));
Daniel Vetter63560392010-02-19 11:51:59 +01001687
Daniel Vetter63560392010-02-19 11:51:59 +01001688 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00001689 obj->base.read_domains,
Daniel Vetter63560392010-02-19 11:51:59 +01001690 old_write_domain);
1691 }
1692 }
1693}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001694
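/*
 * Emit a request onto the ring and do the bookkeeping: record the new
 * seqno, queue the request on the ring and (optionally) on the submitting
 * file's client list for throttling, and arm the hangcheck timer plus the
 * periodic retire work while the device is active.
 */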
Chris Wilson3cce4692010-10-27 16:11:02 +01001695int
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001696i915_add_request(struct drm_device *dev,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001697 struct drm_file *file,
Chris Wilson8dc5d142010-08-12 12:36:12 +01001698 struct drm_i915_gem_request *request,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001699 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001700{
1701 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001702 struct drm_i915_file_private *file_priv = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001703 uint32_t seqno;
1704 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01001705 int ret;
1706
1707 BUG_ON(request == NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07001708
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001709 if (file != NULL)
1710 file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001711
Chris Wilson3cce4692010-10-27 16:11:02 +01001712 ret = ring->add_request(ring, &seqno);
1713 if (ret)
1714 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001715
Chris Wilsona56ba562010-09-28 10:07:56 +01001716 ring->outstanding_lazy_request = false;
Eric Anholt673a3942008-07-30 12:06:12 -07001717
1718 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001719 request->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001720 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001721 was_empty = list_empty(&ring->request_list);
1722 list_add_tail(&request->list, &ring->request_list);
1723
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001724 if (file_priv) {
Chris Wilson1c255952010-09-26 11:03:27 +01001725 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001726 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001727 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001728 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01001729 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00001730 }
Eric Anholt673a3942008-07-30 12:06:12 -07001731
Ben Gamarif65d9422009-09-14 17:48:44 -04001732 if (!dev_priv->mm.suspended) {
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001733 mod_timer(&dev_priv->hangcheck_timer,
1734 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
Ben Gamarif65d9422009-09-14 17:48:44 -04001735 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001736 queue_delayed_work(dev_priv->wq,
1737 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001738 }
Chris Wilson3cce4692010-10-27 16:11:02 +01001739 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001740}
1741
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001742static inline void
1743i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001744{
Chris Wilson1c255952010-09-26 11:03:27 +01001745 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07001746
Chris Wilson1c255952010-09-26 11:03:27 +01001747 if (!file_priv)
1748 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001749
Chris Wilson1c255952010-09-26 11:03:27 +01001750 spin_lock(&file_priv->mm.lock);
1751 list_del(&request->client_list);
1752 request->file_priv = NULL;
1753 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001754}
1755
Chris Wilsondfaae392010-09-22 10:31:52 +01001756static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1757 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001758{
Chris Wilsondfaae392010-09-22 10:31:52 +01001759 while (!list_empty(&ring->request_list)) {
1760 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001761
Chris Wilsondfaae392010-09-22 10:31:52 +01001762 request = list_first_entry(&ring->request_list,
1763 struct drm_i915_gem_request,
1764 list);
1765
1766 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001767 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001768 kfree(request);
1769 }
1770
1771 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001772 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001773
Chris Wilson05394f32010-11-08 19:18:58 +00001774 obj = list_first_entry(&ring->active_list,
1775 struct drm_i915_gem_object,
1776 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001777
Chris Wilson05394f32010-11-08 19:18:58 +00001778 obj->base.write_domain = 0;
1779 list_del_init(&obj->gpu_write_list);
1780 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001781 }
Eric Anholt673a3942008-07-30 12:06:12 -07001782}
1783
Chris Wilson312817a2010-11-22 11:50:11 +00001784static void i915_gem_reset_fences(struct drm_device *dev)
1785{
1786 struct drm_i915_private *dev_priv = dev->dev_private;
1787 int i;
1788
1789 for (i = 0; i < 16; i++) {
1790 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00001791 struct drm_i915_gem_object *obj = reg->obj;
1792
1793 if (!obj)
1794 continue;
1795
1796 if (obj->tiling_mode)
1797 i915_gem_release_mmap(obj);
1798
Chris Wilsond9e86c02010-11-10 16:40:20 +00001799 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1800 reg->obj->fenced_gpu_access = false;
1801 reg->obj->last_fenced_seqno = 0;
1802 reg->obj->last_fenced_ring = NULL;
1803 i915_gem_clear_fence_reg(dev, reg);
Chris Wilson312817a2010-11-22 11:50:11 +00001804 }
1805}
1806
Chris Wilson069efc12010-09-30 16:53:18 +01001807void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07001808{
Chris Wilsondfaae392010-09-22 10:31:52 +01001809 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001810 struct drm_i915_gem_object *obj;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001811 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001812
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001813 for (i = 0; i < I915_NUM_RINGS; i++)
1814 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
Chris Wilsondfaae392010-09-22 10:31:52 +01001815
1816 /* Remove anything from the flushing lists. The GPU cache is likely
1817 * to be lost on reset along with the data, so simply move the
1818 * lost bo to the inactive list.
1819 */
1820 while (!list_empty(&dev_priv->mm.flushing_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001821 obj = list_first_entry(&dev_priv->mm.flushing_list,
1822 struct drm_i915_gem_object,
1823 mm_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001824
Chris Wilson05394f32010-11-08 19:18:58 +00001825 obj->base.write_domain = 0;
1826 list_del_init(&obj->gpu_write_list);
1827 i915_gem_object_move_to_inactive(obj);
Chris Wilson9375e442010-09-19 12:21:28 +01001828 }
Chris Wilson9375e442010-09-19 12:21:28 +01001829
Chris Wilsondfaae392010-09-22 10:31:52 +01001830 /* Move everything out of the GPU domains to ensure we do any
1831 * necessary invalidation upon reuse.
1832 */
Chris Wilson05394f32010-11-08 19:18:58 +00001833 list_for_each_entry(obj,
Chris Wilson77f01232010-09-19 12:31:36 +01001834 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001835 mm_list)
Chris Wilson77f01232010-09-19 12:31:36 +01001836 {
Chris Wilson05394f32010-11-08 19:18:58 +00001837 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilson77f01232010-09-19 12:31:36 +01001838 }
Chris Wilson069efc12010-09-30 16:53:18 +01001839
1840 /* The fence registers are invalidated so clear them out */
Chris Wilson312817a2010-11-22 11:50:11 +00001841 i915_gem_reset_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001842}
1843
1844/**
1845 * This function clears the request list as sequence numbers are passed.
1846 */
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001847static void
1848i915_gem_retire_requests_ring(struct drm_device *dev,
1849 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001850{
1851 drm_i915_private_t *dev_priv = dev->dev_private;
1852 uint32_t seqno;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001853 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001854
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001855 if (!ring->status_page.page_addr ||
1856 list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001857 return;
1858
Chris Wilson23bc5982010-09-29 16:10:57 +01001859 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001860
Chris Wilson78501ea2010-10-27 12:18:21 +01001861 seqno = ring->get_seqno(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001862
Chris Wilson076e2c02011-01-21 10:07:18 +00001863 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001864 if (seqno >= ring->sync_seqno[i])
1865 ring->sync_seqno[i] = 0;
1866
Zou Nan hai852835f2010-05-21 09:08:56 +08001867 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001868 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001869
Zou Nan hai852835f2010-05-21 09:08:56 +08001870 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001871 struct drm_i915_gem_request,
1872 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001873
Chris Wilsondfaae392010-09-22 10:31:52 +01001874 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001875 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001876
1877 trace_i915_gem_request_retire(dev, request->seqno);
1878
1879 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001880 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001881 kfree(request);
1882 }
1883
1884 /* Move any buffers on the active list that are no longer referenced
1885 * by the ringbuffer to the flushing/inactive lists as appropriate.
1886 */
1887 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001888 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001889
Chris Wilson05394f32010-11-08 19:18:58 +00001890 obj = list_first_entry(&ring->active_list,
1891 struct drm_i915_gem_object,
1892 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001893
Chris Wilson05394f32010-11-08 19:18:58 +00001894 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001895 break;
1896
Chris Wilson05394f32010-11-08 19:18:58 +00001897 if (obj->base.write_domain != 0)
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001898 i915_gem_object_move_to_flushing(obj);
1899 else
1900 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001901 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001902
 1903 if (unlikely(dev_priv->trace_irq_seqno &&
1904 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001905 ring->irq_put(ring);
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001906 dev_priv->trace_irq_seqno = 0;
1907 }
Chris Wilson23bc5982010-09-29 16:10:57 +01001908
1909 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001910}
1911
1912void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001913i915_gem_retire_requests(struct drm_device *dev)
1914{
1915 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001916 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001917
Chris Wilsonbe726152010-07-23 23:18:50 +01001918 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001919 struct drm_i915_gem_object *obj, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01001920
1921 /* We must be careful that during unbind() we do not
1922 * accidentally infinitely recurse into retire requests.
1923 * Currently:
1924 * retire -> free -> unbind -> wait -> retire_ring
1925 */
Chris Wilson05394f32010-11-08 19:18:58 +00001926 list_for_each_entry_safe(obj, next,
Chris Wilsonbe726152010-07-23 23:18:50 +01001927 &dev_priv->mm.deferred_free_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001928 mm_list)
Chris Wilson05394f32010-11-08 19:18:58 +00001929 i915_gem_free_object_tail(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01001930 }
1931
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001932 for (i = 0; i < I915_NUM_RINGS; i++)
1933 i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001934}
1935
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001936static void
Eric Anholt673a3942008-07-30 12:06:12 -07001937i915_gem_retire_work_handler(struct work_struct *work)
1938{
1939 drm_i915_private_t *dev_priv;
1940 struct drm_device *dev;
Chris Wilson0a587052011-01-09 21:05:44 +00001941 bool idle;
1942 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001943
1944 dev_priv = container_of(work, drm_i915_private_t,
1945 mm.retire_work.work);
1946 dev = dev_priv->dev;
1947
Chris Wilson891b48c2010-09-29 12:26:37 +01001948 /* Come back later if the device is busy... */
1949 if (!mutex_trylock(&dev->struct_mutex)) {
1950 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1951 return;
1952 }
1953
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001954 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001955
Chris Wilson0a587052011-01-09 21:05:44 +00001956 /* Send a periodic flush down the ring so we don't hold onto GEM
1957 * objects indefinitely.
1958 */
1959 idle = true;
1960 for (i = 0; i < I915_NUM_RINGS; i++) {
1961 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1962
1963 if (!list_empty(&ring->gpu_write_list)) {
1964 struct drm_i915_gem_request *request;
1965 int ret;
1966
1967 ret = i915_gem_flush_ring(dev, ring, 0,
1968 I915_GEM_GPU_DOMAINS);
1969 request = kzalloc(sizeof(*request), GFP_KERNEL);
1970 if (ret || request == NULL ||
1971 i915_add_request(dev, NULL, request, ring))
1972 kfree(request);
1973 }
1974
1975 idle &= list_empty(&ring->request_list);
1976 }
1977
1978 if (!dev_priv->mm.suspended && !idle)
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001979 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Chris Wilson0a587052011-01-09 21:05:44 +00001980
Eric Anholt673a3942008-07-30 12:06:12 -07001981 mutex_unlock(&dev->struct_mutex);
1982}
1983
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001984int
Zou Nan hai852835f2010-05-21 09:08:56 +08001985i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001986 bool interruptible, struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001987{
1988 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001989 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001990 int ret = 0;
1991
1992 BUG_ON(seqno == 0);
1993
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001994 if (atomic_read(&dev_priv->mm.wedged)) {
1995 struct completion *x = &dev_priv->error_completion;
1996 bool recovery_complete;
1997 unsigned long flags;
1998
1999 /* Give the error handler a chance to run. */
2000 spin_lock_irqsave(&x->wait.lock, flags);
2001 recovery_complete = x->done > 0;
2002 spin_unlock_irqrestore(&x->wait.lock, flags);
2003
2004 return recovery_complete ? -EIO : -EAGAIN;
2005 }
Ben Gamariffed1d02009-09-14 17:48:41 -04002006
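	/* If we are asked to wait on the ring's outstanding lazy request,
	 * that request has not actually been emitted yet, so emit it now
	 * to obtain a real seqno to wait upon.
	 */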
Chris Wilson5d97eb62010-11-10 20:40:02 +00002007 if (seqno == ring->outstanding_lazy_request) {
Chris Wilson3cce4692010-10-27 16:11:02 +01002008 struct drm_i915_gem_request *request;
2009
2010 request = kzalloc(sizeof(*request), GFP_KERNEL);
2011 if (request == NULL)
Daniel Vettere35a41d2010-02-11 22:13:59 +01002012 return -ENOMEM;
Chris Wilson3cce4692010-10-27 16:11:02 +01002013
2014 ret = i915_add_request(dev, NULL, request, ring);
2015 if (ret) {
2016 kfree(request);
2017 return ret;
2018 }
2019
2020 seqno = request->seqno;
Daniel Vettere35a41d2010-02-11 22:13:59 +01002021 }
2022
Chris Wilson78501ea2010-10-27 12:18:21 +01002023 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Eric Anholtbad720f2009-10-22 16:11:14 -07002024 if (HAS_PCH_SPLIT(dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08002025 ier = I915_READ(DEIER) | I915_READ(GTIER);
2026 else
2027 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07002028 if (!ier) {
2029 DRM_ERROR("something (likely vbetool) disabled "
2030 "interrupts, re-enabling\n");
2031 i915_driver_irq_preinstall(dev);
2032 i915_driver_irq_postinstall(dev);
2033 }
2034
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002035 trace_i915_gem_request_wait_begin(dev, seqno);
2036
Chris Wilsonb2223492010-10-27 15:27:33 +01002037 ring->waiting_seqno = seqno;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00002038 if (ring->irq_get(ring)) {
2039 if (interruptible)
2040 ret = wait_event_interruptible(ring->irq_queue,
2041 i915_seqno_passed(ring->get_seqno(ring), seqno)
2042 || atomic_read(&dev_priv->mm.wedged));
2043 else
2044 wait_event(ring->irq_queue,
2045 i915_seqno_passed(ring->get_seqno(ring), seqno)
2046 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02002047
Chris Wilsonb13c2b92010-12-13 16:54:50 +00002048 ring->irq_put(ring);
Chris Wilsonb5ba1772010-12-14 12:17:15 +00002049 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2050 seqno) ||
2051 atomic_read(&dev_priv->mm.wedged), 3000))
2052 ret = -EBUSY;
Chris Wilsonb2223492010-10-27 15:27:33 +01002053 ring->waiting_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002054
2055 trace_i915_gem_request_wait_end(dev, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07002056 }
Ben Gamariba1234d2009-09-14 17:48:47 -04002057 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01002058 ret = -EAGAIN;
Eric Anholt673a3942008-07-30 12:06:12 -07002059
2060 if (ret && ret != -ERESTARTSYS)
Daniel Vetter8bff9172010-02-11 22:19:40 +01002061 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
Chris Wilson78501ea2010-10-27 12:18:21 +01002062 __func__, ret, seqno, ring->get_seqno(ring),
Daniel Vetter8bff9172010-02-11 22:19:40 +01002063 dev_priv->next_seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07002064
2065 /* Directly dispatch request retiring. While we have the work queue
2066 * to handle this, the waiter on a request often wants an associated
2067 * buffer to have made it to the inactive list, and we would need
2068 * a separate wait queue to handle that.
2069 */
2070 if (ret == 0)
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002071 i915_gem_retire_requests_ring(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07002072
2073 return ret;
2074}
2075
Daniel Vetter48764bf2009-09-15 22:57:32 +02002076/**
2077 * Waits for a sequence number to be signaled, and cleans up the
2078 * request and object lists appropriately for that event.
2079 */
2080static int
Zou Nan hai852835f2010-05-21 09:08:56 +08002081i915_wait_request(struct drm_device *dev, uint32_t seqno,
Chris Wilsona56ba562010-09-28 10:07:56 +01002082 struct intel_ring_buffer *ring)
Daniel Vetter48764bf2009-09-15 22:57:32 +02002083{
Zou Nan hai852835f2010-05-21 09:08:56 +08002084 return i915_do_wait_request(dev, seqno, 1, ring);
Daniel Vetter48764bf2009-09-15 22:57:32 +02002085}
2086
Eric Anholt673a3942008-07-30 12:06:12 -07002087/**
2088 * Ensures that all rendering to the object has completed and the object is
2089 * safe to unbind from the GTT or access from the CPU.
2090 */
Chris Wilson54cf91d2010-11-25 18:00:26 +00002091int
Chris Wilson05394f32010-11-08 19:18:58 +00002092i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
Chris Wilson2cf34d72010-09-14 13:03:28 +01002093 bool interruptible)
Eric Anholt673a3942008-07-30 12:06:12 -07002094{
Chris Wilson05394f32010-11-08 19:18:58 +00002095 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07002096 int ret;
2097
Eric Anholte47c68e2008-11-14 13:35:19 -08002098 /* This function only exists to support waiting for existing rendering,
2099 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07002100 */
Chris Wilson05394f32010-11-08 19:18:58 +00002101 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07002102
2103 /* If there is rendering queued on the buffer being evicted, wait for
2104 * it.
2105 */
Chris Wilson05394f32010-11-08 19:18:58 +00002106 if (obj->active) {
Chris Wilson2cf34d72010-09-14 13:03:28 +01002107 ret = i915_do_wait_request(dev,
Chris Wilson05394f32010-11-08 19:18:58 +00002108 obj->last_rendering_seqno,
Chris Wilson2cf34d72010-09-14 13:03:28 +01002109 interruptible,
Chris Wilson05394f32010-11-08 19:18:58 +00002110 obj->ring);
Chris Wilson2cf34d72010-09-14 13:03:28 +01002111 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002112 return ret;
2113 }
2114
2115 return 0;
2116}
2117
2118/**
2119 * Unbinds an object from the GTT aperture.
2120 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002121int
Chris Wilson05394f32010-11-08 19:18:58 +00002122i915_gem_object_unbind(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002123{
Eric Anholt673a3942008-07-30 12:06:12 -07002124 int ret = 0;
2125
Chris Wilson05394f32010-11-08 19:18:58 +00002126 if (obj->gtt_space == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002127 return 0;
2128
Chris Wilson05394f32010-11-08 19:18:58 +00002129 if (obj->pin_count != 0) {
Eric Anholt673a3942008-07-30 12:06:12 -07002130 DRM_ERROR("Attempting to unbind pinned buffer\n");
2131 return -EINVAL;
2132 }
2133
Eric Anholt5323fd02009-09-09 11:50:45 -07002134 /* blow away mappings if mapped through GTT */
2135 i915_gem_release_mmap(obj);
2136
Eric Anholt673a3942008-07-30 12:06:12 -07002137 /* Move the object to the CPU domain to ensure that
2138 * any possible CPU writes while it's not in the GTT
2139 * are flushed when we go to remap it. This will
2140 * also ensure that all pending GPU writes are finished
2141 * before we unbind.
2142 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002143 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Chris Wilson8dc17752010-07-23 23:18:51 +01002144 if (ret == -ERESTARTSYS)
Eric Anholt673a3942008-07-30 12:06:12 -07002145 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002146 /* Continue on if we fail due to EIO: the GPU is hung, so we
 2147 * should be safe, but we still need to clean up or else we
 2148 * might cause memory corruption through use-after-free.
2149 */
Chris Wilson812ed492010-09-30 15:08:57 +01002150 if (ret) {
2151 i915_gem_clflush_object(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002152 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Chris Wilson812ed492010-09-30 15:08:57 +01002153 }
Eric Anholt673a3942008-07-30 12:06:12 -07002154
Daniel Vetter96b47b62009-12-15 17:50:00 +01002155 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002156 ret = i915_gem_object_put_fence(obj);
2157 if (ret == -ERESTARTSYS)
2158 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002159
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002160 i915_gem_gtt_unbind_object(obj);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002161 i915_gem_object_put_pages_gtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002162
Chris Wilson6299f992010-11-24 12:23:44 +00002163 list_del_init(&obj->gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002164 list_del_init(&obj->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002165 /* Avoid an unnecessary call to unbind on rebind. */
Chris Wilson05394f32010-11-08 19:18:58 +00002166 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002167
Chris Wilson05394f32010-11-08 19:18:58 +00002168 drm_mm_put_block(obj->gtt_space);
2169 obj->gtt_space = NULL;
2170 obj->gtt_offset = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002171
Chris Wilson05394f32010-11-08 19:18:58 +00002172 if (i915_gem_object_is_purgeable(obj))
Chris Wilson963b4832009-09-20 23:03:54 +01002173 i915_gem_object_truncate(obj);
2174
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002175 trace_i915_gem_object_unbind(obj);
2176
Chris Wilson8dc17752010-07-23 23:18:51 +01002177 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002178}
2179
Chris Wilson88241782011-01-07 17:09:48 +00002180int
Chris Wilson54cf91d2010-11-25 18:00:26 +00002181i915_gem_flush_ring(struct drm_device *dev,
2182 struct intel_ring_buffer *ring,
2183 uint32_t invalidate_domains,
2184 uint32_t flush_domains)
2185{
Chris Wilson88241782011-01-07 17:09:48 +00002186 int ret;
2187
2188 ret = ring->flush(ring, invalidate_domains, flush_domains);
2189 if (ret)
2190 return ret;
2191
2192 i915_gem_process_flushing_list(dev, flush_domains, ring);
2193 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002194}
2195
Chris Wilsona56ba562010-09-28 10:07:56 +01002196static int i915_ring_idle(struct drm_device *dev,
2197 struct intel_ring_buffer *ring)
2198{
Chris Wilson88241782011-01-07 17:09:48 +00002199 int ret;
2200
Chris Wilson395b70b2010-10-28 21:28:46 +01002201 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
Chris Wilson64193402010-10-24 12:38:05 +01002202 return 0;
2203
Chris Wilson88241782011-01-07 17:09:48 +00002204 if (!list_empty(&ring->gpu_write_list)) {
2205 ret = i915_gem_flush_ring(dev, ring,
Chris Wilson0ac74c62010-12-06 14:36:02 +00002206 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
Chris Wilson88241782011-01-07 17:09:48 +00002207 if (ret)
2208 return ret;
2209 }
2210
Chris Wilsona56ba562010-09-28 10:07:56 +01002211 return i915_wait_request(dev,
2212 i915_gem_next_request_seqno(dev, ring),
2213 ring);
2214}
2215
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01002216int
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002217i915_gpu_idle(struct drm_device *dev)
2218{
2219 drm_i915_private_t *dev_priv = dev->dev_private;
2220 bool lists_empty;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002221 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002222
Zou Nan haid1b851f2010-05-21 09:08:57 +08002223 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
Chris Wilson395b70b2010-10-28 21:28:46 +01002224 list_empty(&dev_priv->mm.active_list));
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002225 if (lists_empty)
2226 return 0;
2227
2228 /* Flush everything onto the inactive list. */
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002229 for (i = 0; i < I915_NUM_RINGS; i++) {
2230 ret = i915_ring_idle(dev, &dev_priv->ring[i]);
2231 if (ret)
2232 return ret;
2233 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002234
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002235 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002236}
2237
Daniel Vetterc6642782010-11-12 13:46:18 +00002238static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2239 struct intel_ring_buffer *pipelined)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002240{
Chris Wilson05394f32010-11-08 19:18:58 +00002241 struct drm_device *dev = obj->base.dev;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002242 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002243 u32 size = obj->gtt_space->size;
2244 int regnum = obj->fence_reg;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002245 uint64_t val;
2246
Chris Wilson05394f32010-11-08 19:18:58 +00002247 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Daniel Vetterc6642782010-11-12 13:46:18 +00002248 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00002249 val |= obj->gtt_offset & 0xfffff000;
2250 val |= (uint64_t)((obj->stride / 128) - 1) <<
Eric Anholt4e901fd2009-10-26 16:44:17 -07002251 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2252
Chris Wilson05394f32010-11-08 19:18:58 +00002253 if (obj->tiling_mode == I915_TILING_Y)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002254 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2255 val |= I965_FENCE_REG_VALID;
2256
Daniel Vetterc6642782010-11-12 13:46:18 +00002257 if (pipelined) {
2258 int ret = intel_ring_begin(pipelined, 6);
2259 if (ret)
2260 return ret;
2261
2262 intel_ring_emit(pipelined, MI_NOOP);
2263 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2264 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2265 intel_ring_emit(pipelined, (u32)val);
2266 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2267 intel_ring_emit(pipelined, (u32)(val >> 32));
2268 intel_ring_advance(pipelined);
2269 } else
2270 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2271
2272 return 0;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002273}
2274
Daniel Vetterc6642782010-11-12 13:46:18 +00002275static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2276 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002277{
Chris Wilson05394f32010-11-08 19:18:58 +00002278 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002279 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002280 u32 size = obj->gtt_space->size;
2281 int regnum = obj->fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002282 uint64_t val;
2283
Chris Wilson05394f32010-11-08 19:18:58 +00002284 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Jesse Barnesde151cf2008-11-12 10:03:55 -08002285 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00002286 val |= obj->gtt_offset & 0xfffff000;
2287 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2288 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002289 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2290 val |= I965_FENCE_REG_VALID;
2291
Daniel Vetterc6642782010-11-12 13:46:18 +00002292 if (pipelined) {
2293 int ret = intel_ring_begin(pipelined, 6);
2294 if (ret)
2295 return ret;
2296
2297 intel_ring_emit(pipelined, MI_NOOP);
2298 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2299 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2300 intel_ring_emit(pipelined, (u32)val);
2301 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2302 intel_ring_emit(pipelined, (u32)(val >> 32));
2303 intel_ring_advance(pipelined);
2304 } else
2305 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2306
2307 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002308}
2309
Daniel Vetterc6642782010-11-12 13:46:18 +00002310static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2311 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002312{
Chris Wilson05394f32010-11-08 19:18:58 +00002313 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002314 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002315 u32 size = obj->gtt_space->size;
Daniel Vetterc6642782010-11-12 13:46:18 +00002316 u32 fence_reg, val, pitch_val;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002317 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002318
Daniel Vetterc6642782010-11-12 13:46:18 +00002319 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2320 (size & -size) != size ||
2321 (obj->gtt_offset & (size - 1)),
2322 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2323 obj->gtt_offset, obj->map_and_fenceable, size))
2324 return -EINVAL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002325
Daniel Vetterc6642782010-11-12 13:46:18 +00002326 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
Jesse Barnes0f973f22009-01-26 17:10:45 -08002327 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002328 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002329 tile_width = 512;
2330
2331 /* Note: pitch better be a power of two tile widths */
Chris Wilson05394f32010-11-08 19:18:58 +00002332 pitch_val = obj->stride / tile_width;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002333 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002334
Chris Wilson05394f32010-11-08 19:18:58 +00002335 val = obj->gtt_offset;
2336 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002337 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002338 val |= I915_FENCE_SIZE_BITS(size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002339 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2340 val |= I830_FENCE_REG_VALID;
2341
Chris Wilson05394f32010-11-08 19:18:58 +00002342 fence_reg = obj->fence_reg;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002343 if (fence_reg < 8)
2344 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002345 else
Chris Wilsona00b10c2010-09-24 21:15:47 +01002346 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
Daniel Vetterc6642782010-11-12 13:46:18 +00002347
2348 if (pipelined) {
2349 int ret = intel_ring_begin(pipelined, 4);
2350 if (ret)
2351 return ret;
2352
2353 intel_ring_emit(pipelined, MI_NOOP);
2354 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2355 intel_ring_emit(pipelined, fence_reg);
2356 intel_ring_emit(pipelined, val);
2357 intel_ring_advance(pipelined);
2358 } else
2359 I915_WRITE(fence_reg, val);
2360
2361 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002362}
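/*
 * For example (derived from the code above): an X-tiled object with a
 * 2048-byte stride uses 512-byte tile widths, so stride / tile_width = 4
 * and the encoded pitch_val is ffs(4) - 1 = 2.
 */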
2363
static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
				struct intel_ring_buffer *pipelined)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 size = obj->gtt_space->size;
	int regnum = obj->fence_reg;
	uint32_t val;
	uint32_t pitch_val;

	if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
		 (size & -size) != size ||
		 (obj->gtt_offset & (size - 1)),
		 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
		 obj->gtt_offset, size))
		return -EINVAL;

	pitch_val = obj->stride / 128;
	pitch_val = ffs(pitch_val) - 1;

	val = obj->gtt_offset;
	if (obj->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I830_FENCE_SIZE_BITS(size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (pipelined) {
		int ret = intel_ring_begin(pipelined, 4);
		if (ret)
			return ret;

		intel_ring_emit(pipelined, MI_NOOP);
		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum * 4);
		intel_ring_emit(pipelined, val);
		intel_ring_advance(pipelined);
	} else
		I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);

	return 0;
}

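/* Returns true once the ring's current seqno has passed @seqno. */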
static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	return i915_seqno_passed(ring->get_seqno(ring), seqno);
}

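/* Flush any outstanding GPU writes covered by the fence and, if the last
 * fenced access happened on a ring other than @pipelined, wait for it to
 * complete so the fence register can safely be changed.
 */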
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
			    struct intel_ring_buffer *pipelined,
			    bool interruptible)
{
	int ret;

	if (obj->fenced_gpu_access) {
		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
			ret = i915_gem_flush_ring(obj->base.dev,
						  obj->last_fenced_ring,
						  0, obj->base.write_domain);
			if (ret)
				return ret;
		}

		obj->fenced_gpu_access = false;
	}

	if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
		if (!ring_passed_seqno(obj->last_fenced_ring,
				       obj->last_fenced_seqno)) {
			ret = i915_do_wait_request(obj->base.dev,
						   obj->last_fenced_seqno,
						   interruptible,
						   obj->last_fenced_ring);
			if (ret)
				return ret;
		}

		obj->last_fenced_seqno = 0;
		obj->last_fenced_ring = NULL;
	}

	/* Ensure that all CPU reads are completed before installing a fence
	 * and all writes before removing the fence.
	 */
	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
		mb();

	return 0;
}

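/* Release the fence register backing @obj, waiting for any pending fenced
 * access to finish first.
 */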
int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	ret = i915_gem_object_flush_fence(obj, NULL, true);
	if (ret)
		return ret;

	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		i915_gem_clear_fence_reg(obj->base.dev,
					 &dev_priv->fence_regs[obj->fence_reg]);

		obj->fence_reg = I915_FENCE_REG_NONE;
	}

	return 0;
}

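/* Pick a fence register for @obj: prefer a completely free register, and
 * otherwise steal the least-recently-used unpinned one, preferring a
 * register whose previous user was last fenced on @pipelined.
 */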
static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev,
		    struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg, *first, *avail;
	int i;

	/* First try to find a free reg */
	avail = NULL;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return reg;

		if (!reg->obj->pin_count)
			avail = reg;
	}

	if (avail == NULL)
		return NULL;

	/* None available, try to steal one or wait for a user to finish */
	avail = first = NULL;
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
		if (reg->obj->pin_count)
			continue;

		if (first == NULL)
			first = reg;

		if (!pipelined ||
		    !reg->obj->last_fenced_ring ||
		    reg->obj->last_fenced_ring == pipelined) {
			avail = reg;
			break;
		}
	}

	if (avail == NULL)
		avail = first;

	return avail;
}

/**
 * i915_gem_object_get_fence - set up a fence reg for an object
 * @obj: object to map through a fence reg
 * @pipelined: ring on which to queue the change, or NULL for CPU access
 * @interruptible: whether any wait for the register to retire may be
 *	interrupted by a signal
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
			  struct intel_ring_buffer *pipelined,
			  bool interruptible)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg;
	int ret;

	/* XXX disable pipelining. There are bugs. Shocking. */
	pipelined = NULL;

	/* Just update our place in the LRU if our fence is getting reused. */
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj->fence_reg];
		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);

		if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
			pipelined = NULL;

		if (!pipelined) {
			if (reg->setup_seqno) {
				if (!ring_passed_seqno(obj->last_fenced_ring,
						       reg->setup_seqno)) {
					ret = i915_do_wait_request(obj->base.dev,
								   reg->setup_seqno,
								   interruptible,
								   obj->last_fenced_ring);
					if (ret)
						return ret;
				}

				reg->setup_seqno = 0;
			}
		} else if (obj->last_fenced_ring &&
			   obj->last_fenced_ring != pipelined) {
			ret = i915_gem_object_flush_fence(obj,
							  pipelined,
							  interruptible);
			if (ret)
				return ret;
		} else if (obj->tiling_changed) {
			if (obj->fenced_gpu_access) {
				if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
					ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
								  0, obj->base.write_domain);
					if (ret)
						return ret;
				}

				obj->fenced_gpu_access = false;
			}
		}

		if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
			pipelined = NULL;
		BUG_ON(!pipelined && reg->setup_seqno);

		if (obj->tiling_changed) {
			if (pipelined) {
				reg->setup_seqno =
					i915_gem_next_request_seqno(dev, pipelined);
				obj->last_fenced_seqno = reg->setup_seqno;
				obj->last_fenced_ring = pipelined;
			}
			goto update;
		}

		return 0;
	}

	reg = i915_find_fence_reg(dev, pipelined);
	if (reg == NULL)
		return -ENOSPC;

	ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
	if (ret)
		return ret;

	if (reg->obj) {
		struct drm_i915_gem_object *old = reg->obj;

		drm_gem_object_reference(&old->base);

		if (old->tiling_mode)
			i915_gem_release_mmap(old);

		ret = i915_gem_object_flush_fence(old,
						  pipelined,
						  interruptible);
		if (ret) {
			drm_gem_object_unreference(&old->base);
			return ret;
		}

		if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
			pipelined = NULL;

		old->fence_reg = I915_FENCE_REG_NONE;
		old->last_fenced_ring = pipelined;
		old->last_fenced_seqno =
			pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;

		drm_gem_object_unreference(&old->base);
	} else if (obj->last_fenced_seqno == 0)
		pipelined = NULL;

	reg->obj = obj;
	list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
	obj->fence_reg = reg - dev_priv->fence_regs;
	obj->last_fenced_ring = pipelined;

	reg->setup_seqno =
		pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
	obj->last_fenced_seqno = reg->setup_seqno;

update:
	obj->tiling_changed = false;
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		ret = sandybridge_write_fence_reg(obj, pipelined);
		break;
	case 5:
	case 4:
		ret = i965_write_fence_reg(obj, pipelined);
		break;
	case 3:
		ret = i915_write_fence_reg(obj, pipelined);
		break;
	case 2:
		ret = i830_write_fence_reg(obj, pipelined);
		break;
	}

	return ret;
}

/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @dev: DRM device
 * @reg: fence register to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * bookkeeping in dev_priv and @reg.
 */
static void
i915_gem_clear_fence_reg(struct drm_device *dev,
			 struct drm_i915_fence_reg *reg)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t fence_reg = reg - dev_priv->fence_regs;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
		break;
	case 5:
	case 4:
		I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
		break;
	case 3:
		if (fence_reg >= 8)
			fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
		else
	case 2:
			/* Gen2 deliberately jumps into the else branch above:
			 * it shares the 830-style register layout with the
			 * first eight gen3 fences.
			 */
			fence_reg = FENCE_REG_830_0 + fence_reg * 4;

		I915_WRITE(fence_reg, 0);
		break;
	}

	list_del_init(&reg->lru_list);
	reg->obj = NULL;
	reg->setup_seqno = 0;
}

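/* When map_and_fenceable is requested below, the object is placed in the
 * CPU-mappable part of the aperture with the size and alignment a fence
 * register would need to cover it.
 */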
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
			    unsigned alignment,
			    bool map_and_fenceable)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *free_space;
	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
	bool mappable, fenceable;
	int ret;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	fence_size = i915_gem_get_gtt_size(obj);
	fence_alignment = i915_gem_get_gtt_alignment(obj);
	unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);

	if (alignment == 0)
		alignment = map_and_fenceable ? fence_alignment :
						unfenced_alignment;
	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	size = map_and_fenceable ? fence_size : obj->base.size;

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->base.size >
	    (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
		return -E2BIG;
	}

 search_free:
	if (map_and_fenceable)
		free_space =
			drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
						    size, alignment, 0,
						    dev_priv->mm.gtt_mappable_end,
						    0);
	else
		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
						size, alignment, 0);

	if (free_space != NULL) {
		if (map_and_fenceable)
			obj->gtt_space =
				drm_mm_get_block_range_generic(free_space,
							       size, alignment, 0,
							       dev_priv->mm.gtt_mappable_end,
							       0);
		else
			obj->gtt_space =
				drm_mm_get_block(free_space, size, alignment);
	}
	if (obj->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
		ret = i915_gem_evict_something(dev, size, alignment,
					       map_and_fenceable);
		if (ret)
			return ret;

		goto search_free;
	}

	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
	if (ret) {
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;

		if (ret == -ENOMEM) {
			/* first try to reclaim some memory by clearing the GTT */
			ret = i915_gem_evict_everything(dev, false);
			if (ret) {
				/* now try to shrink everyone else */
				if (gfpmask) {
					gfpmask = 0;
					goto search_free;
				}

				return -ENOMEM;
			}

			goto search_free;
		}

		return ret;
	}

	ret = i915_gem_gtt_bind_object(obj);
	if (ret) {
		i915_gem_object_put_pages_gtt(obj);
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;

		if (i915_gem_evict_everything(dev, false))
			return ret;

		goto search_free;
	}

	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	obj->gtt_offset = obj->gtt_space->start;

	fenceable =
		obj->gtt_space->size == fence_size &&
		(obj->gtt_space->start & (fence_alignment - 1)) == 0;

	mappable =
		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;

	obj->map_and_fenceable = mappable && fenceable;

	trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
	return 0;
}

void
i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;

	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	/* Queue the GPU write cache flushing we need. */
	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	i915_gem_release_mmap(obj);

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	intel_gtt_chipset_flush();
	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

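/* The flush helpers above each retire a single write domain (GPU, GTT or
 * CPU); the set_to_*_domain functions below combine them to move an object
 * between coherency domains, emitting a domain-change tracepoint each time.
 */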
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj->gtt_space == NULL)
		return -EINVAL;

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;

	if (obj->pending_gpu_write || write) {
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/*
 * Prepare buffer for display plane. Use an uninterruptible wait for any
 * flush, as during modesetting we're not supposed to be interrupted.
 */
int
i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *pipelined)
{
	uint32_t old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj->gtt_space == NULL)
		return -EINVAL;

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;

	/* Currently, we are always called from a non-interruptible context. */
	if (pipelined != obj->ring) {
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}

	i915_gem_object_flush_cpu_write_domain(obj);

	old_read_domains = obj->base.read_domains;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->base.write_domain);

	return 0;
}

int
i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
			  bool interruptible)
{
	int ret;

	if (!obj->active)
		return 0;

	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
					  0, obj->base.write_domain);
		if (ret)
			return ret;
	}

	return i915_gem_object_wait_rendering(obj, interruptible);
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
{
	if (!obj->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
			if (obj->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	kfree(obj->page_cpu_valid);
	obj->page_cpu_valid = NULL;
}

/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid. The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	uint32_t old_read_domains;
	int i, ret;

	if (offset == 0 && size == obj->base.size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj->page_cpu_valid == NULL &&
	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj->page_cpu_valid == NULL) {
		obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
					      GFP_KERNEL);
		if (obj->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj->pages + i, 1);

		obj->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	old_read_domains = obj->base.read_domains;
	obj->base.read_domains |= I915_GEM_DOMAIN_CPU;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->base.write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	u32 seqno = 0;
	int ret;

	if (atomic_read(&dev_priv->mm.wedged))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = 0;
	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
		/* And wait for the seqno passing without holding any locks and
		 * causing extra latency for others. This is safe as the irq
		 * generation is designed to be run atomically and so is
		 * lockless.
		 */
		if (ring->irq_get(ring)) {
			ret = wait_event_interruptible(ring->irq_queue,
						       i915_seqno_passed(ring->get_seqno(ring), seqno)
						       || atomic_read(&dev_priv->mm.wedged));
			ring->irq_put(ring);

			if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
				ret = -EIO;
		}
	}

	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}

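/* Pinning keeps an object resident in the GTT: the first pin moves an
 * inactive object onto the pinned list so the eviction code leaves it
 * alone until it is unpinned again.
 */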
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    uint32_t alignment,
		    bool map_and_fenceable)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
	WARN_ON(i915_verify_lists(dev));

	if (obj->gtt_space != NULL) {
		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
		    (map_and_fenceable && !obj->map_and_fenceable)) {
			WARN(obj->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     obj->gtt_offset, alignment,
			     map_and_fenceable,
			     obj->map_and_fenceable);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment,
						  map_and_fenceable);
		if (ret)
			return ret;
	}

	if (obj->pin_count++ == 0) {
		if (!obj->active)
			list_move_tail(&obj->mm_list,
				       &dev_priv->mm.pinned_list);
	}
	obj->pin_mappable |= map_and_fenceable;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

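/* Drop one pin reference; on the last unpin an inactive object moves back
 * to the inactive list and becomes evictable again.
 */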
void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	WARN_ON(i915_verify_lists(dev));
	BUG_ON(obj->pin_count == 0);
	BUG_ON(obj->gtt_space == NULL);

	if (--obj->pin_count == 0) {
		if (!obj->active)
			list_move_tail(&obj->mm_list,
				       &dev_priv->mm.inactive_list);
		obj->pin_mappable = false;
	}
	WARN_ON(i915_verify_lists(dev));
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (obj->pin_filp != NULL && obj->pin_filp != file) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	obj->user_pin_count++;
	obj->pin_filp = file;
	if (obj->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment, true);
		if (ret)
			goto out;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj->gtt_offset;
out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_filp != file) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj->user_pin_count--;
	if (obj->user_pin_count == 0) {
		obj->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	args->busy = obj->active;
	if (args->busy) {
		/* Unconditionally flush objects, even when the gpu still uses this
		 * object. Userspace calling this function indicates that it wants to
		 * use this buffer sooner rather than later, so issuing the required
		 * flush earlier is beneficial.
		 */
		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
			ret = i915_gem_flush_ring(dev, obj->ring,
						  0, obj->base.write_domain);
		} else if (obj->ring->outstanding_lazy_request ==
			   obj->last_rendering_seqno) {
			struct drm_i915_gem_request *request;

			/* This ring is not being cleared by active usage,
			 * so emit a request to do so.
			 */
			request = kzalloc(sizeof(*request), GFP_KERNEL);
			if (request)
				ret = i915_add_request(dev,
						       NULL, request,
						       obj->ring);
			else
				ret = -ENOMEM;
		}

		/* Update the active list for the hardware's current position.
		 * Otherwise this only updates on a delayed timer or when irqs
		 * are actually unmasked, and our working set ends up being
		 * larger than required.
		 */
		i915_gem_retire_requests_ring(dev, obj->ring);

		args->busy = obj->active;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

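/* I915_MADV_DONTNEED marks an object as a candidate for having its backing
 * storage discarded under memory pressure; if the object is already unbound
 * it is truncated immediately.
 */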
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_count) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer bound, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj) &&
	    obj->gtt_space == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

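/* Allocate a new GEM object: it starts out unbound, in the CPU domain for
 * both reads and writes, and with map_and_fenceable set so that the first
 * bind never triggers a needless unbind.
 */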
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	i915_gem_info_add_obj(dev_priv, size);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->agp_type = AGP_USER_MEMORY;
	obj->base.driver_private = NULL;
	obj->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj->mm_list);
	INIT_LIST_HEAD(&obj->gtt_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->exec_list);
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;

	return obj;
}

Eric Anholt673a3942008-07-30 12:06:12 -07003569int i915_gem_init_object(struct drm_gem_object *obj)
3570{
Daniel Vetterc397b902010-04-09 19:05:07 +00003571 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08003572
Eric Anholt673a3942008-07-30 12:06:12 -07003573 return 0;
3574}
3575
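/*
 * Final stage of object destruction: unbind from the GTT, release the mmap
 * offset and the GEM backing, and free the driver bookkeeping. If the unbind
 * is interrupted by a signal, the object is parked on the deferred_free_list
 * so it can be reaped later instead of being leaked.
 */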
Chris Wilson05394f32010-11-08 19:18:58 +00003576static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01003577{
Chris Wilson05394f32010-11-08 19:18:58 +00003578 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01003579 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbe726152010-07-23 23:18:50 +01003580 int ret;
3581
3582 ret = i915_gem_object_unbind(obj);
3583 if (ret == -ERESTARTSYS) {
Chris Wilson05394f32010-11-08 19:18:58 +00003584 list_move(&obj->mm_list,
Chris Wilsonbe726152010-07-23 23:18:50 +01003585 &dev_priv->mm.deferred_free_list);
3586 return;
3587 }
3588
Chris Wilson05394f32010-11-08 19:18:58 +00003589 if (obj->base.map_list.map)
Chris Wilsonbe726152010-07-23 23:18:50 +01003590 i915_gem_free_mmap_offset(obj);
3591
Chris Wilson05394f32010-11-08 19:18:58 +00003592 drm_gem_object_release(&obj->base);
3593 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01003594
Chris Wilson05394f32010-11-08 19:18:58 +00003595 kfree(obj->page_cpu_valid);
3596 kfree(obj->bit_17);
3597 kfree(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01003598}
3599
Chris Wilson05394f32010-11-08 19:18:58 +00003600void i915_gem_free_object(struct drm_gem_object *gem_obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003601{
Chris Wilson05394f32010-11-08 19:18:58 +00003602 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3603 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003604
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003605 trace_i915_gem_object_destroy(obj);
3606
Chris Wilson05394f32010-11-08 19:18:58 +00003607 while (obj->pin_count > 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003608 i915_gem_object_unpin(obj);
3609
Chris Wilson05394f32010-11-08 19:18:58 +00003610 if (obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003611 i915_gem_detach_phys_object(dev, obj);
3612
Chris Wilsonbe726152010-07-23 23:18:50 +01003613 i915_gem_free_object_tail(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003614}
3615
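/*
 * Quiesce the GPU: wait for all rings to drain, evict inactive buffers when
 * running without KMS, drop the fence registers and tear down the rings.
 * Sets mm.suspended so that no new execbuffers are accepted until the
 * hardware is re-enabled.
 */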
Jesse Barnes5669fca2009-02-17 15:13:31 -08003616int
Eric Anholt673a3942008-07-30 12:06:12 -07003617i915_gem_idle(struct drm_device *dev)
3618{
3619 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00003620 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003621
Keith Packard6dbe2772008-10-14 21:41:13 -07003622 mutex_lock(&dev->struct_mutex);
3623
Chris Wilson87acb0a2010-10-19 10:13:00 +01003624 if (dev_priv->mm.suspended) {
Keith Packard6dbe2772008-10-14 21:41:13 -07003625 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003626 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07003627 }
Eric Anholt673a3942008-07-30 12:06:12 -07003628
Chris Wilson29105cc2010-01-07 10:39:13 +00003629 ret = i915_gpu_idle(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003630 if (ret) {
3631 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003632 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07003633 }
Eric Anholt673a3942008-07-30 12:06:12 -07003634
Chris Wilson29105cc2010-01-07 10:39:13 +00003635 /* Under UMS, be paranoid and evict. */
3636 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
Chris Wilson5eac3ab2010-10-31 08:49:47 +00003637 ret = i915_gem_evict_inactive(dev, false);
Chris Wilson29105cc2010-01-07 10:39:13 +00003638 if (ret) {
3639 mutex_unlock(&dev->struct_mutex);
3640 return ret;
3641 }
3642 }
3643
Chris Wilson312817a2010-11-22 11:50:11 +00003644 i915_gem_reset_fences(dev);
3645
Chris Wilson29105cc2010-01-07 10:39:13 +00003646 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3647 * We need to replace this with a semaphore, or something.
3648 * And not confound mm.suspended!
3649 */
3650 dev_priv->mm.suspended = 1;
Daniel Vetterbc0c7f12010-08-20 18:18:48 +02003651 del_timer_sync(&dev_priv->hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00003652
3653 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003654 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00003655
Keith Packard6dbe2772008-10-14 21:41:13 -07003656 mutex_unlock(&dev->struct_mutex);
3657
Chris Wilson29105cc2010-01-07 10:39:13 +00003658 /* Cancel the retire work handler, which should be idle now. */
3659 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3660
Eric Anholt673a3942008-07-30 12:06:12 -07003661 return 0;
3662}
3663
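/*
 * Bring up the command rings: the render ring always, plus the BSD and BLT
 * rings on hardware that has them. On failure, the rings that were already
 * initialised are torn down again in reverse order.
 */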
Eric Anholt673a3942008-07-30 12:06:12 -07003664int
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003665i915_gem_init_ringbuffer(struct drm_device *dev)
3666{
3667 drm_i915_private_t *dev_priv = dev->dev_private;
3668 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003669
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003670 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003671 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00003672 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003673
3674 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003675 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003676 if (ret)
3677 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08003678 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01003679
Chris Wilson549f7362010-10-19 11:19:32 +01003680 if (HAS_BLT(dev)) {
3681 ret = intel_init_blt_ring_buffer(dev);
3682 if (ret)
3683 goto cleanup_bsd_ring;
3684 }
3685
Chris Wilson6f392d52010-08-07 11:01:22 +01003686 dev_priv->next_seqno = 1;
3687
Chris Wilson68f95ba2010-05-27 13:18:22 +01003688 return 0;
3689
Chris Wilson549f7362010-10-19 11:19:32 +01003690cleanup_bsd_ring:
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003691 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003692cleanup_render_ring:
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003693 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003694 return ret;
3695}
3696
3697void
3698i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3699{
3700 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003701 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003702
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003703 for (i = 0; i < I915_NUM_RINGS; i++)
3704 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003705}
3706
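/*
 * Re-enable GEM when a DRM client takes over the VT under UMS: clear the
 * wedged state, reinitialise the rings, check that all bookkeeping lists are
 * empty and install the interrupt handler. A no-op under KMS, where the
 * driver retains control of the hardware itself.
 */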
3707int
Eric Anholt673a3942008-07-30 12:06:12 -07003708i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3709 struct drm_file *file_priv)
3710{
3711 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003712 int ret, i;
Eric Anholt673a3942008-07-30 12:06:12 -07003713
Jesse Barnes79e53942008-11-07 14:24:08 -08003714 if (drm_core_check_feature(dev, DRIVER_MODESET))
3715 return 0;
3716
Ben Gamariba1234d2009-09-14 17:48:47 -04003717 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003718 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04003719 atomic_set(&dev_priv->mm.wedged, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003720 }
3721
Eric Anholt673a3942008-07-30 12:06:12 -07003722 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003723 dev_priv->mm.suspended = 0;
3724
3725 ret = i915_gem_init_ringbuffer(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08003726 if (ret != 0) {
3727 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003728 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08003729 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003730
Chris Wilson69dc4982010-10-19 10:36:51 +01003731 BUG_ON(!list_empty(&dev_priv->mm.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07003732 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3733 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003734 for (i = 0; i < I915_NUM_RINGS; i++) {
3735 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3736 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3737 }
Eric Anholt673a3942008-07-30 12:06:12 -07003738 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003739
Chris Wilson5f353082010-06-07 14:03:03 +01003740 ret = drm_irq_install(dev);
3741 if (ret)
3742 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003743
Eric Anholt673a3942008-07-30 12:06:12 -07003744 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01003745
3746cleanup_ringbuffer:
3747 mutex_lock(&dev->struct_mutex);
3748 i915_gem_cleanup_ringbuffer(dev);
3749 dev_priv->mm.suspended = 1;
3750 mutex_unlock(&dev->struct_mutex);
3751
3752 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003753}
3754
3755int
3756i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3757 struct drm_file *file_priv)
3758{
Jesse Barnes79e53942008-11-07 14:24:08 -08003759 if (drm_core_check_feature(dev, DRIVER_MODESET))
3760 return 0;
3761
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003762 drm_irq_uninstall(dev);
Linus Torvaldse6890f62009-09-08 17:09:24 -07003763 return i915_gem_idle(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003764}
3765
3766void
3767i915_gem_lastclose(struct drm_device *dev)
3768{
3769 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003770
Eric Anholte806b492009-01-22 09:56:58 -08003771 if (drm_core_check_feature(dev, DRIVER_MODESET))
3772 return;
3773
Keith Packard6dbe2772008-10-14 21:41:13 -07003774 ret = i915_gem_idle(dev);
3775 if (ret)
3776 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003777}
3778
Chris Wilson64193402010-10-24 12:38:05 +01003779static void
3780init_ring_lists(struct intel_ring_buffer *ring)
3781{
3782 INIT_LIST_HEAD(&ring->active_list);
3783 INIT_LIST_HEAD(&ring->request_list);
3784 INIT_LIST_HEAD(&ring->gpu_write_list);
3785}
3786
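/*
 * One-time GEM setup at driver load: initialise the memory-management lists,
 * the retire work handler, the per-generation fence register state and the
 * shrinker hook used to reclaim inactive objects under memory pressure.
 */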
Eric Anholt673a3942008-07-30 12:06:12 -07003787void
3788i915_gem_load(struct drm_device *dev)
3789{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003790 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07003791 drm_i915_private_t *dev_priv = dev->dev_private;
3792
Chris Wilson69dc4982010-10-19 10:36:51 +01003793 INIT_LIST_HEAD(&dev_priv->mm.active_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003794 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3795 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003796 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07003797 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilsonbe726152010-07-23 23:18:50 +01003798 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003799 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003800 for (i = 0; i < I915_NUM_RINGS; i++)
3801 init_ring_lists(&dev_priv->ring[i]);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02003802 for (i = 0; i < 16; i++)
3803 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003804 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3805 i915_gem_retire_work_handler);
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003806 init_completion(&dev_priv->error_completion);
Chris Wilson31169712009-09-14 16:50:28 +01003807
Dave Airlie94400122010-07-20 13:15:31 +10003808 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3809 if (IS_GEN3(dev)) {
3810 u32 tmp = I915_READ(MI_ARB_STATE);
3811 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3812 /* arb state is a masked write, so set bit + bit in mask */
3813 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3814 I915_WRITE(MI_ARB_STATE, tmp);
3815 }
3816 }
3817
Chris Wilson72bfa192010-12-19 11:42:05 +00003818 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3819
Jesse Barnesde151cf2008-11-12 10:03:55 -08003820 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08003821 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3822 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003823
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003824 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08003825 dev_priv->num_fence_regs = 16;
3826 else
3827 dev_priv->num_fence_regs = 8;
3828
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003829 /* Initialize fence registers to zero */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003830 switch (INTEL_INFO(dev)->gen) {
3831 case 6:
3832 for (i = 0; i < 16; i++)
3833 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
3834 break;
3835 case 5:
3836 case 4:
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003837 for (i = 0; i < 16; i++)
3838 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003839 break;
3840 case 3:
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003841 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3842 for (i = 0; i < 8; i++)
3843 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
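		/* fall through - gen3 also writes the eight 830-style registers */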
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003844 case 2:
3845 for (i = 0; i < 8; i++)
3846 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
3847 break;
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003848 }
Eric Anholt673a3942008-07-30 12:06:12 -07003849 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003850 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01003851
3852 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3853 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3854 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07003855}
Dave Airlie71acb5e2008-12-30 20:31:46 +10003856
3857/*
 3858 * Create a physically contiguous memory object for this object,
 3859 * e.g. for cursor and overlay registers.
3860 */
Chris Wilson995b6762010-08-20 13:23:26 +01003861static int i915_gem_init_phys_object(struct drm_device *dev,
3862 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003863{
3864 drm_i915_private_t *dev_priv = dev->dev_private;
3865 struct drm_i915_gem_phys_object *phys_obj;
3866 int ret;
3867
3868 if (dev_priv->mm.phys_objs[id - 1] || !size)
3869 return 0;
3870
Eric Anholt9a298b22009-03-24 12:23:04 -07003871 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003872 if (!phys_obj)
3873 return -ENOMEM;
3874
3875 phys_obj->id = id;
3876
Chris Wilson6eeefaf2010-08-07 11:01:39 +01003877 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003878 if (!phys_obj->handle) {
3879 ret = -ENOMEM;
3880 goto kfree_obj;
3881 }
3882#ifdef CONFIG_X86
3883 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3884#endif
3885
3886 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3887
3888 return 0;
3889kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07003890 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003891 return ret;
3892}
3893
Chris Wilson995b6762010-08-20 13:23:26 +01003894static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003895{
3896 drm_i915_private_t *dev_priv = dev->dev_private;
3897 struct drm_i915_gem_phys_object *phys_obj;
3898
3899 if (!dev_priv->mm.phys_objs[id - 1])
3900 return;
3901
3902 phys_obj = dev_priv->mm.phys_objs[id - 1];
3903 if (phys_obj->cur_obj) {
3904 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3905 }
3906
3907#ifdef CONFIG_X86
3908 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3909#endif
3910 drm_pci_free(dev, phys_obj->handle);
3911 kfree(phys_obj);
3912 dev_priv->mm.phys_objs[id - 1] = NULL;
3913}
3914
3915void i915_gem_free_all_phys_object(struct drm_device *dev)
3916{
3917 int i;
3918
Dave Airlie260883c2009-01-22 17:58:49 +10003919 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003920 i915_gem_free_phys_object(dev, i);
3921}
3922
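/*
 * Copy the contents of the attached phys object back into the object's shmem
 * pages and drop the association, flushing CPU caches and the chipset write
 * buffers so the data is visible before the pages are released.
 */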
3923void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00003924 struct drm_i915_gem_object *obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003925{
Chris Wilson05394f32010-11-08 19:18:58 +00003926 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Chris Wilsone5281cc2010-10-28 13:45:36 +01003927 char *vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003928 int i;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003929 int page_count;
3930
Chris Wilson05394f32010-11-08 19:18:58 +00003931 if (!obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003932 return;
Chris Wilson05394f32010-11-08 19:18:58 +00003933 vaddr = obj->phys_obj->handle->vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003934
Chris Wilson05394f32010-11-08 19:18:58 +00003935 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003936 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01003937 struct page *page = read_cache_page_gfp(mapping, i,
3938 GFP_HIGHUSER | __GFP_RECLAIMABLE);
3939 if (!IS_ERR(page)) {
3940 char *dst = kmap_atomic(page);
3941 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
3942 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003943
Chris Wilsone5281cc2010-10-28 13:45:36 +01003944 drm_clflush_pages(&page, 1);
3945
3946 set_page_dirty(page);
3947 mark_page_accessed(page);
3948 page_cache_release(page);
3949 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10003950 }
Daniel Vetter40ce6572010-11-05 18:12:18 +01003951 intel_gtt_chipset_flush();
Chris Wilsond78b47b2009-06-17 21:52:49 +01003952
Chris Wilson05394f32010-11-08 19:18:58 +00003953 obj->phys_obj->cur_obj = NULL;
3954 obj->phys_obj = NULL;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003955}
3956
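/*
 * Bind an object to one of the driver's physically contiguous buffers
 * (for hardware that fetches cursor and overlay data by physical address),
 * allocating the phys object on first use and copying the object's current
 * shmem contents into it.
 */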
3957int
3958i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00003959 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01003960 int id,
3961 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003962{
Chris Wilson05394f32010-11-08 19:18:58 +00003963 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003964 drm_i915_private_t *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003965 int ret = 0;
3966 int page_count;
3967 int i;
3968
3969 if (id > I915_MAX_PHYS_OBJECT)
3970 return -EINVAL;
3971
Chris Wilson05394f32010-11-08 19:18:58 +00003972 if (obj->phys_obj) {
3973 if (obj->phys_obj->id == id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003974 return 0;
3975 i915_gem_detach_phys_object(dev, obj);
3976 }
3977
Dave Airlie71acb5e2008-12-30 20:31:46 +10003978 /* create a new object */
3979 if (!dev_priv->mm.phys_objs[id - 1]) {
3980 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson05394f32010-11-08 19:18:58 +00003981 obj->base.size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003982 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00003983 DRM_ERROR("failed to init phys object %d size: %zu\n",
3984 id, obj->base.size);
Chris Wilsone5281cc2010-10-28 13:45:36 +01003985 return ret;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003986 }
3987 }
3988
3989 /* bind to the object */
Chris Wilson05394f32010-11-08 19:18:58 +00003990 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3991 obj->phys_obj->cur_obj = obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003992
Chris Wilson05394f32010-11-08 19:18:58 +00003993 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003994
3995 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01003996 struct page *page;
3997 char *dst, *src;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003998
Chris Wilsone5281cc2010-10-28 13:45:36 +01003999 page = read_cache_page_gfp(mapping, i,
4000 GFP_HIGHUSER | __GFP_RECLAIMABLE);
4001 if (IS_ERR(page))
4002 return PTR_ERR(page);
4003
Chris Wilsonff75b9b2010-10-30 22:52:31 +01004004 src = kmap_atomic(page);
Chris Wilson05394f32010-11-08 19:18:58 +00004005 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004006 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004007 kunmap_atomic(src);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004008
4009 mark_page_accessed(page);
4010 page_cache_release(page);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004011 }
4012
4013 return 0;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004014}
4015
4016static int
Chris Wilson05394f32010-11-08 19:18:58 +00004017i915_gem_phys_pwrite(struct drm_device *dev,
4018 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +10004019 struct drm_i915_gem_pwrite *args,
4020 struct drm_file *file_priv)
4021{
Chris Wilson05394f32010-11-08 19:18:58 +00004022 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004023 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004024
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004025 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4026 unsigned long unwritten;
4027
4028 /* The physical object once assigned is fixed for the lifetime
4029 * of the obj, so we can safely drop the lock and continue
4030 * to access vaddr.
4031 */
4032 mutex_unlock(&dev->struct_mutex);
4033 unwritten = copy_from_user(vaddr, user_data, args->size);
4034 mutex_lock(&dev->struct_mutex);
4035 if (unwritten)
4036 return -EFAULT;
4037 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004038
Daniel Vetter40ce6572010-11-05 18:12:18 +01004039 intel_gtt_chipset_flush();
Dave Airlie71acb5e2008-12-30 20:31:46 +10004040 return 0;
4041}
Eric Anholtb9624422009-06-03 07:27:35 +00004042
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004043void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004044{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004045 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00004046
4047 /* Clean up our request list when the client is going away, so that
4048 * later retire_requests won't dereference our soon-to-be-gone
4049 * file_priv.
4050 */
Chris Wilson1c255952010-09-26 11:03:27 +01004051 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004052 while (!list_empty(&file_priv->mm.request_list)) {
4053 struct drm_i915_gem_request *request;
4054
4055 request = list_first_entry(&file_priv->mm.request_list,
4056 struct drm_i915_gem_request,
4057 client_list);
4058 list_del(&request->client_list);
4059 request->file_priv = NULL;
4060 }
Chris Wilson1c255952010-09-26 11:03:27 +01004061 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00004062}
Chris Wilson31169712009-09-14 16:50:28 +01004063
Chris Wilson31169712009-09-14 16:50:28 +01004064static int
Chris Wilson1637ef42010-04-20 17:10:35 +01004065i915_gpu_is_active(struct drm_device *dev)
4066{
4067 drm_i915_private_t *dev_priv = dev->dev_private;
4068 int lists_empty;
4069
Chris Wilson1637ef42010-04-20 17:10:35 +01004070 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
Chris Wilson17250b72010-10-28 12:51:39 +01004071 list_empty(&dev_priv->mm.active_list);
Chris Wilson1637ef42010-04-20 17:10:35 +01004072
4073 return !lists_empty;
4074}
4075
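/*
 * Shrinker callback invoked by the VM under memory pressure. With
 * nr_to_scan == 0 it merely reports how many inactive objects could be
 * reclaimed; otherwise it unbinds purgeable objects first, then any other
 * inactive objects, and as a last resort waits for the GPU to idle and
 * rescans.
 */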
4076static int
Chris Wilson17250b72010-10-28 12:51:39 +01004077i915_gem_inactive_shrink(struct shrinker *shrinker,
4078 int nr_to_scan,
4079 gfp_t gfp_mask)
Chris Wilson31169712009-09-14 16:50:28 +01004080{
Chris Wilson17250b72010-10-28 12:51:39 +01004081 struct drm_i915_private *dev_priv =
4082 container_of(shrinker,
4083 struct drm_i915_private,
4084 mm.inactive_shrinker);
4085 struct drm_device *dev = dev_priv->dev;
4086 struct drm_i915_gem_object *obj, *next;
4087 int cnt;
4088
4089 if (!mutex_trylock(&dev->struct_mutex))
Chris Wilsonbbe2e112010-10-28 22:35:07 +01004090 return 0;
Chris Wilson31169712009-09-14 16:50:28 +01004091
4092 /* "fast-path" to count number of available objects */
4093 if (nr_to_scan == 0) {
Chris Wilson17250b72010-10-28 12:51:39 +01004094 cnt = 0;
4095 list_for_each_entry(obj,
4096 &dev_priv->mm.inactive_list,
4097 mm_list)
4098 cnt++;
4099 mutex_unlock(&dev->struct_mutex);
4100 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01004101 }
4102
Chris Wilson1637ef42010-04-20 17:10:35 +01004103rescan:
Chris Wilson31169712009-09-14 16:50:28 +01004104 /* first scan for clean buffers */
Chris Wilson17250b72010-10-28 12:51:39 +01004105 i915_gem_retire_requests(dev);
Chris Wilson31169712009-09-14 16:50:28 +01004106
Chris Wilson17250b72010-10-28 12:51:39 +01004107 list_for_each_entry_safe(obj, next,
4108 &dev_priv->mm.inactive_list,
4109 mm_list) {
4110 if (i915_gem_object_is_purgeable(obj)) {
Chris Wilson20217462010-11-23 15:26:33 +00004111 if (i915_gem_object_unbind(obj) == 0 &&
4112 --nr_to_scan == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01004113 break;
Chris Wilson31169712009-09-14 16:50:28 +01004114 }
Chris Wilson31169712009-09-14 16:50:28 +01004115 }
4116
4117 /* second pass, evict/count anything still on the inactive list */
Chris Wilson17250b72010-10-28 12:51:39 +01004118 cnt = 0;
4119 list_for_each_entry_safe(obj, next,
4120 &dev_priv->mm.inactive_list,
4121 mm_list) {
Chris Wilson20217462010-11-23 15:26:33 +00004122 if (nr_to_scan &&
4123 i915_gem_object_unbind(obj) == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01004124 nr_to_scan--;
Chris Wilson20217462010-11-23 15:26:33 +00004125 else
Chris Wilson17250b72010-10-28 12:51:39 +01004126 cnt++;
Chris Wilson31169712009-09-14 16:50:28 +01004127 }
4128
Chris Wilson17250b72010-10-28 12:51:39 +01004129 if (nr_to_scan && i915_gpu_is_active(dev)) {
Chris Wilson1637ef42010-04-20 17:10:35 +01004130 /*
4131 * We are desperate for pages, so as a last resort, wait
4132 * for the GPU to finish and discard whatever we can.
 4133		 * This dramatically reduces the number of OOM-killer
 4134		 * events whilst running the GPU aggressively.
4135 */
Chris Wilson17250b72010-10-28 12:51:39 +01004136 if (i915_gpu_idle(dev) == 0)
Chris Wilson1637ef42010-04-20 17:10:35 +01004137 goto rescan;
4138 }
Chris Wilson17250b72010-10-28 12:51:39 +01004139 mutex_unlock(&dev->struct_mutex);
4140 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01004141}