/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
					     bool write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
				       unsigned alignment,
				       bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    int nr_to_scan,
				    gfp_t gfp_mask);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

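/* Check whether the GPU is wedged (hung beyond recovery). If a reset is in
 * progress, wait for it to complete; if the GPU is still wedged afterwards,
 * put back the completion token we consumed and report -EIO.
 */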
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	/* Success, we reset the GPU! */
	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/* GPU is hung, bump the completion count to account for
	 * the token we just consumed so that we never hit zero and
	 * end up waiting upon a subsequent completion event that
	 * will never happen.
	 */
	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return -EIO;
}

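/* Acquire struct_mutex for a userspace caller: refuse if the GPU is wedged,
 * and re-check after taking the lock in case a hang was declared while we
 * slept waiting for it.
 */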
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		mutex_unlock(&dev->struct_mutex);
		return -EAGAIN;
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

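/* An object is "inactive" when it is bound into the GTT but is neither in use
 * by the GPU nor pinned.
 */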
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

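/* Set up the range of the GTT that GEM manages: initialise the drm_mm
 * allocator for [start, end) and record how much of that range is mappable
 * through the CPU-visible aperture.
 */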
void i915_gem_do_init(struct drm_device *dev,
		      unsigned long start,
		      unsigned long mappable_end,
		      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
}

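/* ioctl entry point used during init: userspace hands us the GTT range that
 * GEM should manage. Both ends must be page aligned.
 */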
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	args->handle = handle;
	return 0;
}

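/* Tiled objects on chipsets that swizzle on physical address bit 17 cannot be
 * copied through the CPU without manual (un)swizzling, so pread/pwrite must
 * take the bit-17-aware slow paths for them.
 */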
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

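/* Copy a span of bytes between two pages via kmap; used by the slow
 * pread/pwrite paths while the user's pages are pinned.
 */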
static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

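/* As slow_shmem_copy, but for object pages whose physical address has bit 17
 * set the copy goes through a view with bit 6 of the offset flipped, undoing
 * the hardware's bit-17 swizzling.
 */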
static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page);
		ret = __copy_to_user_inatomic(user_data,
					      vaddr + page_offset,
					      page_length);
		kunmap_atomic(vaddr);

		mark_page_accessed(page);
		page_cache_release(page);
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

363 * This is the fallback shmem pread path, which allocates temporary storage
364 * in kernel space to copy_to_user into outside of the struct_mutex, so we
365 * can copy out of the object's backing pages while holding the struct mutex
366 * and not take page faults.
367 */
368static int
Chris Wilson05394f32010-11-08 19:18:58 +0000369i915_gem_shmem_pread_slow(struct drm_device *dev,
370 struct drm_i915_gem_object *obj,
Eric Anholteb014592009-03-10 11:44:52 -0700371 struct drm_i915_gem_pread *args,
Chris Wilson05394f32010-11-08 19:18:58 +0000372 struct drm_file *file)
Eric Anholteb014592009-03-10 11:44:52 -0700373{
Chris Wilson05394f32010-11-08 19:18:58 +0000374 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Eric Anholteb014592009-03-10 11:44:52 -0700375 struct mm_struct *mm = current->mm;
376 struct page **user_pages;
377 ssize_t remain;
378 loff_t offset, pinned_pages, i;
379 loff_t first_data_page, last_data_page, num_pages;
Chris Wilsone5281cc2010-10-28 13:45:36 +0100380 int shmem_page_offset;
381 int data_page_index, data_page_offset;
Eric Anholteb014592009-03-10 11:44:52 -0700382 int page_length;
383 int ret;
384 uint64_t data_ptr = args->data_ptr;
Eric Anholt280b7132009-03-12 16:56:27 -0700385 int do_bit17_swizzling;
Eric Anholteb014592009-03-10 11:44:52 -0700386
387 remain = args->size;
388
389 /* Pin the user pages containing the data. We can't fault while
390 * holding the struct mutex, yet we want to hold it while
391 * dereferencing the user data.
392 */
393 first_data_page = data_ptr / PAGE_SIZE;
394 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
395 num_pages = last_data_page - first_data_page + 1;
396
Chris Wilson4f27b752010-10-14 15:26:45 +0100397 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
Eric Anholteb014592009-03-10 11:44:52 -0700398 if (user_pages == NULL)
399 return -ENOMEM;
400
Chris Wilson4f27b752010-10-14 15:26:45 +0100401 mutex_unlock(&dev->struct_mutex);
Eric Anholteb014592009-03-10 11:44:52 -0700402 down_read(&mm->mmap_sem);
403 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
Eric Anholte5e9ecd2009-04-07 16:01:22 -0700404 num_pages, 1, 0, user_pages, NULL);
Eric Anholteb014592009-03-10 11:44:52 -0700405 up_read(&mm->mmap_sem);
Chris Wilson4f27b752010-10-14 15:26:45 +0100406 mutex_lock(&dev->struct_mutex);
Eric Anholteb014592009-03-10 11:44:52 -0700407 if (pinned_pages < num_pages) {
408 ret = -EFAULT;
Chris Wilson4f27b752010-10-14 15:26:45 +0100409 goto out;
Eric Anholteb014592009-03-10 11:44:52 -0700410 }
411
Chris Wilson4f27b752010-10-14 15:26:45 +0100412 ret = i915_gem_object_set_cpu_read_domain_range(obj,
413 args->offset,
Eric Anholteb014592009-03-10 11:44:52 -0700414 args->size);
Chris Wilson4f27b752010-10-14 15:26:45 +0100415 if (ret)
416 goto out;
417
418 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700419
Eric Anholteb014592009-03-10 11:44:52 -0700420 offset = args->offset;
421
422 while (remain > 0) {
Chris Wilsone5281cc2010-10-28 13:45:36 +0100423 struct page *page;
424
Eric Anholteb014592009-03-10 11:44:52 -0700425 /* Operation in this page
426 *
Eric Anholteb014592009-03-10 11:44:52 -0700427 * shmem_page_offset = offset within page in shmem file
428 * data_page_index = page number in get_user_pages return
429 * data_page_offset = offset with data_page_index page.
430 * page_length = bytes to copy for this page
431 */
Eric Anholteb014592009-03-10 11:44:52 -0700432 shmem_page_offset = offset & ~PAGE_MASK;
433 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
434 data_page_offset = data_ptr & ~PAGE_MASK;
435
436 page_length = remain;
437 if ((shmem_page_offset + page_length) > PAGE_SIZE)
438 page_length = PAGE_SIZE - shmem_page_offset;
439 if ((data_page_offset + page_length) > PAGE_SIZE)
440 page_length = PAGE_SIZE - data_page_offset;
441
Chris Wilsone5281cc2010-10-28 13:45:36 +0100442 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
443 GFP_HIGHUSER | __GFP_RECLAIMABLE);
444 if (IS_ERR(page))
445 return PTR_ERR(page);
446
Eric Anholt280b7132009-03-12 16:56:27 -0700447 if (do_bit17_swizzling) {
Chris Wilsone5281cc2010-10-28 13:45:36 +0100448 slow_shmem_bit17_copy(page,
Eric Anholt280b7132009-03-12 16:56:27 -0700449 shmem_page_offset,
Chris Wilson99a03df2010-05-27 14:15:34 +0100450 user_pages[data_page_index],
451 data_page_offset,
452 page_length,
453 1);
454 } else {
455 slow_shmem_copy(user_pages[data_page_index],
456 data_page_offset,
Chris Wilsone5281cc2010-10-28 13:45:36 +0100457 page,
Chris Wilson99a03df2010-05-27 14:15:34 +0100458 shmem_page_offset,
459 page_length);
Eric Anholt280b7132009-03-12 16:56:27 -0700460 }
Eric Anholteb014592009-03-10 11:44:52 -0700461
Chris Wilsone5281cc2010-10-28 13:45:36 +0100462 mark_page_accessed(page);
463 page_cache_release(page);
464
Eric Anholteb014592009-03-10 11:44:52 -0700465 remain -= page_length;
466 data_ptr += page_length;
467 offset += page_length;
468 }
469
Chris Wilson4f27b752010-10-14 15:26:45 +0100470out:
Eric Anholteb014592009-03-10 11:44:52 -0700471 for (i = 0; i < pinned_pages; i++) {
472 SetPageDirty(user_pages[i]);
Chris Wilsone5281cc2010-10-28 13:45:36 +0100473 mark_page_accessed(user_pages[i]);
Eric Anholteb014592009-03-10 11:44:52 -0700474 page_cache_release(user_pages[i]);
475 }
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700476 drm_free_large(user_pages);
Eric Anholteb014592009-03-10 11:44:52 -0700477
478 return ret;
479}
480
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	ret = -EFAULT;
	if (!i915_gem_object_needs_bit17_swizzle(obj))
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

588 * This is the fast pwrite path, where we copy the data directly from the
589 * user into the GTT, uncached.
590 */
Eric Anholt673a3942008-07-30 12:06:12 -0700591static int
Chris Wilson05394f32010-11-08 19:18:58 +0000592i915_gem_gtt_pwrite_fast(struct drm_device *dev,
593 struct drm_i915_gem_object *obj,
Eric Anholt3de09aa2009-03-09 09:42:23 -0700594 struct drm_i915_gem_pwrite *args,
Chris Wilson05394f32010-11-08 19:18:58 +0000595 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700596{
Keith Packard0839ccb2008-10-30 19:38:48 -0700597 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -0700598 ssize_t remain;
Keith Packard0839ccb2008-10-30 19:38:48 -0700599 loff_t offset, page_base;
Eric Anholt673a3942008-07-30 12:06:12 -0700600 char __user *user_data;
Keith Packard0839ccb2008-10-30 19:38:48 -0700601 int page_offset, page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700602
603 user_data = (char __user *) (uintptr_t) args->data_ptr;
604 remain = args->size;
Eric Anholt673a3942008-07-30 12:06:12 -0700605
Chris Wilson05394f32010-11-08 19:18:58 +0000606 offset = obj->gtt_offset + args->offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700607
608 while (remain > 0) {
609 /* Operation in this page
610 *
Keith Packard0839ccb2008-10-30 19:38:48 -0700611 * page_base = page offset within aperture
612 * page_offset = offset within page
613 * page_length = bytes to copy for this page
Eric Anholt673a3942008-07-30 12:06:12 -0700614 */
Keith Packard0839ccb2008-10-30 19:38:48 -0700615 page_base = (offset & ~(PAGE_SIZE-1));
616 page_offset = offset & (PAGE_SIZE-1);
617 page_length = remain;
618 if ((page_offset + remain) > PAGE_SIZE)
619 page_length = PAGE_SIZE - page_offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700620
Keith Packard0839ccb2008-10-30 19:38:48 -0700621 /* If we get a fault while copying data, then (presumably) our
Eric Anholt3de09aa2009-03-09 09:42:23 -0700622 * source page isn't available. Return the error and we'll
623 * retry in the slow path.
Keith Packard0839ccb2008-10-30 19:38:48 -0700624 */
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100625 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
626 page_offset, user_data, page_length))
627
628 return -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -0700629
Keith Packard0839ccb2008-10-30 19:38:48 -0700630 remain -= page_length;
631 user_data += page_length;
632 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700633 }
Eric Anholt673a3942008-07-30 12:06:12 -0700634
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100635 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -0700636}
637
/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_pages;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

733 * This is the fast shmem pwrite path, which attempts to directly
734 * copy_from_user into the kmapped pages backing the object.
735 */
Eric Anholt673a3942008-07-30 12:06:12 -0700736static int
Chris Wilson05394f32010-11-08 19:18:58 +0000737i915_gem_shmem_pwrite_fast(struct drm_device *dev,
738 struct drm_i915_gem_object *obj,
Eric Anholt40123c12009-03-09 13:42:30 -0700739 struct drm_i915_gem_pwrite *args,
Chris Wilson05394f32010-11-08 19:18:58 +0000740 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700741{
Chris Wilson05394f32010-11-08 19:18:58 +0000742 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Eric Anholt40123c12009-03-09 13:42:30 -0700743 ssize_t remain;
Chris Wilsone5281cc2010-10-28 13:45:36 +0100744 loff_t offset;
Eric Anholt40123c12009-03-09 13:42:30 -0700745 char __user *user_data;
746 int page_offset, page_length;
Eric Anholt40123c12009-03-09 13:42:30 -0700747
748 user_data = (char __user *) (uintptr_t) args->data_ptr;
749 remain = args->size;
Eric Anholt673a3942008-07-30 12:06:12 -0700750
Eric Anholt673a3942008-07-30 12:06:12 -0700751 offset = args->offset;
Chris Wilson05394f32010-11-08 19:18:58 +0000752 obj->dirty = 1;
Eric Anholt673a3942008-07-30 12:06:12 -0700753
Eric Anholt40123c12009-03-09 13:42:30 -0700754 while (remain > 0) {
Chris Wilsone5281cc2010-10-28 13:45:36 +0100755 struct page *page;
756 char *vaddr;
757 int ret;
758
Eric Anholt40123c12009-03-09 13:42:30 -0700759 /* Operation in this page
760 *
Eric Anholt40123c12009-03-09 13:42:30 -0700761 * page_offset = offset within page
762 * page_length = bytes to copy for this page
763 */
Eric Anholt40123c12009-03-09 13:42:30 -0700764 page_offset = offset & (PAGE_SIZE-1);
765 page_length = remain;
766 if ((page_offset + remain) > PAGE_SIZE)
767 page_length = PAGE_SIZE - page_offset;
768
Chris Wilsone5281cc2010-10-28 13:45:36 +0100769 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
770 GFP_HIGHUSER | __GFP_RECLAIMABLE);
771 if (IS_ERR(page))
772 return PTR_ERR(page);
773
774 vaddr = kmap_atomic(page, KM_USER0);
775 ret = __copy_from_user_inatomic(vaddr + page_offset,
776 user_data,
777 page_length);
778 kunmap_atomic(vaddr, KM_USER0);
779
780 set_page_dirty(page);
781 mark_page_accessed(page);
782 page_cache_release(page);
783
784 /* If we get a fault while copying data, then (presumably) our
785 * source page isn't available. Return the error and we'll
786 * retry in the slow path.
787 */
788 if (ret)
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100789 return -EFAULT;
Eric Anholt40123c12009-03-09 13:42:30 -0700790
791 remain -= page_length;
792 user_data += page_length;
793 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700794 }
795
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100796 return 0;
Eric Anholt40123c12009-03-09 13:42:30 -0700797}
798
/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(page,
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
	else if (obj->tiling_mode == I915_TILING_NONE &&
		 obj->gtt_space &&
		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_object_pin(obj, 0, true);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret)
			goto out_unpin;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
		i915_gem_object_unpin(obj);
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			goto out;

		ret = -EFAULT;
		if (!i915_gem_object_needs_bit17_swizzle(obj))
			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	intel_mark_busy(dev, obj);

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg =
				&dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	/* Maintain LRU order of "inactive" objects */
	if (ret == 0 && i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (obj->size > dev_priv->mm.gtt_mappable_end) {
		drm_gem_object_unreference_unlocked(obj);
		return -E2BIG;
	}

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);

	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unlock;

	/* Need a new fence register? */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, true);
		if (ret)
			goto unlock;
	}

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -EAGAIN:
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

1241/**
1242 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1243 * @obj: obj in question
1244 *
1245 * GEM memory mapping works by handing back to userspace a fake mmap offset
1246 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1247 * up the object based on the offset and sets up the various memory mapping
1248 * structures.
1249 *
1250 * This routine allocates and attaches a fake offset for @obj.
1251 */
1252static int
Chris Wilson05394f32010-11-08 19:18:58 +00001253i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001254{
Chris Wilson05394f32010-11-08 19:18:58 +00001255 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001256 struct drm_gem_mm *mm = dev->mm_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001257 struct drm_map_list *list;
Benjamin Herrenschmidtf77d3902009-02-02 16:55:46 +11001258 struct drm_local_map *map;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001259 int ret = 0;
1260
1261 /* Set the object up for mmap'ing */
Chris Wilson05394f32010-11-08 19:18:58 +00001262 list = &obj->base.map_list;
Eric Anholt9a298b22009-03-24 12:23:04 -07001263 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001264 if (!list->map)
1265 return -ENOMEM;
1266
1267 map = list->map;
1268 map->type = _DRM_GEM;
Chris Wilson05394f32010-11-08 19:18:58 +00001269 map->size = obj->base.size;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001270 map->handle = obj;
1271
1272 /* Get a DRM GEM mmap offset allocated... */
1273 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
Chris Wilson05394f32010-11-08 19:18:58 +00001274 obj->base.size / PAGE_SIZE,
1275 0, 0);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001276 if (!list->file_offset_node) {
Chris Wilson05394f32010-11-08 19:18:58 +00001277 DRM_ERROR("failed to allocate offset for bo %d\n",
1278 obj->base.name);
Chris Wilson9e0ae5342010-09-21 15:05:24 +01001279 ret = -ENOSPC;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001280 goto out_free_list;
1281 }
1282
1283 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
Chris Wilson05394f32010-11-08 19:18:58 +00001284 obj->base.size / PAGE_SIZE,
1285 0);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001286 if (!list->file_offset_node) {
1287 ret = -ENOMEM;
1288 goto out_free_list;
1289 }
1290
1291 list->hash.key = list->file_offset_node->start;
Chris Wilson9e0ae5342010-09-21 15:05:24 +01001292 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1293 if (ret) {
Jesse Barnesde151cf2008-11-12 10:03:55 -08001294 DRM_ERROR("failed to add to map hash\n");
1295 goto out_free_mm;
1296 }
1297
Jesse Barnesde151cf2008-11-12 10:03:55 -08001298 return 0;
1299
1300out_free_mm:
1301 drm_mm_put_block(list->file_offset_node);
1302out_free_list:
Eric Anholt9a298b22009-03-24 12:23:04 -07001303 kfree(list->map);
Chris Wilson39a01d12010-10-28 13:03:06 +01001304 list->map = NULL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001305
1306 return ret;
1307}
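/*
 * The fake offset eventually reported to userspace is simply the start of
 * the drm_mm block reserved above, scaled to bytes: i915_gem_mmap_gtt_ioctl()
 * below returns (u64)map_list.hash.key << PAGE_SHIFT.
 */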
1308
Chris Wilson901782b2009-07-10 08:18:50 +01001309/**
1310 * i915_gem_release_mmap - remove physical page mappings
1311 * @obj: obj in question
1312 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001313 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001314 * relinquish ownership of the pages back to the system.
1315 *
1316 * It is vital that we remove the page mapping if we have mapped a tiled
1317 * object through the GTT and then lose the fence register due to
1318 * resource pressure. Similarly if the object has been moved out of the
1319	 * aperture, then pages mapped into userspace must be revoked. Removing the
1320 * mapping will then trigger a page fault on the next user access, allowing
1321 * fixup by i915_gem_fault().
1322 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001323void
Chris Wilson05394f32010-11-08 19:18:58 +00001324i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001325{
Chris Wilson6299f992010-11-24 12:23:44 +00001326 if (!obj->fault_mappable)
1327 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001328
Chris Wilson6299f992010-11-24 12:23:44 +00001329 unmap_mapping_range(obj->base.dev->dev_mapping,
1330 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1331 obj->base.size, 1);
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001332
Chris Wilson6299f992010-11-24 12:23:44 +00001333 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001334}
1335
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001336static void
Chris Wilson05394f32010-11-08 19:18:58 +00001337i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001338{
Chris Wilson05394f32010-11-08 19:18:58 +00001339 struct drm_device *dev = obj->base.dev;
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001340 struct drm_gem_mm *mm = dev->mm_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001341 struct drm_map_list *list = &obj->base.map_list;
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001342
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001343 drm_ht_remove_item(&mm->offset_hash, &list->hash);
Chris Wilson39a01d12010-10-28 13:03:06 +01001344 drm_mm_put_block(list->file_offset_node);
1345 kfree(list->map);
1346 list->map = NULL;
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001347}
1348
Chris Wilson92b88ae2010-11-09 11:47:32 +00001349static uint32_t
1350i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
1351{
1352 struct drm_device *dev = obj->base.dev;
1353 uint32_t size;
1354
1355 if (INTEL_INFO(dev)->gen >= 4 ||
1356 obj->tiling_mode == I915_TILING_NONE)
1357 return obj->base.size;
1358
1359 /* Previous chips need a power-of-two fence region when tiling */
1360 if (INTEL_INFO(dev)->gen == 3)
1361 size = 1024*1024;
1362 else
1363 size = 512*1024;
1364
1365 while (size < obj->base.size)
1366 size <<= 1;
1367
1368 return size;
1369}
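/*
 * For example (illustrative numbers only): a 1280KiB X-tiled object on gen3
 * starts from the 1MiB minimum and doubles once, so it is assigned a 2MiB
 * fence region; on gen2 the same object starts from 512KiB and doubles twice
 * to arrive at the same 2MiB region.
 */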
1370
Jesse Barnesde151cf2008-11-12 10:03:55 -08001371/**
1372 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1373 * @obj: object to check
1374 *
1375 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001376 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001377 */
1378static uint32_t
Chris Wilson05394f32010-11-08 19:18:58 +00001379i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001380{
Chris Wilson05394f32010-11-08 19:18:58 +00001381 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001382
1383 /*
1384 * Minimum alignment is 4k (GTT page size), but might be greater
1385 * if a fence register is needed for the object.
1386 */
Chris Wilsona00b10c2010-09-24 21:15:47 +01001387 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilson05394f32010-11-08 19:18:58 +00001388 obj->tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001389 return 4096;
1390
1391 /*
1392 * Previous chips need to be aligned to the size of the smallest
1393 * fence register that can contain the object.
1394 */
Chris Wilson05394f32010-11-08 19:18:58 +00001395 return i915_gem_get_gtt_size(obj);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001396}
1397
Daniel Vetter5e783302010-11-14 22:32:36 +01001398/**
1399 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1400 * unfenced object
1401 * @obj: object to check
1402 *
1403 * Return the required GTT alignment for an object, only taking into account
1404 * unfenced tiled surface requirements.
1405 */
1406static uint32_t
Chris Wilson05394f32010-11-08 19:18:58 +00001407i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
Daniel Vetter5e783302010-11-14 22:32:36 +01001408{
Chris Wilson05394f32010-11-08 19:18:58 +00001409 struct drm_device *dev = obj->base.dev;
Daniel Vetter5e783302010-11-14 22:32:36 +01001410 int tile_height;
1411
1412 /*
1413 * Minimum alignment is 4k (GTT page size) for sane hw.
1414 */
1415 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
Chris Wilson05394f32010-11-08 19:18:58 +00001416 obj->tiling_mode == I915_TILING_NONE)
Daniel Vetter5e783302010-11-14 22:32:36 +01001417 return 4096;
1418
1419 /*
1420 * Older chips need unfenced tiled buffers to be aligned to the left
1421 * edge of an even tile row (where tile rows are counted as if the bo is
1422 * placed in a fenced gtt region).
1423 */
1424 if (IS_GEN2(dev) ||
Chris Wilson05394f32010-11-08 19:18:58 +00001425 (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
Daniel Vetter5e783302010-11-14 22:32:36 +01001426 tile_height = 32;
1427 else
1428 tile_height = 8;
1429
Chris Wilson05394f32010-11-08 19:18:58 +00001430 return tile_height * obj->stride * 2;
Daniel Vetter5e783302010-11-14 22:32:36 +01001431}
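/*
 * For example (illustrative numbers only): an X-tiled object with a 4096 byte
 * stride on gen3 uses tile_height = 8 and so must be bound at a multiple of
 * 8 * 4096 * 2 = 64KiB, whereas a Y-tiled object with the same stride on a
 * chip with 128 byte wide Y tiles uses tile_height = 32 and needs
 * 32 * 4096 * 2 = 256KiB alignment.
 */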
1432
Jesse Barnesde151cf2008-11-12 10:03:55 -08001433/**
1434 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1435 * @dev: DRM device
1436 * @data: GTT mapping ioctl data
Chris Wilson05394f32010-11-08 19:18:58 +00001437 * @file: DRM file pointer of the calling client
Jesse Barnesde151cf2008-11-12 10:03:55 -08001438 *
1439 * Simply returns the fake offset to userspace so it can mmap it.
1440 * The mmap call will end up in drm_gem_mmap(), which will set things
1441 * up so we can get faults in the handler above.
1442 *
1443 * The fault handler will take care of binding the object into the GTT
1444 * (since it may have been evicted to make room for something), allocating
1445 * a fence register, and mapping the appropriate aperture address into
1446 * userspace.
1447 */
1448int
1449i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001450 struct drm_file *file)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001451{
Chris Wilsonda761a62010-10-27 17:37:08 +01001452 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001453 struct drm_i915_gem_mmap_gtt *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00001454 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001455 int ret;
1456
1457 if (!(dev->driver->driver_features & DRIVER_GEM))
1458 return -ENODEV;
1459
Chris Wilson76c1dec2010-09-25 11:22:51 +01001460 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001461 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001462 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001463
Chris Wilson05394f32010-11-08 19:18:58 +00001464 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001465 if (obj == NULL) {
1466 ret = -ENOENT;
1467 goto unlock;
1468 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001469
Chris Wilson05394f32010-11-08 19:18:58 +00001470 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001471 ret = -E2BIG;
1472 goto unlock;
1473 }
1474
Chris Wilson05394f32010-11-08 19:18:58 +00001475 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001476 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001477 ret = -EINVAL;
1478 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001479 }
1480
Chris Wilson05394f32010-11-08 19:18:58 +00001481 if (!obj->base.map_list.map) {
Jesse Barnesde151cf2008-11-12 10:03:55 -08001482 ret = i915_gem_create_mmap_offset(obj);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001483 if (ret)
1484 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001485 }
1486
Chris Wilson05394f32010-11-08 19:18:58 +00001487 args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001488
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001489out:
Chris Wilson05394f32010-11-08 19:18:58 +00001490 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001491unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001492 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001493 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001494}
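/*
 * From userspace the flow is roughly the following (a minimal sketch assuming
 * the libdrm uapi headers, an open DRM fd, an existing GEM handle and its
 * size; error handling omitted):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * Subsequent accesses through ptr fault into i915_gem_fault() above, which
 * binds the object and inserts the aperture pages on demand.
 */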
1495
Chris Wilsone5281cc2010-10-28 13:45:36 +01001496static int
Chris Wilson05394f32010-11-08 19:18:58 +00001497i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
Chris Wilsone5281cc2010-10-28 13:45:36 +01001498 gfp_t gfpmask)
1499{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001500 int page_count, i;
1501 struct address_space *mapping;
1502 struct inode *inode;
1503 struct page *page;
1504
1505 /* Get the list of pages out of our struct file. They'll be pinned
1506 * at this point until we release them.
1507 */
Chris Wilson05394f32010-11-08 19:18:58 +00001508 page_count = obj->base.size / PAGE_SIZE;
1509 BUG_ON(obj->pages != NULL);
1510 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1511 if (obj->pages == NULL)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001512 return -ENOMEM;
1513
Chris Wilson05394f32010-11-08 19:18:58 +00001514 inode = obj->base.filp->f_path.dentry->d_inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001515 mapping = inode->i_mapping;
1516 for (i = 0; i < page_count; i++) {
1517 page = read_cache_page_gfp(mapping, i,
1518 GFP_HIGHUSER |
1519 __GFP_COLD |
1520 __GFP_RECLAIMABLE |
1521 gfpmask);
1522 if (IS_ERR(page))
1523 goto err_pages;
1524
Chris Wilson05394f32010-11-08 19:18:58 +00001525 obj->pages[i] = page;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001526 }
1527
Chris Wilson05394f32010-11-08 19:18:58 +00001528 if (obj->tiling_mode != I915_TILING_NONE)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001529 i915_gem_object_do_bit_17_swizzle(obj);
1530
1531 return 0;
1532
1533err_pages:
1534 while (i--)
Chris Wilson05394f32010-11-08 19:18:58 +00001535 page_cache_release(obj->pages[i]);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001536
Chris Wilson05394f32010-11-08 19:18:58 +00001537 drm_free_large(obj->pages);
1538 obj->pages = NULL;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001539 return PTR_ERR(page);
1540}
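/*
 * Each page returned by read_cache_page_gfp() above carries its own
 * reference, which is what keeps the backing store from being reclaimed
 * while the object is bound; the matching page_cache_release() calls are
 * made in i915_gem_object_put_pages_gtt() below (or in the error path
 * above).
 */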
1541
Chris Wilson5cdf5882010-09-27 15:51:07 +01001542static void
Chris Wilson05394f32010-11-08 19:18:58 +00001543i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001544{
Chris Wilson05394f32010-11-08 19:18:58 +00001545 int page_count = obj->base.size / PAGE_SIZE;
Eric Anholt673a3942008-07-30 12:06:12 -07001546 int i;
1547
Chris Wilson05394f32010-11-08 19:18:58 +00001548 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001549
Chris Wilson05394f32010-11-08 19:18:58 +00001550 if (obj->tiling_mode != I915_TILING_NONE)
Eric Anholt280b7132009-03-12 16:56:27 -07001551 i915_gem_object_save_bit_17_swizzle(obj);
1552
Chris Wilson05394f32010-11-08 19:18:58 +00001553 if (obj->madv == I915_MADV_DONTNEED)
1554 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001555
1556 for (i = 0; i < page_count; i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00001557 if (obj->dirty)
1558 set_page_dirty(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001559
Chris Wilson05394f32010-11-08 19:18:58 +00001560 if (obj->madv == I915_MADV_WILLNEED)
1561 mark_page_accessed(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001562
Chris Wilson05394f32010-11-08 19:18:58 +00001563 page_cache_release(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001564 }
Chris Wilson05394f32010-11-08 19:18:58 +00001565 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001566
Chris Wilson05394f32010-11-08 19:18:58 +00001567 drm_free_large(obj->pages);
1568 obj->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001569}
1570
Chris Wilson54cf91d2010-11-25 18:00:26 +00001571void
Chris Wilson05394f32010-11-08 19:18:58 +00001572i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Zou Nan hai852835f2010-05-21 09:08:56 +08001573 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001574{
Chris Wilson05394f32010-11-08 19:18:58 +00001575 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001576 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona56ba562010-09-28 10:07:56 +01001577 uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001578
Zou Nan hai852835f2010-05-21 09:08:56 +08001579 BUG_ON(ring == NULL);
Chris Wilson05394f32010-11-08 19:18:58 +00001580 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001581
1582 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001583 if (!obj->active) {
1584 drm_gem_object_reference(&obj->base);
1585 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001586 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001587
Eric Anholt673a3942008-07-30 12:06:12 -07001588 /* Move from whatever list we were on to the tail of execution. */
Chris Wilson05394f32010-11-08 19:18:58 +00001589 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1590 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001591
Chris Wilson05394f32010-11-08 19:18:58 +00001592 obj->last_rendering_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001593 if (obj->fenced_gpu_access) {
1594 struct drm_i915_fence_reg *reg;
1595
1596 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1597
1598 obj->last_fenced_seqno = seqno;
1599 obj->last_fenced_ring = ring;
1600
1601 reg = &dev_priv->fence_regs[obj->fence_reg];
1602 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1603 }
1604}
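/*
 * Activating an object therefore holds an extra reference on it until it
 * retires, stamps it with the seqno of the upcoming request, and, if it
 * currently owns a fence, moves that fence to the tail of the fence LRU so
 * it becomes the last candidate for stealing.
 */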
1605
1606static void
1607i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1608{
1609 list_del_init(&obj->ring_list);
1610 obj->last_rendering_seqno = 0;
1611 obj->last_fenced_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001612}
1613
Eric Anholtce44b0e2008-11-06 16:00:31 -08001614static void
Chris Wilson05394f32010-11-08 19:18:58 +00001615i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
Eric Anholtce44b0e2008-11-06 16:00:31 -08001616{
Chris Wilson05394f32010-11-08 19:18:58 +00001617 struct drm_device *dev = obj->base.dev;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001618 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001619
Chris Wilson05394f32010-11-08 19:18:58 +00001620 BUG_ON(!obj->active);
1621 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001622
1623 i915_gem_object_move_off_active(obj);
1624}
1625
1626static void
1627i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1628{
1629 struct drm_device *dev = obj->base.dev;
1630 struct drm_i915_private *dev_priv = dev->dev_private;
1631
1632 if (obj->pin_count != 0)
1633 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1634 else
1635 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1636
1637 BUG_ON(!list_empty(&obj->gpu_write_list));
1638 BUG_ON(!obj->active);
1639 obj->ring = NULL;
1640
1641 i915_gem_object_move_off_active(obj);
1642 obj->fenced_gpu_access = false;
1643 obj->last_fenced_ring = NULL;
1644
1645 obj->active = 0;
1646 drm_gem_object_unreference(&obj->base);
1647
1648 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08001649}
Eric Anholt673a3942008-07-30 12:06:12 -07001650
Chris Wilson963b4832009-09-20 23:03:54 +01001651/* Immediately discard the backing storage */
1652static void
Chris Wilson05394f32010-11-08 19:18:58 +00001653i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001654{
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001655 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001656
Chris Wilsonae9fed62010-08-07 11:01:30 +01001657 /* Our goal here is to return as much of the memory as
1658	 * possible back to the system, as we are called from OOM.
1659 * To do this we must instruct the shmfs to drop all of its
1660 * backing pages, *now*. Here we mirror the actions taken
1661	 * by shmem_delete_inode() to release the backing store.
1662 */
Chris Wilson05394f32010-11-08 19:18:58 +00001663 inode = obj->base.filp->f_path.dentry->d_inode;
Chris Wilsonae9fed62010-08-07 11:01:30 +01001664 truncate_inode_pages(inode->i_mapping, 0);
1665 if (inode->i_op->truncate_range)
1666 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001667
Chris Wilson05394f32010-11-08 19:18:58 +00001668 obj->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001669}
1670
1671static inline int
Chris Wilson05394f32010-11-08 19:18:58 +00001672i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001673{
Chris Wilson05394f32010-11-08 19:18:58 +00001674 return obj->madv == I915_MADV_DONTNEED;
Chris Wilson963b4832009-09-20 23:03:54 +01001675}
1676
Eric Anholt673a3942008-07-30 12:06:12 -07001677static void
Daniel Vetter63560392010-02-19 11:51:59 +01001678i915_gem_process_flushing_list(struct drm_device *dev,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001679 uint32_t flush_domains,
Zou Nan hai852835f2010-05-21 09:08:56 +08001680 struct intel_ring_buffer *ring)
Daniel Vetter63560392010-02-19 11:51:59 +01001681{
Chris Wilson05394f32010-11-08 19:18:58 +00001682 struct drm_i915_gem_object *obj, *next;
Daniel Vetter63560392010-02-19 11:51:59 +01001683
Chris Wilson05394f32010-11-08 19:18:58 +00001684 list_for_each_entry_safe(obj, next,
Chris Wilson64193402010-10-24 12:38:05 +01001685 &ring->gpu_write_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001686 gpu_write_list) {
Chris Wilson05394f32010-11-08 19:18:58 +00001687 if (obj->base.write_domain & flush_domains) {
1688 uint32_t old_write_domain = obj->base.write_domain;
Daniel Vetter63560392010-02-19 11:51:59 +01001689
Chris Wilson05394f32010-11-08 19:18:58 +00001690 obj->base.write_domain = 0;
1691 list_del_init(&obj->gpu_write_list);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001692 i915_gem_object_move_to_active(obj, ring);
Daniel Vetter63560392010-02-19 11:51:59 +01001693
Daniel Vetter63560392010-02-19 11:51:59 +01001694 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00001695 obj->base.read_domains,
Daniel Vetter63560392010-02-19 11:51:59 +01001696 old_write_domain);
1697 }
1698 }
1699}
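/*
 * In other words, flushing a write domain clears the pending GPU write on
 * each affected object and (re)places the object on the ring's active list,
 * tying its retirement to the seqno of the request that carries the flush.
 */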
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001700
Chris Wilson3cce4692010-10-27 16:11:02 +01001701int
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001702i915_add_request(struct drm_device *dev,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001703 struct drm_file *file,
Chris Wilson8dc5d142010-08-12 12:36:12 +01001704 struct drm_i915_gem_request *request,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001705 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001706{
1707 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001708 struct drm_i915_file_private *file_priv = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001709 uint32_t seqno;
1710 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01001711 int ret;
1712
1713 BUG_ON(request == NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07001714
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001715 if (file != NULL)
1716 file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001717
Chris Wilson3cce4692010-10-27 16:11:02 +01001718 ret = ring->add_request(ring, &seqno);
1719 if (ret)
1720 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001721
Chris Wilsona56ba562010-09-28 10:07:56 +01001722 ring->outstanding_lazy_request = false;
Eric Anholt673a3942008-07-30 12:06:12 -07001723
1724 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001725 request->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001726 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001727 was_empty = list_empty(&ring->request_list);
1728 list_add_tail(&request->list, &ring->request_list);
1729
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001730 if (file_priv) {
Chris Wilson1c255952010-09-26 11:03:27 +01001731 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001732 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001733 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001734 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01001735 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00001736 }
Eric Anholt673a3942008-07-30 12:06:12 -07001737
Ben Gamarif65d9422009-09-14 17:48:44 -04001738 if (!dev_priv->mm.suspended) {
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001739 mod_timer(&dev_priv->hangcheck_timer,
1740 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
Ben Gamarif65d9422009-09-14 17:48:44 -04001741 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001742 queue_delayed_work(dev_priv->wq,
1743 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001744 }
Chris Wilson3cce4692010-10-27 16:11:02 +01001745 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001746}
1747
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001748static inline void
1749i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001750{
Chris Wilson1c255952010-09-26 11:03:27 +01001751 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07001752
Chris Wilson1c255952010-09-26 11:03:27 +01001753 if (!file_priv)
1754 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001755
Chris Wilson1c255952010-09-26 11:03:27 +01001756 spin_lock(&file_priv->mm.lock);
1757 list_del(&request->client_list);
1758 request->file_priv = NULL;
1759 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001760}
1761
Chris Wilsondfaae392010-09-22 10:31:52 +01001762static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1763 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001764{
Chris Wilsondfaae392010-09-22 10:31:52 +01001765 while (!list_empty(&ring->request_list)) {
1766 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001767
Chris Wilsondfaae392010-09-22 10:31:52 +01001768 request = list_first_entry(&ring->request_list,
1769 struct drm_i915_gem_request,
1770 list);
1771
1772 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001773 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001774 kfree(request);
1775 }
1776
1777 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001778 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001779
Chris Wilson05394f32010-11-08 19:18:58 +00001780 obj = list_first_entry(&ring->active_list,
1781 struct drm_i915_gem_object,
1782 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001783
Chris Wilson05394f32010-11-08 19:18:58 +00001784 obj->base.write_domain = 0;
1785 list_del_init(&obj->gpu_write_list);
1786 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001787 }
Eric Anholt673a3942008-07-30 12:06:12 -07001788}
1789
Chris Wilson312817a2010-11-22 11:50:11 +00001790static void i915_gem_reset_fences(struct drm_device *dev)
1791{
1792 struct drm_i915_private *dev_priv = dev->dev_private;
1793 int i;
1794
1795 for (i = 0; i < 16; i++) {
1796 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00001797 struct drm_i915_gem_object *obj = reg->obj;
1798
1799 if (!obj)
1800 continue;
1801
1802 if (obj->tiling_mode)
1803 i915_gem_release_mmap(obj);
1804
1805 i915_gem_clear_fence_reg(obj);
Chris Wilson312817a2010-11-22 11:50:11 +00001806 }
1807}
1808
Chris Wilson069efc12010-09-30 16:53:18 +01001809void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07001810{
Chris Wilsondfaae392010-09-22 10:31:52 +01001811 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001812 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001813
Chris Wilsondfaae392010-09-22 10:31:52 +01001814 i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
Chris Wilson87acb0a2010-10-19 10:13:00 +01001815 i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
Chris Wilson549f7362010-10-19 11:19:32 +01001816 i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01001817
1818 /* Remove anything from the flushing lists. The GPU cache is likely
1819 * to be lost on reset along with the data, so simply move the
1820 * lost bo to the inactive list.
1821 */
1822 while (!list_empty(&dev_priv->mm.flushing_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001823		obj = list_first_entry(&dev_priv->mm.flushing_list,
1824 struct drm_i915_gem_object,
1825 mm_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001826
Chris Wilson05394f32010-11-08 19:18:58 +00001827 obj->base.write_domain = 0;
1828 list_del_init(&obj->gpu_write_list);
1829 i915_gem_object_move_to_inactive(obj);
Chris Wilson9375e442010-09-19 12:21:28 +01001830 }
Chris Wilson9375e442010-09-19 12:21:28 +01001831
Chris Wilsondfaae392010-09-22 10:31:52 +01001832 /* Move everything out of the GPU domains to ensure we do any
1833 * necessary invalidation upon reuse.
1834 */
Chris Wilson05394f32010-11-08 19:18:58 +00001835 list_for_each_entry(obj,
Chris Wilson77f01232010-09-19 12:31:36 +01001836 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001837 mm_list)
Chris Wilson77f01232010-09-19 12:31:36 +01001838 {
Chris Wilson05394f32010-11-08 19:18:58 +00001839 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilson77f01232010-09-19 12:31:36 +01001840 }
Chris Wilson069efc12010-09-30 16:53:18 +01001841
1842 /* The fence registers are invalidated so clear them out */
Chris Wilson312817a2010-11-22 11:50:11 +00001843 i915_gem_reset_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001844}
1845
1846/**
1847 * This function clears the request list as sequence numbers are passed.
1848 */
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001849static void
1850i915_gem_retire_requests_ring(struct drm_device *dev,
1851 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001852{
1853 drm_i915_private_t *dev_priv = dev->dev_private;
1854 uint32_t seqno;
1855
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001856 if (!ring->status_page.page_addr ||
1857 list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001858 return;
1859
Chris Wilson23bc5982010-09-29 16:10:57 +01001860 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001861
Chris Wilson78501ea2010-10-27 12:18:21 +01001862 seqno = ring->get_seqno(ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08001863 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001864 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001865
Zou Nan hai852835f2010-05-21 09:08:56 +08001866 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001867 struct drm_i915_gem_request,
1868 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001869
Chris Wilsondfaae392010-09-22 10:31:52 +01001870 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001871 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001872
1873 trace_i915_gem_request_retire(dev, request->seqno);
1874
1875 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001876 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001877 kfree(request);
1878 }
1879
1880 /* Move any buffers on the active list that are no longer referenced
1881 * by the ringbuffer to the flushing/inactive lists as appropriate.
1882 */
1883 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001884 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001885
Chris Wilson05394f32010-11-08 19:18:58 +00001886		obj = list_first_entry(&ring->active_list,
1887 struct drm_i915_gem_object,
1888 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001889
Chris Wilson05394f32010-11-08 19:18:58 +00001890 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001891 break;
1892
Chris Wilson05394f32010-11-08 19:18:58 +00001893 if (obj->base.write_domain != 0)
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001894 i915_gem_object_move_to_flushing(obj);
1895 else
1896 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001897 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001898
1899	if (unlikely(dev_priv->trace_irq_seqno &&
1900 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
Chris Wilson78501ea2010-10-27 12:18:21 +01001901 ring->user_irq_put(ring);
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001902 dev_priv->trace_irq_seqno = 0;
1903 }
Chris Wilson23bc5982010-09-29 16:10:57 +01001904
1905 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001906}
1907
1908void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001909i915_gem_retire_requests(struct drm_device *dev)
1910{
1911 drm_i915_private_t *dev_priv = dev->dev_private;
1912
Chris Wilsonbe726152010-07-23 23:18:50 +01001913 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001914 struct drm_i915_gem_object *obj, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01001915
1916 /* We must be careful that during unbind() we do not
1917 * accidentally infinitely recurse into retire requests.
1918 * Currently:
1919 * retire -> free -> unbind -> wait -> retire_ring
1920 */
Chris Wilson05394f32010-11-08 19:18:58 +00001921 list_for_each_entry_safe(obj, next,
Chris Wilsonbe726152010-07-23 23:18:50 +01001922 &dev_priv->mm.deferred_free_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001923 mm_list)
Chris Wilson05394f32010-11-08 19:18:58 +00001924 i915_gem_free_object_tail(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01001925 }
1926
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001927 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
Chris Wilson87acb0a2010-10-19 10:13:00 +01001928 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
Chris Wilson549f7362010-10-19 11:19:32 +01001929 i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001930}
1931
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001932static void
Eric Anholt673a3942008-07-30 12:06:12 -07001933i915_gem_retire_work_handler(struct work_struct *work)
1934{
1935 drm_i915_private_t *dev_priv;
1936 struct drm_device *dev;
1937
1938 dev_priv = container_of(work, drm_i915_private_t,
1939 mm.retire_work.work);
1940 dev = dev_priv->dev;
1941
Chris Wilson891b48c2010-09-29 12:26:37 +01001942 /* Come back later if the device is busy... */
1943 if (!mutex_trylock(&dev->struct_mutex)) {
1944 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1945 return;
1946 }
1947
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001948 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001949
Keith Packard6dbe2772008-10-14 21:41:13 -07001950 if (!dev_priv->mm.suspended &&
Zou Nan haid1b851f2010-05-21 09:08:57 +08001951 (!list_empty(&dev_priv->render_ring.request_list) ||
Chris Wilson549f7362010-10-19 11:19:32 +01001952 !list_empty(&dev_priv->bsd_ring.request_list) ||
1953 !list_empty(&dev_priv->blt_ring.request_list)))
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001954 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Eric Anholt673a3942008-07-30 12:06:12 -07001955 mutex_unlock(&dev->struct_mutex);
1956}
1957
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001958int
Zou Nan hai852835f2010-05-21 09:08:56 +08001959i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001960 bool interruptible, struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001961{
1962 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001963 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001964 int ret = 0;
1965
1966 BUG_ON(seqno == 0);
1967
Ben Gamariba1234d2009-09-14 17:48:47 -04001968 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001969 return -EAGAIN;
Ben Gamariffed1d02009-09-14 17:48:41 -04001970
Chris Wilson5d97eb62010-11-10 20:40:02 +00001971 if (seqno == ring->outstanding_lazy_request) {
Chris Wilson3cce4692010-10-27 16:11:02 +01001972 struct drm_i915_gem_request *request;
1973
1974 request = kzalloc(sizeof(*request), GFP_KERNEL);
1975 if (request == NULL)
Daniel Vettere35a41d2010-02-11 22:13:59 +01001976 return -ENOMEM;
Chris Wilson3cce4692010-10-27 16:11:02 +01001977
1978 ret = i915_add_request(dev, NULL, request, ring);
1979 if (ret) {
1980 kfree(request);
1981 return ret;
1982 }
1983
1984 seqno = request->seqno;
Daniel Vettere35a41d2010-02-11 22:13:59 +01001985 }
1986
Chris Wilson78501ea2010-10-27 12:18:21 +01001987 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Eric Anholtbad720f2009-10-22 16:11:14 -07001988 if (HAS_PCH_SPLIT(dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001989 ier = I915_READ(DEIER) | I915_READ(GTIER);
1990 else
1991 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001992 if (!ier) {
1993 DRM_ERROR("something (likely vbetool) disabled "
1994 "interrupts, re-enabling\n");
1995 i915_driver_irq_preinstall(dev);
1996 i915_driver_irq_postinstall(dev);
1997 }
1998
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001999 trace_i915_gem_request_wait_begin(dev, seqno);
2000
Chris Wilsonb2223492010-10-27 15:27:33 +01002001 ring->waiting_seqno = seqno;
Chris Wilson78501ea2010-10-27 12:18:21 +01002002 ring->user_irq_get(ring);
Daniel Vetter48764bf2009-09-15 22:57:32 +02002003 if (interruptible)
Zou Nan hai852835f2010-05-21 09:08:56 +08002004 ret = wait_event_interruptible(ring->irq_queue,
Chris Wilson78501ea2010-10-27 12:18:21 +01002005 i915_seqno_passed(ring->get_seqno(ring), seqno)
Zou Nan hai852835f2010-05-21 09:08:56 +08002006 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02002007 else
Zou Nan hai852835f2010-05-21 09:08:56 +08002008 wait_event(ring->irq_queue,
Chris Wilson78501ea2010-10-27 12:18:21 +01002009 i915_seqno_passed(ring->get_seqno(ring), seqno)
Zou Nan hai852835f2010-05-21 09:08:56 +08002010 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02002011
Chris Wilson78501ea2010-10-27 12:18:21 +01002012 ring->user_irq_put(ring);
Chris Wilsonb2223492010-10-27 15:27:33 +01002013 ring->waiting_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002014
2015 trace_i915_gem_request_wait_end(dev, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07002016 }
Ben Gamariba1234d2009-09-14 17:48:47 -04002017 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01002018 ret = -EAGAIN;
Eric Anholt673a3942008-07-30 12:06:12 -07002019
2020 if (ret && ret != -ERESTARTSYS)
Daniel Vetter8bff9172010-02-11 22:19:40 +01002021 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
Chris Wilson78501ea2010-10-27 12:18:21 +01002022 __func__, ret, seqno, ring->get_seqno(ring),
Daniel Vetter8bff9172010-02-11 22:19:40 +01002023 dev_priv->next_seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07002024
2025 /* Directly dispatch request retiring. While we have the work queue
2026 * to handle this, the waiter on a request often wants an associated
2027 * buffer to have made it to the inactive list, and we would need
2028 * a separate wait queue to handle that.
2029 */
2030 if (ret == 0)
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01002031 i915_gem_retire_requests_ring(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07002032
2033 return ret;
2034}
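/*
 * Note that if the seqno being waited upon is the ring's outstanding lazy
 * request, nothing has actually been emitted yet; the code above therefore
 * emits a real request via i915_add_request() first and waits on its seqno
 * instead.
 */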
2035
Daniel Vetter48764bf2009-09-15 22:57:32 +02002036/**
2037 * Waits for a sequence number to be signaled, and cleans up the
2038 * request and object lists appropriately for that event.
2039 */
2040static int
Zou Nan hai852835f2010-05-21 09:08:56 +08002041i915_wait_request(struct drm_device *dev, uint32_t seqno,
Chris Wilsona56ba562010-09-28 10:07:56 +01002042 struct intel_ring_buffer *ring)
Daniel Vetter48764bf2009-09-15 22:57:32 +02002043{
Zou Nan hai852835f2010-05-21 09:08:56 +08002044 return i915_do_wait_request(dev, seqno, 1, ring);
Daniel Vetter48764bf2009-09-15 22:57:32 +02002045}
2046
Eric Anholt673a3942008-07-30 12:06:12 -07002047/**
2048 * Ensures that all rendering to the object has completed and the object is
2049 * safe to unbind from the GTT or access from the CPU.
2050 */
Chris Wilson54cf91d2010-11-25 18:00:26 +00002051int
Chris Wilson05394f32010-11-08 19:18:58 +00002052i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
Chris Wilson2cf34d72010-09-14 13:03:28 +01002053 bool interruptible)
Eric Anholt673a3942008-07-30 12:06:12 -07002054{
Chris Wilson05394f32010-11-08 19:18:58 +00002055 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07002056 int ret;
2057
Eric Anholte47c68e2008-11-14 13:35:19 -08002058 /* This function only exists to support waiting for existing rendering,
2059 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07002060 */
Chris Wilson05394f32010-11-08 19:18:58 +00002061 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07002062
2063 /* If there is rendering queued on the buffer being evicted, wait for
2064 * it.
2065 */
Chris Wilson05394f32010-11-08 19:18:58 +00002066 if (obj->active) {
Chris Wilson2cf34d72010-09-14 13:03:28 +01002067 ret = i915_do_wait_request(dev,
Chris Wilson05394f32010-11-08 19:18:58 +00002068 obj->last_rendering_seqno,
Chris Wilson2cf34d72010-09-14 13:03:28 +01002069 interruptible,
Chris Wilson05394f32010-11-08 19:18:58 +00002070 obj->ring);
Chris Wilson2cf34d72010-09-14 13:03:28 +01002071 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002072 return ret;
2073 }
2074
2075 return 0;
2076}
2077
2078/**
2079 * Unbinds an object from the GTT aperture.
2080 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002081int
Chris Wilson05394f32010-11-08 19:18:58 +00002082i915_gem_object_unbind(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002083{
Eric Anholt673a3942008-07-30 12:06:12 -07002084 int ret = 0;
2085
Chris Wilson05394f32010-11-08 19:18:58 +00002086 if (obj->gtt_space == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002087 return 0;
2088
Chris Wilson05394f32010-11-08 19:18:58 +00002089 if (obj->pin_count != 0) {
Eric Anholt673a3942008-07-30 12:06:12 -07002090 DRM_ERROR("Attempting to unbind pinned buffer\n");
2091 return -EINVAL;
2092 }
2093
Eric Anholt5323fd02009-09-09 11:50:45 -07002094 /* blow away mappings if mapped through GTT */
2095 i915_gem_release_mmap(obj);
2096
Eric Anholt673a3942008-07-30 12:06:12 -07002097 /* Move the object to the CPU domain to ensure that
2098 * any possible CPU writes while it's not in the GTT
2099 * are flushed when we go to remap it. This will
2100 * also ensure that all pending GPU writes are finished
2101 * before we unbind.
2102 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002103 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Chris Wilson8dc17752010-07-23 23:18:51 +01002104 if (ret == -ERESTARTSYS)
Eric Anholt673a3942008-07-30 12:06:12 -07002105 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002106	/* Continue on if we fail due to EIO; the GPU is hung, so we
2107	 * should be safe, and we need to clean up or else we might
2108 * cause memory corruption through use-after-free.
2109 */
Chris Wilson812ed4922010-09-30 15:08:57 +01002110 if (ret) {
2111 i915_gem_clflush_object(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002112 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Chris Wilson812ed4922010-09-30 15:08:57 +01002113 }
Eric Anholt673a3942008-07-30 12:06:12 -07002114
Daniel Vetter96b47b62009-12-15 17:50:00 +01002115 /* release the fence reg _after_ flushing */
Chris Wilson05394f32010-11-08 19:18:58 +00002116 if (obj->fence_reg != I915_FENCE_REG_NONE)
Daniel Vetter96b47b62009-12-15 17:50:00 +01002117 i915_gem_clear_fence_reg(obj);
2118
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002119 i915_gem_gtt_unbind_object(obj);
Chris Wilsone5281cc2010-10-28 13:45:36 +01002120 i915_gem_object_put_pages_gtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002121
Chris Wilson6299f992010-11-24 12:23:44 +00002122 list_del_init(&obj->gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002123 list_del_init(&obj->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002124 /* Avoid an unnecessary call to unbind on rebind. */
Chris Wilson05394f32010-11-08 19:18:58 +00002125 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002126
Chris Wilson05394f32010-11-08 19:18:58 +00002127 drm_mm_put_block(obj->gtt_space);
2128 obj->gtt_space = NULL;
2129 obj->gtt_offset = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002130
Chris Wilson05394f32010-11-08 19:18:58 +00002131 if (i915_gem_object_is_purgeable(obj))
Chris Wilson963b4832009-09-20 23:03:54 +01002132 i915_gem_object_truncate(obj);
2133
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002134 trace_i915_gem_object_unbind(obj);
2135
Chris Wilson8dc17752010-07-23 23:18:51 +01002136 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002137}
2138
Chris Wilson54cf91d2010-11-25 18:00:26 +00002139void
2140i915_gem_flush_ring(struct drm_device *dev,
2141 struct intel_ring_buffer *ring,
2142 uint32_t invalidate_domains,
2143 uint32_t flush_domains)
2144{
2145 ring->flush(ring, invalidate_domains, flush_domains);
2146 i915_gem_process_flushing_list(dev, flush_domains, ring);
2147}
2148
Chris Wilsona56ba562010-09-28 10:07:56 +01002149static int i915_ring_idle(struct drm_device *dev,
2150 struct intel_ring_buffer *ring)
2151{
Chris Wilson395b70b2010-10-28 21:28:46 +01002152 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
Chris Wilson64193402010-10-24 12:38:05 +01002153 return 0;
2154
Chris Wilson05394f32010-11-08 19:18:58 +00002155 i915_gem_flush_ring(dev, ring,
Chris Wilsona56ba562010-09-28 10:07:56 +01002156 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2157 return i915_wait_request(dev,
2158 i915_gem_next_request_seqno(dev, ring),
2159 ring);
2160}
2161
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01002162int
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002163i915_gpu_idle(struct drm_device *dev)
2164{
2165 drm_i915_private_t *dev_priv = dev->dev_private;
2166 bool lists_empty;
Zou Nan hai852835f2010-05-21 09:08:56 +08002167 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002168
Zou Nan haid1b851f2010-05-21 09:08:57 +08002169 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
Chris Wilson395b70b2010-10-28 21:28:46 +01002170 list_empty(&dev_priv->mm.active_list));
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002171 if (lists_empty)
2172 return 0;
2173
2174 /* Flush everything onto the inactive list. */
Chris Wilsona56ba562010-09-28 10:07:56 +01002175 ret = i915_ring_idle(dev, &dev_priv->render_ring);
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002176 if (ret)
2177 return ret;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002178
Chris Wilson87acb0a2010-10-19 10:13:00 +01002179 ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
2180 if (ret)
2181 return ret;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002182
Chris Wilson549f7362010-10-19 11:19:32 +01002183 ret = i915_ring_idle(dev, &dev_priv->blt_ring);
2184 if (ret)
2185 return ret;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002186
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002187 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002188}
2189
Daniel Vetterc6642782010-11-12 13:46:18 +00002190static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2191 struct intel_ring_buffer *pipelined)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002192{
Chris Wilson05394f32010-11-08 19:18:58 +00002193 struct drm_device *dev = obj->base.dev;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002194 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002195 u32 size = obj->gtt_space->size;
2196 int regnum = obj->fence_reg;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002197 uint64_t val;
2198
Chris Wilson05394f32010-11-08 19:18:58 +00002199 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Daniel Vetterc6642782010-11-12 13:46:18 +00002200 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00002201 val |= obj->gtt_offset & 0xfffff000;
2202 val |= (uint64_t)((obj->stride / 128) - 1) <<
Eric Anholt4e901fd2009-10-26 16:44:17 -07002203 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2204
Chris Wilson05394f32010-11-08 19:18:58 +00002205 if (obj->tiling_mode == I915_TILING_Y)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002206 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2207 val |= I965_FENCE_REG_VALID;
2208
Daniel Vetterc6642782010-11-12 13:46:18 +00002209 if (pipelined) {
2210 int ret = intel_ring_begin(pipelined, 6);
2211 if (ret)
2212 return ret;
2213
2214 intel_ring_emit(pipelined, MI_NOOP);
2215 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2216 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2217 intel_ring_emit(pipelined, (u32)val);
2218 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2219 intel_ring_emit(pipelined, (u32)(val >> 32));
2220 intel_ring_advance(pipelined);
2221 } else
2222 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2223
2224 return 0;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002225}
2226
Daniel Vetterc6642782010-11-12 13:46:18 +00002227static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2228 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002229{
Chris Wilson05394f32010-11-08 19:18:58 +00002230 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002231 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002232 u32 size = obj->gtt_space->size;
2233 int regnum = obj->fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002234 uint64_t val;
2235
Chris Wilson05394f32010-11-08 19:18:58 +00002236 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Jesse Barnesde151cf2008-11-12 10:03:55 -08002237 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00002238 val |= obj->gtt_offset & 0xfffff000;
2239 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2240 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002241 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2242 val |= I965_FENCE_REG_VALID;
2243
Daniel Vetterc6642782010-11-12 13:46:18 +00002244 if (pipelined) {
2245 int ret = intel_ring_begin(pipelined, 6);
2246 if (ret)
2247 return ret;
2248
2249 intel_ring_emit(pipelined, MI_NOOP);
2250 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2251 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2252 intel_ring_emit(pipelined, (u32)val);
2253 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2254 intel_ring_emit(pipelined, (u32)(val >> 32));
2255 intel_ring_advance(pipelined);
2256 } else
2257 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2258
2259 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002260}
2261
Daniel Vetterc6642782010-11-12 13:46:18 +00002262static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2263 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002264{
Chris Wilson05394f32010-11-08 19:18:58 +00002265 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002266 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002267 u32 size = obj->gtt_space->size;
Daniel Vetterc6642782010-11-12 13:46:18 +00002268 u32 fence_reg, val, pitch_val;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002269 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002270
Daniel Vetterc6642782010-11-12 13:46:18 +00002271 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2272 (size & -size) != size ||
2273 (obj->gtt_offset & (size - 1)),
2274 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2275 obj->gtt_offset, obj->map_and_fenceable, size))
2276 return -EINVAL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002277
Daniel Vetterc6642782010-11-12 13:46:18 +00002278 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
Jesse Barnes0f973f22009-01-26 17:10:45 -08002279 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002280 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002281 tile_width = 512;
2282
2283 /* Note: pitch better be a power of two tile widths */
Chris Wilson05394f32010-11-08 19:18:58 +00002284 pitch_val = obj->stride / tile_width;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002285 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002286
Chris Wilson05394f32010-11-08 19:18:58 +00002287 val = obj->gtt_offset;
2288 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002289 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002290 val |= I915_FENCE_SIZE_BITS(size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002291 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2292 val |= I830_FENCE_REG_VALID;
2293
Chris Wilson05394f32010-11-08 19:18:58 +00002294 fence_reg = obj->fence_reg;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002295 if (fence_reg < 8)
2296 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002297 else
Chris Wilsona00b10c2010-09-24 21:15:47 +01002298 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
Daniel Vetterc6642782010-11-12 13:46:18 +00002299
2300 if (pipelined) {
2301 int ret = intel_ring_begin(pipelined, 4);
2302 if (ret)
2303 return ret;
2304
2305 intel_ring_emit(pipelined, MI_NOOP);
2306 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2307 intel_ring_emit(pipelined, fence_reg);
2308 intel_ring_emit(pipelined, val);
2309 intel_ring_advance(pipelined);
2310 } else
2311 I915_WRITE(fence_reg, val);
2312
2313 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002314}
2315
Daniel Vetterc6642782010-11-12 13:46:18 +00002316static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2317 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002318{
Chris Wilson05394f32010-11-08 19:18:58 +00002319 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002320 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002321 u32 size = obj->gtt_space->size;
2322 int regnum = obj->fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002323 uint32_t val;
2324 uint32_t pitch_val;
2325
Daniel Vetterc6642782010-11-12 13:46:18 +00002326 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2327 (size & -size) != size ||
2328 (obj->gtt_offset & (size - 1)),
2329 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2330 obj->gtt_offset, size))
2331 return -EINVAL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002332
Chris Wilson05394f32010-11-08 19:18:58 +00002333 pitch_val = obj->stride / 128;
Eric Anholte76a16d2009-05-26 17:44:56 -07002334 pitch_val = ffs(pitch_val) - 1;
Eric Anholte76a16d2009-05-26 17:44:56 -07002335
Chris Wilson05394f32010-11-08 19:18:58 +00002336 val = obj->gtt_offset;
2337 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002338 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetterc6642782010-11-12 13:46:18 +00002339 val |= I830_FENCE_SIZE_BITS(size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002340 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2341 val |= I830_FENCE_REG_VALID;
2342
Daniel Vetterc6642782010-11-12 13:46:18 +00002343 if (pipelined) {
2344 int ret = intel_ring_begin(pipelined, 4);
2345 if (ret)
2346 return ret;
2347
2348 intel_ring_emit(pipelined, MI_NOOP);
2349 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2350 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2351 intel_ring_emit(pipelined, val);
2352 intel_ring_advance(pipelined);
2353 } else
2354 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2355
2356 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002357}
2358
Chris Wilson2cf34d72010-09-14 13:03:28 +01002359static int i915_find_fence_reg(struct drm_device *dev,
2360 bool interruptible)
Daniel Vetterae3db242010-02-19 11:51:58 +01002361{
Daniel Vetterae3db242010-02-19 11:51:58 +01002362 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002363 struct drm_i915_fence_reg *reg;
Chris Wilson05394f32010-11-08 19:18:58 +00002364 struct drm_i915_gem_object *obj = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002365 int i, avail, ret;
2366
2367 /* First try to find a free reg */
2368 avail = 0;
2369 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2370 reg = &dev_priv->fence_regs[i];
2371 if (!reg->obj)
2372 return i;
2373
Chris Wilson05394f32010-11-08 19:18:58 +00002374 if (!reg->obj->pin_count)
2375 avail++;
Daniel Vetterae3db242010-02-19 11:51:58 +01002376 }
2377
2378 if (avail == 0)
2379 return -ENOSPC;
2380
2381 /* None available, try to steal one or wait for a user to finish */
Chris Wilsona00b10c2010-09-24 21:15:47 +01002382 avail = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002383 list_for_each_entry(reg, &dev_priv->mm.fence_list,
2384 lru_list) {
Chris Wilson05394f32010-11-08 19:18:58 +00002385 obj = reg->obj;
2386 if (obj->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01002387 continue;
2388
2389 /* found one! */
Chris Wilson05394f32010-11-08 19:18:58 +00002390 avail = obj->fence_reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002391 break;
2392 }
2393
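	/* We counted at least one unpinned fence above, so the LRU walk
	 * must have found a victim.
	 */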
Chris Wilsona00b10c2010-09-24 21:15:47 +01002394 BUG_ON(avail == I915_FENCE_REG_NONE);
Daniel Vetterae3db242010-02-19 11:51:58 +01002395
2396 /* We only have a reference on obj from the active list. put_fence_reg
2397	 * might drop that one, causing a use-after-free of obj. So hold a
2398 * private reference to obj like the other callers of put_fence_reg
2399 * (set_tiling ioctl) do. */
Chris Wilson05394f32010-11-08 19:18:58 +00002400 drm_gem_object_reference(&obj->base);
2401 ret = i915_gem_object_put_fence_reg(obj, interruptible);
2402 drm_gem_object_unreference(&obj->base);
Daniel Vetterae3db242010-02-19 11:51:58 +01002403 if (ret != 0)
2404 return ret;
2405
Chris Wilsona00b10c2010-09-24 21:15:47 +01002406 return avail;
Daniel Vetterae3db242010-02-19 11:51:58 +01002407}
2408
Jesse Barnesde151cf2008-11-12 10:03:55 -08002409/**
2410 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2411 * @obj: object to map through a fence reg
2412 *
2413 * When mapping objects through the GTT, userspace wants to be able to write
2414 * to them without having to worry about swizzling if the object is tiled.
2415 *
2416 * This function walks the fence regs looking for a free one for @obj,
2417 * stealing one if it can't find any.
2418 *
2419 * It then sets up the reg based on the object's properties: address, pitch
2420 * and tiling format.
2421 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002422int
Chris Wilson05394f32010-11-08 19:18:58 +00002423i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
Chris Wilson2cf34d72010-09-14 13:03:28 +01002424 bool interruptible)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002425{
Chris Wilson05394f32010-11-08 19:18:58 +00002426 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002427 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002428 struct drm_i915_fence_reg *reg = NULL;
Daniel Vetterc6642782010-11-12 13:46:18 +00002429 struct intel_ring_buffer *pipelined = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002430 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002431
Eric Anholta09ba7f2009-08-29 12:49:51 -07002432 /* Just update our place in the LRU if our fence is getting used. */
Chris Wilson05394f32010-11-08 19:18:58 +00002433 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2434 reg = &dev_priv->fence_regs[obj->fence_reg];
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002435 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002436 return 0;
2437 }
2438
Chris Wilson05394f32010-11-08 19:18:58 +00002439 switch (obj->tiling_mode) {
Jesse Barnesde151cf2008-11-12 10:03:55 -08002440 case I915_TILING_NONE:
2441 WARN(1, "allocating a fence for non-tiled object?\n");
2442 break;
2443 case I915_TILING_X:
Chris Wilson05394f32010-11-08 19:18:58 +00002444 if (!obj->stride)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002445 return -EINVAL;
Chris Wilson05394f32010-11-08 19:18:58 +00002446 WARN((obj->stride & (512 - 1)),
Jesse Barnes0f973f22009-01-26 17:10:45 -08002447 "object 0x%08x is X tiled but has non-512B pitch\n",
Chris Wilson05394f32010-11-08 19:18:58 +00002448 obj->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002449 break;
2450 case I915_TILING_Y:
Chris Wilson05394f32010-11-08 19:18:58 +00002451 if (!obj->stride)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002452 return -EINVAL;
Chris Wilson05394f32010-11-08 19:18:58 +00002453 WARN((obj->stride & (128 - 1)),
Jesse Barnes0f973f22009-01-26 17:10:45 -08002454 "object 0x%08x is Y tiled but has non-128B pitch\n",
Chris Wilson05394f32010-11-08 19:18:58 +00002455 obj->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002456 break;
2457 }
2458
Chris Wilson2cf34d72010-09-14 13:03:28 +01002459 ret = i915_find_fence_reg(dev, interruptible);
Daniel Vetterae3db242010-02-19 11:51:58 +01002460 if (ret < 0)
2461 return ret;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002462
Chris Wilson05394f32010-11-08 19:18:58 +00002463 obj->fence_reg = ret;
2464 reg = &dev_priv->fence_regs[obj->fence_reg];
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002465 list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002466
Jesse Barnesde151cf2008-11-12 10:03:55 -08002467 reg->obj = obj;
2468
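	/* Write the new fence value for this generation. pipelined is
	 * always NULL here, so each helper falls back to an immediate
	 * MMIO write of the fence register.
	 */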
Chris Wilsone259bef2010-09-17 00:32:02 +01002469 switch (INTEL_INFO(dev)->gen) {
2470 case 6:
Daniel Vetterc6642782010-11-12 13:46:18 +00002471 ret = sandybridge_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002472 break;
2473 case 5:
2474 case 4:
Daniel Vetterc6642782010-11-12 13:46:18 +00002475 ret = i965_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002476 break;
2477 case 3:
Daniel Vetterc6642782010-11-12 13:46:18 +00002478 ret = i915_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002479 break;
2480 case 2:
Daniel Vetterc6642782010-11-12 13:46:18 +00002481 ret = i830_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002482 break;
2483 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002484
Chris Wilsona00b10c2010-09-24 21:15:47 +01002485 trace_i915_gem_object_get_fence(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002486 obj->fence_reg,
2487 obj->tiling_mode);
Daniel Vetterc6642782010-11-12 13:46:18 +00002488 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002489}
2490
2491/**
2492 * i915_gem_clear_fence_reg - clear out fence register info
2493 * @obj: object to clear
2494 *
2495 * Zeroes out the fence register itself and clears out the associated
Chris Wilson05394f32010-11-08 19:18:58 +00002496 * data structures in dev_priv and obj.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002497 */
2498static void
Chris Wilson05394f32010-11-08 19:18:58 +00002499i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002500{
Chris Wilson05394f32010-11-08 19:18:58 +00002501 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002502 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002503 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilsone259bef2010-09-17 00:32:02 +01002504 uint32_t fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002505
Chris Wilsone259bef2010-09-17 00:32:02 +01002506 switch (INTEL_INFO(dev)->gen) {
2507 case 6:
Eric Anholt4e901fd2009-10-26 16:44:17 -07002508 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
Chris Wilson05394f32010-11-08 19:18:58 +00002509 (obj->fence_reg * 8), 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002510 break;
2511 case 5:
2512 case 4:
Chris Wilson05394f32010-11-08 19:18:58 +00002513 I915_WRITE64(FENCE_REG_965_0 + (obj->fence_reg * 8), 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002514 break;
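	/* Gen3 keeps fences 8-15 in the 945 registers; the case 2 label
	 * below jumps into the else branch, so gen2 and the low gen3
	 * fences share the FENCE_REG_830_0 layout.
	 */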
2515 case 3:
Chris Wilson05394f32010-11-08 19:18:58 +00002516 if (obj->fence_reg >= 8)
2517 fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002518 else
Chris Wilsone259bef2010-09-17 00:32:02 +01002519 case 2:
Chris Wilson05394f32010-11-08 19:18:58 +00002520 fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002521
2522 I915_WRITE(fence_reg, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002523 break;
Eric Anholtdc529a42009-03-10 22:34:49 -07002524 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002525
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002526 reg->obj = NULL;
Chris Wilson05394f32010-11-08 19:18:58 +00002527 obj->fence_reg = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002528 list_del_init(&reg->lru_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002529}
2530
Eric Anholt673a3942008-07-30 12:06:12 -07002531/**
Chris Wilson52dc7d32009-06-06 09:46:01 +01002532 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2533 * to the buffer to finish, and then resets the fence register.
2534 * @obj: tiled object holding a fence register.
Chris Wilson2cf34d72010-09-14 13:03:28 +01002535 * @interruptible: whether the wait upon the fence is interruptible
Chris Wilson52dc7d32009-06-06 09:46:01 +01002536 *
2537 * Zeroes out the fence register itself and clears out the associated
Chris Wilson05394f32010-11-08 19:18:58 +00002538 * data structures in dev_priv and obj.
Chris Wilson52dc7d32009-06-06 09:46:01 +01002539 */
2540int
Chris Wilson05394f32010-11-08 19:18:58 +00002541i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
Chris Wilson2cf34d72010-09-14 13:03:28 +01002542 bool interruptible)
Chris Wilson52dc7d32009-06-06 09:46:01 +01002543{
Chris Wilson05394f32010-11-08 19:18:58 +00002544 struct drm_device *dev = obj->base.dev;
Chris Wilsoncaea7472010-11-12 13:53:37 +00002545 int ret;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002546
Chris Wilson05394f32010-11-08 19:18:58 +00002547 if (obj->fence_reg == I915_FENCE_REG_NONE)
Chris Wilson52dc7d32009-06-06 09:46:01 +01002548 return 0;
2549
Daniel Vetter10ae9bd2010-02-01 13:59:17 +01002550 /* If we've changed tiling, GTT-mappings of the object
2551 * need to re-fault to ensure that the correct fence register
2552 * setup is in place.
2553 */
2554 i915_gem_release_mmap(obj);
2555
Chris Wilson52dc7d32009-06-06 09:46:01 +01002556 /* On the i915, GPU access to tiled buffers is via a fence,
2557 * therefore we must wait for any outstanding access to complete
2558 * before clearing the fence.
2559 */
Chris Wilsoncaea7472010-11-12 13:53:37 +00002560 if (obj->fenced_gpu_access) {
Chris Wilson3619df02010-11-28 15:37:17 +00002561 i915_gem_object_flush_gpu_write_domain(obj);
Chris Wilsoncaea7472010-11-12 13:53:37 +00002562 obj->fenced_gpu_access = false;
2563 }
2564
2565 if (obj->last_fenced_seqno) {
2566 ret = i915_do_wait_request(dev,
2567 obj->last_fenced_seqno,
2568 interruptible,
2569 obj->last_fenced_ring);
Chris Wilson0bc23aa2010-09-14 10:22:23 +01002570 if (ret)
Chris Wilson52dc7d32009-06-06 09:46:01 +01002571 return ret;
Chris Wilson53640e12010-09-20 11:40:50 +01002572
Chris Wilsoncaea7472010-11-12 13:53:37 +00002573		obj->last_fenced_seqno = 0;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002574 }
2575
Daniel Vetter4a726612010-02-01 13:59:16 +01002576 i915_gem_object_flush_gtt_write_domain(obj);
Chris Wilson0bc23aa2010-09-14 10:22:23 +01002577 i915_gem_clear_fence_reg(obj);
Chris Wilson52dc7d32009-06-06 09:46:01 +01002578
2579 return 0;
2580}
2581
2582/**
Eric Anholt673a3942008-07-30 12:06:12 -07002583 * Finds free space in the GTT aperture and binds the object there.
2584 */
2585static int
Chris Wilson05394f32010-11-08 19:18:58 +00002586i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
Daniel Vetter920afa72010-09-16 17:54:23 +02002587 unsigned alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01002588 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07002589{
Chris Wilson05394f32010-11-08 19:18:58 +00002590 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07002591 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002592 struct drm_mm_node *free_space;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002593 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Daniel Vetter5e783302010-11-14 22:32:36 +01002594 u32 size, fence_size, fence_alignment, unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002595 bool mappable, fenceable;
Chris Wilson07f73f62009-09-14 16:50:30 +01002596 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002597
Chris Wilson05394f32010-11-08 19:18:58 +00002598 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002599 DRM_ERROR("Attempting to bind a purgeable object\n");
2600 return -EINVAL;
2601 }
2602
Chris Wilson05394f32010-11-08 19:18:58 +00002603 fence_size = i915_gem_get_gtt_size(obj);
2604 fence_alignment = i915_gem_get_gtt_alignment(obj);
2605 unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002606
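	/* A mapping that is to be fenced must honour the fence size and
	 * alignment; otherwise the looser unfenced alignment is sufficient.
	 */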
Eric Anholt673a3942008-07-30 12:06:12 -07002607 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01002608 alignment = map_and_fenceable ? fence_alignment :
2609 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002610 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002611 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2612 return -EINVAL;
2613 }
2614
Chris Wilson05394f32010-11-08 19:18:58 +00002615 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002616
Chris Wilson654fc602010-05-27 13:18:21 +01002617 /* If the object is bigger than the entire aperture, reject it early
2618 * before evicting everything in a vain attempt to find space.
2619 */
Chris Wilson05394f32010-11-08 19:18:58 +00002620 if (obj->base.size >
Daniel Vetter75e9e912010-11-04 17:11:09 +01002621 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
Chris Wilson654fc602010-05-27 13:18:21 +01002622 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2623 return -E2BIG;
2624 }
2625
Eric Anholt673a3942008-07-30 12:06:12 -07002626 search_free:
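	/* Objects that must be mappable (and fenceable) have to live below
	 * the end of the mappable aperture, so bound the drm_mm search
	 * accordingly.
	 */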
Daniel Vetter75e9e912010-11-04 17:11:09 +01002627 if (map_and_fenceable)
Daniel Vetter920afa72010-09-16 17:54:23 +02002628 free_space =
2629 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002630 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002631 dev_priv->mm.gtt_mappable_end,
2632 0);
2633 else
2634 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002635 size, alignment, 0);
Daniel Vetter920afa72010-09-16 17:54:23 +02002636
2637 if (free_space != NULL) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01002638 if (map_and_fenceable)
Chris Wilson05394f32010-11-08 19:18:58 +00002639 obj->gtt_space =
Daniel Vetter920afa72010-09-16 17:54:23 +02002640 drm_mm_get_block_range_generic(free_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002641 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002642 dev_priv->mm.gtt_mappable_end,
2643 0);
2644 else
Chris Wilson05394f32010-11-08 19:18:58 +00002645 obj->gtt_space =
Chris Wilsona00b10c2010-09-24 21:15:47 +01002646 drm_mm_get_block(free_space, size, alignment);
Daniel Vetter920afa72010-09-16 17:54:23 +02002647 }
Chris Wilson05394f32010-11-08 19:18:58 +00002648 if (obj->gtt_space == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07002649 /* If the gtt is empty and we're still having trouble
2650 * fitting our object in, we're out of memory.
2651 */
Daniel Vetter75e9e912010-11-04 17:11:09 +01002652 ret = i915_gem_evict_something(dev, size, alignment,
2653 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01002654 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002655 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002656
Eric Anholt673a3942008-07-30 12:06:12 -07002657 goto search_free;
2658 }
2659
Chris Wilsone5281cc2010-10-28 13:45:36 +01002660 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002661 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00002662 drm_mm_put_block(obj->gtt_space);
2663 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002664
2665 if (ret == -ENOMEM) {
2666 /* first try to clear up some space from the GTT */
Chris Wilsona00b10c2010-09-24 21:15:47 +01002667 ret = i915_gem_evict_something(dev, size,
Daniel Vetter75e9e912010-11-04 17:11:09 +01002668 alignment,
2669 map_and_fenceable);
Chris Wilson07f73f62009-09-14 16:50:30 +01002670 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002671 /* now try to shrink everyone else */
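			/* Retry once with the default gfp flags: dropping
			 * __GFP_NORETRY | __GFP_NOWARN lets the allocator
			 * try much harder on the next pass.
			 */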
Chris Wilson4bdadb92010-01-27 13:36:32 +00002672 if (gfpmask) {
2673 gfpmask = 0;
2674 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002675 }
2676
2677 return ret;
2678 }
2679
2680 goto search_free;
2681 }
2682
Eric Anholt673a3942008-07-30 12:06:12 -07002683 return ret;
2684 }
2685
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002686 ret = i915_gem_gtt_bind_object(obj);
2687 if (ret) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01002688 i915_gem_object_put_pages_gtt(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002689 drm_mm_put_block(obj->gtt_space);
2690 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002691
Chris Wilsona00b10c2010-09-24 21:15:47 +01002692 ret = i915_gem_evict_something(dev, size,
Daniel Vetter75e9e912010-11-04 17:11:09 +01002693 alignment, map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01002694 if (ret)
Chris Wilson07f73f62009-09-14 16:50:30 +01002695 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002696
2697 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002698 }
Eric Anholt673a3942008-07-30 12:06:12 -07002699
Chris Wilson6299f992010-11-24 12:23:44 +00002700 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002701 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002702
Eric Anholt673a3942008-07-30 12:06:12 -07002703 /* Assert that the object is not currently in any GPU domain. As it
2704 * wasn't in the GTT, there shouldn't be any way it could have been in
2705 * a GPU cache
2706 */
Chris Wilson05394f32010-11-08 19:18:58 +00002707 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2708 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002709
Chris Wilson6299f992010-11-24 12:23:44 +00002710 obj->gtt_offset = obj->gtt_space->start;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002711
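	/* The object is only fenceable if it fills its GTT node with the
	 * size and alignment the fence hardware expects, and only mappable
	 * if it lies entirely below the mappable aperture limit.
	 */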
Daniel Vetter75e9e912010-11-04 17:11:09 +01002712 fenceable =
Chris Wilson05394f32010-11-08 19:18:58 +00002713 obj->gtt_space->size == fence_size &&
2714		(obj->gtt_space->start & (fence_alignment - 1)) == 0;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002715
Daniel Vetter75e9e912010-11-04 17:11:09 +01002716 mappable =
Chris Wilson05394f32010-11-08 19:18:58 +00002717 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002718
Chris Wilson05394f32010-11-08 19:18:58 +00002719 obj->map_and_fenceable = mappable && fenceable;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002720
Chris Wilson6299f992010-11-24 12:23:44 +00002721 trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
Eric Anholt673a3942008-07-30 12:06:12 -07002722 return 0;
2723}
2724
2725void
Chris Wilson05394f32010-11-08 19:18:58 +00002726i915_gem_clflush_object(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002727{
Eric Anholt673a3942008-07-30 12:06:12 -07002728 /* If we don't have a page list set up, then we're not pinned
2729 * to GPU, and we can ignore the cache flush because it'll happen
2730 * again at bind time.
2731 */
Chris Wilson05394f32010-11-08 19:18:58 +00002732 if (obj->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002733 return;
2734
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002735 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002736
Chris Wilson05394f32010-11-08 19:18:58 +00002737 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002738}
2739
Eric Anholte47c68e2008-11-14 13:35:19 -08002740/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson3619df02010-11-28 15:37:17 +00002741static void
2742i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002743{
Chris Wilson05394f32010-11-08 19:18:58 +00002744 struct drm_device *dev = obj->base.dev;
Eric Anholte47c68e2008-11-14 13:35:19 -08002745
Chris Wilson05394f32010-11-08 19:18:58 +00002746 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson3619df02010-11-28 15:37:17 +00002747 return;
Eric Anholte47c68e2008-11-14 13:35:19 -08002748
2749 /* Queue the GPU write cache flushing we need. */
Chris Wilson05394f32010-11-08 19:18:58 +00002750 i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
2751 BUG_ON(obj->base.write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002752}
2753
2754/** Flushes the GTT write domain for the object if it's dirty. */
2755static void
Chris Wilson05394f32010-11-08 19:18:58 +00002756i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002757{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002758 uint32_t old_write_domain;
2759
Chris Wilson05394f32010-11-08 19:18:58 +00002760 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08002761 return;
2762
2763 /* No actual flushing is required for the GTT write domain. Writes
2764 * to it immediately go to main memory as far as we know, so there's
2765 * no chipset flush. It also doesn't land in render cache.
2766 */
Chris Wilson4a684a42010-10-28 14:44:08 +01002767 i915_gem_release_mmap(obj);
2768
Chris Wilson05394f32010-11-08 19:18:58 +00002769 old_write_domain = obj->base.write_domain;
2770 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002771
2772 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002773 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002774 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002775}
2776
2777/** Flushes the CPU write domain for the object if it's dirty. */
2778static void
Chris Wilson05394f32010-11-08 19:18:58 +00002779i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002780{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002781 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002782
Chris Wilson05394f32010-11-08 19:18:58 +00002783 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08002784 return;
2785
2786 i915_gem_clflush_object(obj);
Daniel Vetter40ce6572010-11-05 18:12:18 +01002787 intel_gtt_chipset_flush();
Chris Wilson05394f32010-11-08 19:18:58 +00002788 old_write_domain = obj->base.write_domain;
2789 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002790
2791 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002792 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002793 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002794}
2795
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002796/**
2797 * Moves a single object to the GTT read, and possibly write domain.
2798 *
2799 * This function returns when the move is complete, including waiting on
2800 * flushes to occur.
2801 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002802int
Chris Wilson20217462010-11-23 15:26:33 +00002803i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002804{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002805 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002806 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002807
Eric Anholt02354392008-11-26 13:58:13 -08002808 /* Not valid to be called on unbound objects. */
Chris Wilson05394f32010-11-08 19:18:58 +00002809 if (obj->gtt_space == NULL)
Eric Anholt02354392008-11-26 13:58:13 -08002810 return -EINVAL;
2811
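	/* Serialise against the GPU: flush any pending GPU writes and wait
	 * for rendering to complete before switching domains.
	 */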
Chris Wilson3619df02010-11-28 15:37:17 +00002812 i915_gem_object_flush_gpu_write_domain(obj);
Daniel Vetterde18a292010-11-27 22:30:41 +01002813 ret = i915_gem_object_wait_rendering(obj, true);
2814 if (ret)
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002815 return ret;
2816
Chris Wilson72133422010-09-13 23:56:38 +01002817 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002818
Chris Wilson05394f32010-11-08 19:18:58 +00002819 old_write_domain = obj->base.write_domain;
2820 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002821
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002822 /* It should now be out of any other write domains, and we can update
2823 * the domain values for our changes.
2824 */
Chris Wilson05394f32010-11-08 19:18:58 +00002825 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2826 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002827 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002828 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2829 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2830 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08002831 }
2832
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002833 trace_i915_gem_object_change_domain(obj,
2834 old_read_domains,
2835 old_write_domain);
2836
Eric Anholte47c68e2008-11-14 13:35:19 -08002837 return 0;
2838}
2839
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002840/*
2841 * Prepare buffer for display plane. Use uninterruptible for possible flush
2842 * wait, as during the modesetting process we're not supposed to be interrupted.
2843 */
2844int
Chris Wilson05394f32010-11-08 19:18:58 +00002845i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
Chris Wilson919926a2010-11-12 13:42:53 +00002846 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002847{
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002848 uint32_t old_read_domains;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002849 int ret;
2850
2851 /* Not valid to be called on unbound objects. */
Chris Wilson05394f32010-11-08 19:18:58 +00002852 if (obj->gtt_space == NULL)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002853 return -EINVAL;
2854
Chris Wilson3619df02010-11-28 15:37:17 +00002855 i915_gem_object_flush_gpu_write_domain(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002856
Chris Wilsonced270f2010-09-26 22:47:46 +01002857	/* Currently, we are always called from a non-interruptible context. */
2858 if (!pipelined) {
2859 ret = i915_gem_object_wait_rendering(obj, false);
2860 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002861 return ret;
2862 }
2863
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002864 i915_gem_object_flush_cpu_write_domain(obj);
2865
Chris Wilson05394f32010-11-08 19:18:58 +00002866 old_read_domains = obj->base.read_domains;
2867 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002868
2869 trace_i915_gem_object_change_domain(obj,
2870 old_read_domains,
Chris Wilson05394f32010-11-08 19:18:58 +00002871 obj->base.write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002872
2873 return 0;
2874}
2875
Chris Wilson85345512010-11-13 09:49:11 +00002876int
2877i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
2878 bool interruptible)
2879{
2880 if (!obj->active)
2881 return 0;
2882
2883 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
Chris Wilson05394f32010-11-08 19:18:58 +00002884 i915_gem_flush_ring(obj->base.dev, obj->ring,
Chris Wilson85345512010-11-13 09:49:11 +00002885 0, obj->base.write_domain);
2886
Chris Wilson05394f32010-11-08 19:18:58 +00002887 return i915_gem_object_wait_rendering(obj, interruptible);
Chris Wilson85345512010-11-13 09:49:11 +00002888}
2889
Eric Anholte47c68e2008-11-14 13:35:19 -08002890/**
2891 * Moves a single object to the CPU read, and possibly write domain.
2892 *
2893 * This function returns when the move is complete, including waiting on
2894 * flushes to occur.
2895 */
2896static int
Chris Wilson919926a2010-11-12 13:42:53 +00002897i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002898{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002899 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002900 int ret;
2901
Chris Wilson3619df02010-11-28 15:37:17 +00002902 i915_gem_object_flush_gpu_write_domain(obj);
Daniel Vetterde18a292010-11-27 22:30:41 +01002903 ret = i915_gem_object_wait_rendering(obj, true);
2904 if (ret)
Eric Anholte47c68e2008-11-14 13:35:19 -08002905 return ret;
2906
2907 i915_gem_object_flush_gtt_write_domain(obj);
2908
2909 /* If we have a partially-valid cache of the object in the CPU,
2910 * finish invalidating it and free the per-page flags.
2911 */
2912 i915_gem_object_set_to_full_cpu_read_domain(obj);
2913
Chris Wilson05394f32010-11-08 19:18:58 +00002914 old_write_domain = obj->base.write_domain;
2915 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002916
Eric Anholte47c68e2008-11-14 13:35:19 -08002917 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00002918 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Eric Anholte47c68e2008-11-14 13:35:19 -08002919 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002920
Chris Wilson05394f32010-11-08 19:18:58 +00002921 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002922 }
2923
2924 /* It should now be out of any other write domains, and we can update
2925 * the domain values for our changes.
2926 */
Chris Wilson05394f32010-11-08 19:18:58 +00002927 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08002928
2929 /* If we're writing through the CPU, then the GPU read domains will
2930 * need to be invalidated at next use.
2931 */
2932 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002933 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2934 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002935 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002936
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002937 trace_i915_gem_object_change_domain(obj,
2938 old_read_domains,
2939 old_write_domain);
2940
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002941 return 0;
2942}
2943
Eric Anholt673a3942008-07-30 12:06:12 -07002944/**
Eric Anholte47c68e2008-11-14 13:35:19 -08002945 * Moves the object from a partial CPU read domain to a full one.
Eric Anholt673a3942008-07-30 12:06:12 -07002946 *
Eric Anholte47c68e2008-11-14 13:35:19 -08002947 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2948 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
2949 */
2950static void
Chris Wilson05394f32010-11-08 19:18:58 +00002951i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002952{
Chris Wilson05394f32010-11-08 19:18:58 +00002953 if (!obj->page_cpu_valid)
Eric Anholte47c68e2008-11-14 13:35:19 -08002954 return;
2955
2956 /* If we're partially in the CPU read domain, finish moving it in.
2957 */
Chris Wilson05394f32010-11-08 19:18:58 +00002958 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
Eric Anholte47c68e2008-11-14 13:35:19 -08002959 int i;
2960
Chris Wilson05394f32010-11-08 19:18:58 +00002961 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
2962 if (obj->page_cpu_valid[i])
Eric Anholte47c68e2008-11-14 13:35:19 -08002963 continue;
Chris Wilson05394f32010-11-08 19:18:58 +00002964 drm_clflush_pages(obj->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08002965 }
Eric Anholte47c68e2008-11-14 13:35:19 -08002966 }
2967
2968 /* Free the page_cpu_valid mappings which are now stale, whether
2969 * or not we've got I915_GEM_DOMAIN_CPU.
2970 */
Chris Wilson05394f32010-11-08 19:18:58 +00002971 kfree(obj->page_cpu_valid);
2972 obj->page_cpu_valid = NULL;
Eric Anholte47c68e2008-11-14 13:35:19 -08002973}
2974
2975/**
2976 * Set the CPU read domain on a range of the object.
2977 *
2978 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2979 * not entirely valid. The page_cpu_valid member of the object flags which
2980 * pages have been flushed, and will be respected by
2981 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2982 * of the whole object.
2983 *
2984 * This function returns when the move is complete, including waiting on
2985 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07002986 */
2987static int
Chris Wilson05394f32010-11-08 19:18:58 +00002988i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
Eric Anholte47c68e2008-11-14 13:35:19 -08002989 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07002990{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002991 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002992 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002993
Chris Wilson05394f32010-11-08 19:18:58 +00002994 if (offset == 0 && size == obj->base.size)
Eric Anholte47c68e2008-11-14 13:35:19 -08002995 return i915_gem_object_set_to_cpu_domain(obj, 0);
2996
Chris Wilson3619df02010-11-28 15:37:17 +00002997 i915_gem_object_flush_gpu_write_domain(obj);
Daniel Vetterde18a292010-11-27 22:30:41 +01002998 ret = i915_gem_object_wait_rendering(obj, true);
2999 if (ret)
Eric Anholte47c68e2008-11-14 13:35:19 -08003000 return ret;
Daniel Vetterde18a292010-11-27 22:30:41 +01003001
Eric Anholte47c68e2008-11-14 13:35:19 -08003002 i915_gem_object_flush_gtt_write_domain(obj);
3003
3004 /* If we're already fully in the CPU read domain, we're done. */
Chris Wilson05394f32010-11-08 19:18:58 +00003005 if (obj->page_cpu_valid == NULL &&
3006 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003007 return 0;
3008
Eric Anholte47c68e2008-11-14 13:35:19 -08003009 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3010 * newly adding I915_GEM_DOMAIN_CPU
3011 */
Chris Wilson05394f32010-11-08 19:18:58 +00003012 if (obj->page_cpu_valid == NULL) {
3013 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3014 GFP_KERNEL);
3015 if (obj->page_cpu_valid == NULL)
Eric Anholte47c68e2008-11-14 13:35:19 -08003016 return -ENOMEM;
Chris Wilson05394f32010-11-08 19:18:58 +00003017 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3018 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003019
3020 /* Flush the cache on any pages that are still invalid from the CPU's
3021 * perspective.
3022 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003023 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3024 i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00003025 if (obj->page_cpu_valid[i])
Eric Anholt673a3942008-07-30 12:06:12 -07003026 continue;
3027
Chris Wilson05394f32010-11-08 19:18:58 +00003028 drm_clflush_pages(obj->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003029
Chris Wilson05394f32010-11-08 19:18:58 +00003030 obj->page_cpu_valid[i] = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07003031 }
3032
Eric Anholte47c68e2008-11-14 13:35:19 -08003033 /* It should now be out of any other write domains, and we can update
3034 * the domain values for our changes.
3035 */
Chris Wilson05394f32010-11-08 19:18:58 +00003036 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003037
Chris Wilson05394f32010-11-08 19:18:58 +00003038 old_read_domains = obj->base.read_domains;
3039 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003040
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003041 trace_i915_gem_object_change_domain(obj,
3042 old_read_domains,
Chris Wilson05394f32010-11-08 19:18:58 +00003043 obj->base.write_domain);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003044
Eric Anholt673a3942008-07-30 12:06:12 -07003045 return 0;
3046}
3047
Eric Anholt673a3942008-07-30 12:06:12 -07003048/* Throttle our rendering by waiting until the ring has completed our requests
3049 * emitted over 20 msec ago.
3050 *
Eric Anholtb9624422009-06-03 07:27:35 +00003051 * Note that if we were to use the current jiffies each time around the loop,
3052 * we wouldn't escape the function with any frames outstanding if the time to
3053 * render a frame was over 20ms.
3054 *
Eric Anholt673a3942008-07-30 12:06:12 -07003055 * This should get us reasonable parallelism between CPU and GPU but also
3056 * relatively low latency when blocking on a particular request to finish.
3057 */
3058static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003059i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003060{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003061 struct drm_i915_private *dev_priv = dev->dev_private;
3062 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003063 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003064 struct drm_i915_gem_request *request;
3065 struct intel_ring_buffer *ring = NULL;
3066 u32 seqno = 0;
3067 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003068
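	/* Find the most recent request that is already at least 20ms old;
	 * waiting for it below throttles us to roughly 20ms of
	 * outstanding rendering.
	 */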
Chris Wilson1c255952010-09-26 11:03:27 +01003069 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003070 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003071 if (time_after_eq(request->emitted_jiffies, recent_enough))
3072 break;
3073
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003074 ring = request->ring;
3075 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003076 }
Chris Wilson1c255952010-09-26 11:03:27 +01003077 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003078
3079 if (seqno == 0)
3080 return 0;
3081
3082 ret = 0;
Chris Wilson78501ea2010-10-27 12:18:21 +01003083 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003084 /* And wait for the seqno passing without holding any locks and
3085 * causing extra latency for others. This is safe as the irq
3086 * generation is designed to be run atomically and so is
3087 * lockless.
3088 */
Chris Wilson78501ea2010-10-27 12:18:21 +01003089 ring->user_irq_get(ring);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003090 ret = wait_event_interruptible(ring->irq_queue,
Chris Wilson78501ea2010-10-27 12:18:21 +01003091 i915_seqno_passed(ring->get_seqno(ring), seqno)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003092 || atomic_read(&dev_priv->mm.wedged));
Chris Wilson78501ea2010-10-27 12:18:21 +01003093 ring->user_irq_put(ring);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003094
3095 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3096 ret = -EIO;
3097 }
3098
3099 if (ret == 0)
3100 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003101
Eric Anholt673a3942008-07-30 12:06:12 -07003102 return ret;
3103}
3104
Eric Anholt673a3942008-07-30 12:06:12 -07003105int
Chris Wilson05394f32010-11-08 19:18:58 +00003106i915_gem_object_pin(struct drm_i915_gem_object *obj,
3107 uint32_t alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003108 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07003109{
Chris Wilson05394f32010-11-08 19:18:58 +00003110 struct drm_device *dev = obj->base.dev;
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003111 struct drm_i915_private *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003112 int ret;
3113
Chris Wilson05394f32010-11-08 19:18:58 +00003114 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
Chris Wilson23bc5982010-09-29 16:10:57 +01003115 WARN_ON(i915_verify_lists(dev));
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003116
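	/* If the object is already bound, but not with the alignment or
	 * mappability the caller needs, unbind it so it can be rebound
	 * correctly below.
	 */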
Chris Wilson05394f32010-11-08 19:18:58 +00003117 if (obj->gtt_space != NULL) {
3118 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3119 (map_and_fenceable && !obj->map_and_fenceable)) {
3120 WARN(obj->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003121 "bo is already pinned with incorrect alignment:"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003122 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3123 " obj->map_and_fenceable=%d\n",
Chris Wilson05394f32010-11-08 19:18:58 +00003124 obj->gtt_offset, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003125 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003126 obj->map_and_fenceable);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003127 ret = i915_gem_object_unbind(obj);
3128 if (ret)
3129 return ret;
3130 }
3131 }
3132
Chris Wilson05394f32010-11-08 19:18:58 +00003133 if (obj->gtt_space == NULL) {
Chris Wilsona00b10c2010-09-24 21:15:47 +01003134 ret = i915_gem_object_bind_to_gtt(obj, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003135 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01003136 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003137 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00003138 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003139
Chris Wilson05394f32010-11-08 19:18:58 +00003140 if (obj->pin_count++ == 0) {
Chris Wilson05394f32010-11-08 19:18:58 +00003141 if (!obj->active)
3142 list_move_tail(&obj->mm_list,
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003143 &dev_priv->mm.pinned_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003144 }
Chris Wilson6299f992010-11-24 12:23:44 +00003145 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003146
Chris Wilson23bc5982010-09-29 16:10:57 +01003147 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003148 return 0;
3149}
3150
3151void
Chris Wilson05394f32010-11-08 19:18:58 +00003152i915_gem_object_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003153{
Chris Wilson05394f32010-11-08 19:18:58 +00003154 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003155 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003156
Chris Wilson23bc5982010-09-29 16:10:57 +01003157 WARN_ON(i915_verify_lists(dev));
Chris Wilson05394f32010-11-08 19:18:58 +00003158 BUG_ON(obj->pin_count == 0);
3159 BUG_ON(obj->gtt_space == NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07003160
Chris Wilson05394f32010-11-08 19:18:58 +00003161 if (--obj->pin_count == 0) {
3162 if (!obj->active)
3163 list_move_tail(&obj->mm_list,
Eric Anholt673a3942008-07-30 12:06:12 -07003164 &dev_priv->mm.inactive_list);
Chris Wilson6299f992010-11-24 12:23:44 +00003165 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003166 }
Chris Wilson23bc5982010-09-29 16:10:57 +01003167 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003168}
3169
3170int
3171i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003172 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003173{
3174 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003175 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003176 int ret;
3177
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003178 ret = i915_mutex_lock_interruptible(dev);
3179 if (ret)
3180 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003181
Chris Wilson05394f32010-11-08 19:18:58 +00003182 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Eric Anholt673a3942008-07-30 12:06:12 -07003183 if (obj == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003184 ret = -ENOENT;
3185 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003186 }
Eric Anholt673a3942008-07-30 12:06:12 -07003187
Chris Wilson05394f32010-11-08 19:18:58 +00003188 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003189 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003190 ret = -EINVAL;
3191 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003192 }
3193
Chris Wilson05394f32010-11-08 19:18:58 +00003194 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003195 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3196 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003197 ret = -EINVAL;
3198 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003199 }
3200
Chris Wilson05394f32010-11-08 19:18:58 +00003201 obj->user_pin_count++;
3202 obj->pin_filp = file;
3203 if (obj->user_pin_count == 1) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01003204 ret = i915_gem_object_pin(obj, args->alignment, true);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003205 if (ret)
3206 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003207 }
3208
3209 /* XXX - flush the CPU caches for pinned objects
3210 * as the X server doesn't manage domains yet
3211 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003212 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00003213 args->offset = obj->gtt_offset;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003214out:
Chris Wilson05394f32010-11-08 19:18:58 +00003215 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003216unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003217 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003218 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003219}
3220
3221int
3222i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003223 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003224{
3225 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003226 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003227 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003228
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003229 ret = i915_mutex_lock_interruptible(dev);
3230 if (ret)
3231 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003232
Chris Wilson05394f32010-11-08 19:18:58 +00003233 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Eric Anholt673a3942008-07-30 12:06:12 -07003234 if (obj == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003235 ret = -ENOENT;
3236 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003237 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003238
Chris Wilson05394f32010-11-08 19:18:58 +00003239 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003240 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3241 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003242 ret = -EINVAL;
3243 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003244 }
Chris Wilson05394f32010-11-08 19:18:58 +00003245 obj->user_pin_count--;
3246 if (obj->user_pin_count == 0) {
3247 obj->pin_filp = NULL;
Jesse Barnes79e53942008-11-07 14:24:08 -08003248 i915_gem_object_unpin(obj);
3249 }
Eric Anholt673a3942008-07-30 12:06:12 -07003250
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003251out:
Chris Wilson05394f32010-11-08 19:18:58 +00003252 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003253unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003254 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003255 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003256}
3257
3258int
3259i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003260 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003261{
3262 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003263 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003264 int ret;
3265
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003266 ret = i915_mutex_lock_interruptible(dev);
3267 if (ret)
3268 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003269
Chris Wilson05394f32010-11-08 19:18:58 +00003270 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Eric Anholt673a3942008-07-30 12:06:12 -07003271 if (obj == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003272 ret = -ENOENT;
3273 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003274 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003275
Chris Wilson0be555b2010-08-04 15:36:30 +01003276 /* Count all active objects as busy, even if they are currently not used
3277 * by the gpu. Users of this interface expect objects to eventually
3278 * become non-busy without any further actions, therefore emit any
3279 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08003280 */
Chris Wilson05394f32010-11-08 19:18:58 +00003281 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003282 if (args->busy) {
3283 /* Unconditionally flush objects, even when the gpu still uses this
3284 * object. Userspace calling this function indicates that it wants to
3285	 * use this buffer sooner rather than later, so issuing the required
3286 * flush earlier is beneficial.
3287 */
Chris Wilson05394f32010-11-08 19:18:58 +00003288 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
3289 i915_gem_flush_ring(dev, obj->ring,
3290 0, obj->base.write_domain);
Chris Wilson0be555b2010-08-04 15:36:30 +01003291
3292 /* Update the active list for the hardware's current position.
3293 * Otherwise this only updates on a delayed timer or when irqs
3294 * are actually unmasked, and our working set ends up being
3295 * larger than required.
3296 */
Chris Wilson05394f32010-11-08 19:18:58 +00003297 i915_gem_retire_requests_ring(dev, obj->ring);
Chris Wilson0be555b2010-08-04 15:36:30 +01003298
Chris Wilson05394f32010-11-08 19:18:58 +00003299 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003300 }
Eric Anholt673a3942008-07-30 12:06:12 -07003301
Chris Wilson05394f32010-11-08 19:18:58 +00003302 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003303unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003304 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003305 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003306}
3307
3308int
3309i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3310 struct drm_file *file_priv)
3311{
3312 return i915_gem_ring_throttle(dev, file_priv);
3313}
3314
Chris Wilson3ef94da2009-09-14 16:50:29 +01003315int
3316i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3317 struct drm_file *file_priv)
3318{
3319 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003320 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003321 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003322
3323 switch (args->madv) {
3324 case I915_MADV_DONTNEED:
3325 case I915_MADV_WILLNEED:
3326 break;
3327 default:
3328 return -EINVAL;
3329 }
3330
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003331 ret = i915_mutex_lock_interruptible(dev);
3332 if (ret)
3333 return ret;
3334
Chris Wilson05394f32010-11-08 19:18:58 +00003335 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilson3ef94da2009-09-14 16:50:29 +01003336 if (obj == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003337 ret = -ENOENT;
3338 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003339 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01003340
Chris Wilson05394f32010-11-08 19:18:58 +00003341 if (obj->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003342 ret = -EINVAL;
3343 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003344 }
3345
Chris Wilson05394f32010-11-08 19:18:58 +00003346 if (obj->madv != __I915_MADV_PURGED)
3347 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003348
Chris Wilson2d7ef392009-09-20 23:13:10 +01003349 /* if the object is no longer bound, discard its backing storage */
Chris Wilson05394f32010-11-08 19:18:58 +00003350 if (i915_gem_object_is_purgeable(obj) &&
3351 obj->gtt_space == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003352 i915_gem_object_truncate(obj);
3353
Chris Wilson05394f32010-11-08 19:18:58 +00003354 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003355
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003356out:
Chris Wilson05394f32010-11-08 19:18:58 +00003357 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003358unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01003359 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003360 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003361}
3362
Chris Wilson05394f32010-11-08 19:18:58 +00003363struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3364 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00003365{
Chris Wilson73aa8082010-09-30 11:46:12 +01003366 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterc397b902010-04-09 19:05:07 +00003367 struct drm_i915_gem_object *obj;
3368
3369 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3370 if (obj == NULL)
3371 return NULL;
3372
3373 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3374 kfree(obj);
3375 return NULL;
3376 }
3377
Chris Wilson73aa8082010-09-30 11:46:12 +01003378 i915_gem_info_add_obj(dev_priv, size);
3379
Daniel Vetterc397b902010-04-09 19:05:07 +00003380 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3381 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3382
3383 obj->agp_type = AGP_USER_MEMORY;
Daniel Vetter62b8b212010-04-09 19:05:08 +00003384 obj->base.driver_private = NULL;
Daniel Vetterc397b902010-04-09 19:05:07 +00003385 obj->fence_reg = I915_FENCE_REG_NONE;
Chris Wilson69dc4982010-10-19 10:36:51 +01003386 INIT_LIST_HEAD(&obj->mm_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003387 INIT_LIST_HEAD(&obj->gtt_list);
Chris Wilson69dc4982010-10-19 10:36:51 +01003388 INIT_LIST_HEAD(&obj->ring_list);
Chris Wilson432e58e2010-11-25 19:32:06 +00003389 INIT_LIST_HEAD(&obj->exec_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003390 INIT_LIST_HEAD(&obj->gpu_write_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003391 obj->madv = I915_MADV_WILLNEED;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003392 /* Avoid an unnecessary call to unbind on the first bind. */
3393 obj->map_and_fenceable = true;
Daniel Vetterc397b902010-04-09 19:05:07 +00003394
Chris Wilson05394f32010-11-08 19:18:58 +00003395 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00003396}
3397
Eric Anholt673a3942008-07-30 12:06:12 -07003398int i915_gem_init_object(struct drm_gem_object *obj)
3399{
Daniel Vetterc397b902010-04-09 19:05:07 +00003400 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08003401
Eric Anholt673a3942008-07-30 12:06:12 -07003402 return 0;
3403}
3404
Chris Wilson05394f32010-11-08 19:18:58 +00003405static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01003406{
Chris Wilson05394f32010-11-08 19:18:58 +00003407 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01003408 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbe726152010-07-23 23:18:50 +01003409 int ret;
3410
3411 ret = i915_gem_object_unbind(obj);
3412 if (ret == -ERESTARTSYS) {
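		/* The unbind was interrupted by a signal; park the object on
		 * the deferred free list so the release can be retried later.
		 */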
Chris Wilson05394f32010-11-08 19:18:58 +00003413 list_move(&obj->mm_list,
Chris Wilsonbe726152010-07-23 23:18:50 +01003414 &dev_priv->mm.deferred_free_list);
3415 return;
3416 }
3417
Chris Wilson05394f32010-11-08 19:18:58 +00003418 if (obj->base.map_list.map)
Chris Wilsonbe726152010-07-23 23:18:50 +01003419 i915_gem_free_mmap_offset(obj);
3420
Chris Wilson05394f32010-11-08 19:18:58 +00003421 drm_gem_object_release(&obj->base);
3422 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01003423
Chris Wilson05394f32010-11-08 19:18:58 +00003424 kfree(obj->page_cpu_valid);
3425 kfree(obj->bit_17);
3426 kfree(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01003427}
3428
Chris Wilson05394f32010-11-08 19:18:58 +00003429void i915_gem_free_object(struct drm_gem_object *gem_obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003430{
Chris Wilson05394f32010-11-08 19:18:58 +00003431 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3432 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003433
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003434 trace_i915_gem_object_destroy(obj);
3435
Chris Wilson05394f32010-11-08 19:18:58 +00003436 while (obj->pin_count > 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003437 i915_gem_object_unpin(obj);
3438
Chris Wilson05394f32010-11-08 19:18:58 +00003439 if (obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003440 i915_gem_detach_phys_object(dev, obj);
3441
Chris Wilsonbe726152010-07-23 23:18:50 +01003442 i915_gem_free_object_tail(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003443}
3444
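/*
 * Quiesce GEM for suspend or unload: wait for the GPU to go idle, evict
 * everything when running under UMS, drop the fence registers and stop
 * the retire work handler before tearing down the rings.
 */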
Jesse Barnes5669fca2009-02-17 15:13:31 -08003445int
Eric Anholt673a3942008-07-30 12:06:12 -07003446i915_gem_idle(struct drm_device *dev)
3447{
3448 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00003449 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003450
Keith Packard6dbe2772008-10-14 21:41:13 -07003451 mutex_lock(&dev->struct_mutex);
3452
Chris Wilson87acb0a2010-10-19 10:13:00 +01003453 if (dev_priv->mm.suspended) {
Keith Packard6dbe2772008-10-14 21:41:13 -07003454 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003455 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07003456 }
Eric Anholt673a3942008-07-30 12:06:12 -07003457
Chris Wilson29105cc2010-01-07 10:39:13 +00003458 ret = i915_gpu_idle(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003459 if (ret) {
3460 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003461 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07003462 }
Eric Anholt673a3942008-07-30 12:06:12 -07003463
Chris Wilson29105cc2010-01-07 10:39:13 +00003464 /* Under UMS, be paranoid and evict. */
3465 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
Chris Wilson5eac3ab2010-10-31 08:49:47 +00003466 ret = i915_gem_evict_inactive(dev, false);
Chris Wilson29105cc2010-01-07 10:39:13 +00003467 if (ret) {
3468 mutex_unlock(&dev->struct_mutex);
3469 return ret;
3470 }
3471 }
3472
Chris Wilson312817a2010-11-22 11:50:11 +00003473 i915_gem_reset_fences(dev);
3474
Chris Wilson29105cc2010-01-07 10:39:13 +00003475 /* Hack! Don't let anybody do execbuf while we don't control the chip.
 3476	 * We need to replace this with a semaphore, or something,
 3477	 * and stop overloading mm.suspended for this purpose.
3478 */
3479 dev_priv->mm.suspended = 1;
Daniel Vetterbc0c7f12010-08-20 18:18:48 +02003480 del_timer_sync(&dev_priv->hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00003481
3482 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003483 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00003484
Keith Packard6dbe2772008-10-14 21:41:13 -07003485 mutex_unlock(&dev->struct_mutex);
3486
Chris Wilson29105cc2010-01-07 10:39:13 +00003487 /* Cancel the retire work handler, which should be idle now. */
3488 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3489
Eric Anholt673a3942008-07-30 12:06:12 -07003490 return 0;
3491}
3492
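/*
 * Bring up the command rings in order: render first, then BSD and BLT
 * where the hardware has them.  On failure, the rings that were already
 * initialised are torn down again in reverse order.
 */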
Eric Anholt673a3942008-07-30 12:06:12 -07003493int
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003494i915_gem_init_ringbuffer(struct drm_device *dev)
3495{
3496 drm_i915_private_t *dev_priv = dev->dev_private;
3497 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003498
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003499 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003500 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00003501 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003502
3503 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003504 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003505 if (ret)
3506 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08003507 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01003508
Chris Wilson549f7362010-10-19 11:19:32 +01003509 if (HAS_BLT(dev)) {
3510 ret = intel_init_blt_ring_buffer(dev);
3511 if (ret)
3512 goto cleanup_bsd_ring;
3513 }
3514
Chris Wilson6f392d5482010-08-07 11:01:22 +01003515 dev_priv->next_seqno = 1;
3516
Chris Wilson68f95ba2010-05-27 13:18:22 +01003517 return 0;
3518
Chris Wilson549f7362010-10-19 11:19:32 +01003519cleanup_bsd_ring:
Chris Wilson78501ea2010-10-27 12:18:21 +01003520 intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003521cleanup_render_ring:
Chris Wilson78501ea2010-10-27 12:18:21 +01003522 intel_cleanup_ring_buffer(&dev_priv->render_ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003523 return ret;
3524}
3525
3526void
3527i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3528{
3529 drm_i915_private_t *dev_priv = dev->dev_private;
3530
Chris Wilson78501ea2010-10-27 12:18:21 +01003531 intel_cleanup_ring_buffer(&dev_priv->render_ring);
3532 intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
3533 intel_cleanup_ring_buffer(&dev_priv->blt_ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003534}
3535
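/*
 * Under UMS the X server notifies us when it gains or loses the VT via
 * the entervt/leavevt ioctls below, so GEM can reinitialise or idle the
 * hardware; with KMS the driver owns the hardware throughout, so both
 * paths are no-ops.
 */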
3536int
Eric Anholt673a3942008-07-30 12:06:12 -07003537i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3538 struct drm_file *file_priv)
3539{
3540 drm_i915_private_t *dev_priv = dev->dev_private;
3541 int ret;
3542
Jesse Barnes79e53942008-11-07 14:24:08 -08003543 if (drm_core_check_feature(dev, DRIVER_MODESET))
3544 return 0;
3545
Ben Gamariba1234d2009-09-14 17:48:47 -04003546 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003547 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04003548 atomic_set(&dev_priv->mm.wedged, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003549 }
3550
Eric Anholt673a3942008-07-30 12:06:12 -07003551 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003552 dev_priv->mm.suspended = 0;
3553
3554 ret = i915_gem_init_ringbuffer(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08003555 if (ret != 0) {
3556 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003557 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08003558 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003559
Chris Wilson69dc4982010-10-19 10:36:51 +01003560 BUG_ON(!list_empty(&dev_priv->mm.active_list));
Zou Nan hai852835f2010-05-21 09:08:56 +08003561 BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
Chris Wilson87acb0a2010-10-19 10:13:00 +01003562 BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
Chris Wilson549f7362010-10-19 11:19:32 +01003563 BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07003564 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3565 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
Zou Nan hai852835f2010-05-21 09:08:56 +08003566 BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
Chris Wilson87acb0a2010-10-19 10:13:00 +01003567 BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
Chris Wilson549f7362010-10-19 11:19:32 +01003568 BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
Eric Anholt673a3942008-07-30 12:06:12 -07003569 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003570
Chris Wilson5f353082010-06-07 14:03:03 +01003571 ret = drm_irq_install(dev);
3572 if (ret)
3573 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003574
Eric Anholt673a3942008-07-30 12:06:12 -07003575 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01003576
3577cleanup_ringbuffer:
3578 mutex_lock(&dev->struct_mutex);
3579 i915_gem_cleanup_ringbuffer(dev);
3580 dev_priv->mm.suspended = 1;
3581 mutex_unlock(&dev->struct_mutex);
3582
3583 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003584}
3585
3586int
3587i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3588 struct drm_file *file_priv)
3589{
Jesse Barnes79e53942008-11-07 14:24:08 -08003590 if (drm_core_check_feature(dev, DRIVER_MODESET))
3591 return 0;
3592
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003593 drm_irq_uninstall(dev);
Linus Torvaldse6890f62009-09-08 17:09:24 -07003594 return i915_gem_idle(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003595}
3596
3597void
3598i915_gem_lastclose(struct drm_device *dev)
3599{
3600 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003601
Eric Anholte806b492009-01-22 09:56:58 -08003602 if (drm_core_check_feature(dev, DRIVER_MODESET))
3603 return;
3604
Keith Packard6dbe2772008-10-14 21:41:13 -07003605 ret = i915_gem_idle(dev);
3606 if (ret)
3607 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003608}
3609
Chris Wilson64193402010-10-24 12:38:05 +01003610static void
3611init_ring_lists(struct intel_ring_buffer *ring)
3612{
3613 INIT_LIST_HEAD(&ring->active_list);
3614 INIT_LIST_HEAD(&ring->request_list);
3615 INIT_LIST_HEAD(&ring->gpu_write_list);
3616}
3617
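/*
 * One-time GEM initialisation at driver load: set up the global and
 * per-ring object lists, the retire work handler, the fence registers
 * and the inactive-list shrinker used under memory pressure.
 */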
Eric Anholt673a3942008-07-30 12:06:12 -07003618void
3619i915_gem_load(struct drm_device *dev)
3620{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003621 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07003622 drm_i915_private_t *dev_priv = dev->dev_private;
3623
Chris Wilson69dc4982010-10-19 10:36:51 +01003624 INIT_LIST_HEAD(&dev_priv->mm.active_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003625 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3626 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003627 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07003628 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilsonbe726152010-07-23 23:18:50 +01003629 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003630 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
Chris Wilson64193402010-10-24 12:38:05 +01003631 init_ring_lists(&dev_priv->render_ring);
3632 init_ring_lists(&dev_priv->bsd_ring);
3633 init_ring_lists(&dev_priv->blt_ring);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02003634 for (i = 0; i < 16; i++)
3635 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003636 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3637 i915_gem_retire_work_handler);
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003638 init_completion(&dev_priv->error_completion);
Chris Wilson31169712009-09-14 16:50:28 +01003639
Dave Airlie94400122010-07-20 13:15:31 +10003640 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3641 if (IS_GEN3(dev)) {
3642 u32 tmp = I915_READ(MI_ARB_STATE);
3643 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3644 /* arb state is a masked write, so set bit + bit in mask */
3645 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3646 I915_WRITE(MI_ARB_STATE, tmp);
3647 }
3648 }
3649
Jesse Barnesde151cf2008-11-12 10:03:55 -08003650 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08003651 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3652 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003653
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003654 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08003655 dev_priv->num_fence_regs = 16;
3656 else
3657 dev_priv->num_fence_regs = 8;
3658
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003659 /* Initialize fence registers to zero */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003660 switch (INTEL_INFO(dev)->gen) {
3661 case 6:
3662 for (i = 0; i < 16; i++)
3663 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
3664 break;
3665 case 5:
3666 case 4:
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003667 for (i = 0; i < 16; i++)
3668 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003669 break;
3670 case 3:
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003671 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3672 for (i = 0; i < 8; i++)
3673 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
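		/* fall through: gen3 also has the eight 830-style fence
		 * registers, cleared by the case 2 code below */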
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003674 case 2:
3675 for (i = 0; i < 8; i++)
3676 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
3677 break;
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003678 }
Eric Anholt673a3942008-07-30 12:06:12 -07003679 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003680 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01003681
3682 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3683 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3684 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07003685}
Dave Airlie71acb5e2008-12-30 20:31:46 +10003686
3687/*
3688 * Create a physically contiguous memory object for this object
 3689 * e.g. for cursor and overlay registers
3690 */
Chris Wilson995b6762010-08-20 13:23:26 +01003691static int i915_gem_init_phys_object(struct drm_device *dev,
3692 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003693{
3694 drm_i915_private_t *dev_priv = dev->dev_private;
3695 struct drm_i915_gem_phys_object *phys_obj;
3696 int ret;
3697
3698 if (dev_priv->mm.phys_objs[id - 1] || !size)
3699 return 0;
3700
Eric Anholt9a298b22009-03-24 12:23:04 -07003701 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003702 if (!phys_obj)
3703 return -ENOMEM;
3704
3705 phys_obj->id = id;
3706
Chris Wilson6eeefaf2010-08-07 11:01:39 +01003707 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003708 if (!phys_obj->handle) {
3709 ret = -ENOMEM;
3710 goto kfree_obj;
3711 }
3712#ifdef CONFIG_X86
3713 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3714#endif
3715
3716 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3717
3718 return 0;
3719kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07003720 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003721 return ret;
3722}
3723
Chris Wilson995b6762010-08-20 13:23:26 +01003724static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003725{
3726 drm_i915_private_t *dev_priv = dev->dev_private;
3727 struct drm_i915_gem_phys_object *phys_obj;
3728
3729 if (!dev_priv->mm.phys_objs[id - 1])
3730 return;
3731
3732 phys_obj = dev_priv->mm.phys_objs[id - 1];
3733 if (phys_obj->cur_obj) {
3734 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3735 }
3736
3737#ifdef CONFIG_X86
3738 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3739#endif
3740 drm_pci_free(dev, phys_obj->handle);
3741 kfree(phys_obj);
3742 dev_priv->mm.phys_objs[id - 1] = NULL;
3743}
3744
3745void i915_gem_free_all_phys_object(struct drm_device *dev)
3746{
3747 int i;
3748
Dave Airlie260883c2009-01-22 17:58:49 +10003749 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003750 i915_gem_free_phys_object(dev, i);
3751}
3752
3753void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00003754 struct drm_i915_gem_object *obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003755{
Chris Wilson05394f32010-11-08 19:18:58 +00003756 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Chris Wilsone5281cc2010-10-28 13:45:36 +01003757 char *vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003758 int i;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003759 int page_count;
3760
Chris Wilson05394f32010-11-08 19:18:58 +00003761 if (!obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003762 return;
Chris Wilson05394f32010-11-08 19:18:58 +00003763 vaddr = obj->phys_obj->handle->vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003764
Chris Wilson05394f32010-11-08 19:18:58 +00003765 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003766 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01003767 struct page *page = read_cache_page_gfp(mapping, i,
3768 GFP_HIGHUSER | __GFP_RECLAIMABLE);
3769 if (!IS_ERR(page)) {
3770 char *dst = kmap_atomic(page);
3771 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
3772 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003773
Chris Wilsone5281cc2010-10-28 13:45:36 +01003774 drm_clflush_pages(&page, 1);
3775
3776 set_page_dirty(page);
3777 mark_page_accessed(page);
3778 page_cache_release(page);
3779 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10003780 }
Daniel Vetter40ce6572010-11-05 18:12:18 +01003781 intel_gtt_chipset_flush();
Chris Wilsond78b47b2009-06-17 21:52:49 +01003782
Chris Wilson05394f32010-11-08 19:18:58 +00003783 obj->phys_obj->cur_obj = NULL;
3784 obj->phys_obj = NULL;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003785}
3786
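/*
 * Attaching copies the object's shmem pages into the contiguous buffer
 * allocated by i915_gem_init_phys_object(); detaching (above) copies the
 * data back out before the object goes back to being an ordinary
 * shmem-backed GEM object.
 */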
3787int
3788i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00003789 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01003790 int id,
3791 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003792{
Chris Wilson05394f32010-11-08 19:18:58 +00003793 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003794 drm_i915_private_t *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003795 int ret = 0;
3796 int page_count;
3797 int i;
3798
3799 if (id > I915_MAX_PHYS_OBJECT)
3800 return -EINVAL;
3801
Chris Wilson05394f32010-11-08 19:18:58 +00003802 if (obj->phys_obj) {
3803 if (obj->phys_obj->id == id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10003804 return 0;
3805 i915_gem_detach_phys_object(dev, obj);
3806 }
3807
Dave Airlie71acb5e2008-12-30 20:31:46 +10003808 /* create a new object */
3809 if (!dev_priv->mm.phys_objs[id - 1]) {
3810 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson05394f32010-11-08 19:18:58 +00003811 obj->base.size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003812 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00003813 DRM_ERROR("failed to init phys object %d size: %zu\n",
3814 id, obj->base.size);
Chris Wilsone5281cc2010-10-28 13:45:36 +01003815 return ret;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003816 }
3817 }
3818
3819 /* bind to the object */
Chris Wilson05394f32010-11-08 19:18:58 +00003820 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3821 obj->phys_obj->cur_obj = obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003822
Chris Wilson05394f32010-11-08 19:18:58 +00003823 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003824
3825 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01003826 struct page *page;
3827 char *dst, *src;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003828
Chris Wilsone5281cc2010-10-28 13:45:36 +01003829 page = read_cache_page_gfp(mapping, i,
3830 GFP_HIGHUSER | __GFP_RECLAIMABLE);
3831 if (IS_ERR(page))
3832 return PTR_ERR(page);
3833
Chris Wilsonff75b9b2010-10-30 22:52:31 +01003834 src = kmap_atomic(page);
Chris Wilson05394f32010-11-08 19:18:58 +00003835 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003836 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07003837 kunmap_atomic(src);
Chris Wilsone5281cc2010-10-28 13:45:36 +01003838
3839 mark_page_accessed(page);
3840 page_cache_release(page);
Dave Airlie71acb5e2008-12-30 20:31:46 +10003841 }
3842
3843 return 0;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003844}
3845
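/*
 * pwrite for a phys object copies straight into its kernel mapping: try
 * a non-caching atomic copy first and, only if that faults, drop
 * struct_mutex and retry with a plain copy_from_user().
 */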
3846static int
Chris Wilson05394f32010-11-08 19:18:58 +00003847i915_gem_phys_pwrite(struct drm_device *dev,
3848 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +10003849 struct drm_i915_gem_pwrite *args,
3850 struct drm_file *file_priv)
3851{
Chris Wilson05394f32010-11-08 19:18:58 +00003852 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
Chris Wilsonb47b30c2010-11-08 01:12:29 +00003853 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10003854
Chris Wilsonb47b30c2010-11-08 01:12:29 +00003855 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
3856 unsigned long unwritten;
3857
3858 /* The physical object once assigned is fixed for the lifetime
3859 * of the obj, so we can safely drop the lock and continue
3860 * to access vaddr.
3861 */
3862 mutex_unlock(&dev->struct_mutex);
3863 unwritten = copy_from_user(vaddr, user_data, args->size);
3864 mutex_lock(&dev->struct_mutex);
3865 if (unwritten)
3866 return -EFAULT;
3867 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10003868
Daniel Vetter40ce6572010-11-05 18:12:18 +01003869 intel_gtt_chipset_flush();
Dave Airlie71acb5e2008-12-30 20:31:46 +10003870 return 0;
3871}
Eric Anholtb9624422009-06-03 07:27:35 +00003872
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003873void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00003874{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003875 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003876
3877 /* Clean up our request list when the client is going away, so that
3878 * later retire_requests won't dereference our soon-to-be-gone
3879 * file_priv.
3880 */
Chris Wilson1c255952010-09-26 11:03:27 +01003881 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003882 while (!list_empty(&file_priv->mm.request_list)) {
3883 struct drm_i915_gem_request *request;
3884
3885 request = list_first_entry(&file_priv->mm.request_list,
3886 struct drm_i915_gem_request,
3887 client_list);
3888 list_del(&request->client_list);
3889 request->file_priv = NULL;
3890 }
Chris Wilson1c255952010-09-26 11:03:27 +01003891 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00003892}
Chris Wilson31169712009-09-14 16:50:28 +01003893
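/*
 * Memory shrinker, registered from i915_gem_load().  A zero nr_to_scan
 * just reports how many inactive objects could be released; otherwise we
 * retire completed requests, unbind purgeable objects first, then any
 * other inactive objects, and as a last resort idle the GPU and rescan.
 */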
Chris Wilson31169712009-09-14 16:50:28 +01003894static int
Chris Wilson1637ef42010-04-20 17:10:35 +01003895i915_gpu_is_active(struct drm_device *dev)
3896{
3897 drm_i915_private_t *dev_priv = dev->dev_private;
3898 int lists_empty;
3899
Chris Wilson1637ef42010-04-20 17:10:35 +01003900 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
Chris Wilson17250b72010-10-28 12:51:39 +01003901 list_empty(&dev_priv->mm.active_list);
Chris Wilson1637ef42010-04-20 17:10:35 +01003902
3903 return !lists_empty;
3904}
3905
3906static int
Chris Wilson17250b72010-10-28 12:51:39 +01003907i915_gem_inactive_shrink(struct shrinker *shrinker,
3908 int nr_to_scan,
3909 gfp_t gfp_mask)
Chris Wilson31169712009-09-14 16:50:28 +01003910{
Chris Wilson17250b72010-10-28 12:51:39 +01003911 struct drm_i915_private *dev_priv =
3912 container_of(shrinker,
3913 struct drm_i915_private,
3914 mm.inactive_shrinker);
3915 struct drm_device *dev = dev_priv->dev;
3916 struct drm_i915_gem_object *obj, *next;
3917 int cnt;
3918
3919 if (!mutex_trylock(&dev->struct_mutex))
Chris Wilsonbbe2e112010-10-28 22:35:07 +01003920 return 0;
Chris Wilson31169712009-09-14 16:50:28 +01003921
3922 /* "fast-path" to count number of available objects */
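	/* Counts reported back to the VM are scaled by
	 * sysctl_vfs_cache_pressure (a percentage, default 100), matching
	 * the convention used by other shrinkers.
	 */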
3923 if (nr_to_scan == 0) {
Chris Wilson17250b72010-10-28 12:51:39 +01003924 cnt = 0;
3925 list_for_each_entry(obj,
3926 &dev_priv->mm.inactive_list,
3927 mm_list)
3928 cnt++;
3929 mutex_unlock(&dev->struct_mutex);
3930 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01003931 }
3932
Chris Wilson1637ef42010-04-20 17:10:35 +01003933rescan:
Chris Wilson31169712009-09-14 16:50:28 +01003934 /* first scan for clean buffers */
Chris Wilson17250b72010-10-28 12:51:39 +01003935 i915_gem_retire_requests(dev);
Chris Wilson31169712009-09-14 16:50:28 +01003936
Chris Wilson17250b72010-10-28 12:51:39 +01003937 list_for_each_entry_safe(obj, next,
3938 &dev_priv->mm.inactive_list,
3939 mm_list) {
3940 if (i915_gem_object_is_purgeable(obj)) {
Chris Wilson20217462010-11-23 15:26:33 +00003941 if (i915_gem_object_unbind(obj) == 0 &&
3942 --nr_to_scan == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01003943 break;
Chris Wilson31169712009-09-14 16:50:28 +01003944 }
Chris Wilson31169712009-09-14 16:50:28 +01003945 }
3946
3947 /* second pass, evict/count anything still on the inactive list */
Chris Wilson17250b72010-10-28 12:51:39 +01003948 cnt = 0;
3949 list_for_each_entry_safe(obj, next,
3950 &dev_priv->mm.inactive_list,
3951 mm_list) {
Chris Wilson20217462010-11-23 15:26:33 +00003952 if (nr_to_scan &&
3953 i915_gem_object_unbind(obj) == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01003954 nr_to_scan--;
Chris Wilson20217462010-11-23 15:26:33 +00003955 else
Chris Wilson17250b72010-10-28 12:51:39 +01003956 cnt++;
Chris Wilson31169712009-09-14 16:50:28 +01003957 }
3958
Chris Wilson17250b72010-10-28 12:51:39 +01003959 if (nr_to_scan && i915_gpu_is_active(dev)) {
Chris Wilson1637ef42010-04-20 17:10:35 +01003960 /*
3961 * We are desperate for pages, so as a last resort, wait
3962 * for the GPU to finish and discard whatever we can.
 3963	 * This dramatically reduces the number of OOM-killer
Chris Wilson1637ef42010-04-20 17:10:35 +01003964	 * events seen whilst running the GPU aggressively.
3965 */
Chris Wilson17250b72010-10-28 12:51:39 +01003966 if (i915_gpu_idle(dev) == 0)
Chris Wilson1637ef42010-04-20 17:10:35 +01003967 goto rescan;
3968 }
Chris Wilson17250b72010-10-28 12:51:39 +01003969 mutex_unlock(&dev->struct_mutex);
3970 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01003971}