/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/intel-gtt.h>

static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);

static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
						  bool pipelined);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
					  bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

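/* Check whether the GPU is wedged.  If a reset is already in progress,
 * wait (interruptibly) for it to complete; returns 0 once the GPU is
 * usable again and -EIO if it is still wedged afterwards.
 */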
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	/* Success, we reset the GPU! */
	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/* GPU is hung, bump the completion count to account for
	 * the token we just consumed so that we never hit zero and
	 * end up waiting upon a subsequent completion event that
	 * will never happen.
	 */
	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return -EIO;
}

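/* Take struct_mutex for an ioctl, but fail with -EIO if the GPU is wedged
 * and with -EAGAIN if it became wedged while we slept on the lock, so that
 * callers never block indefinitely behind a dead GPU.
 */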
static int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		mutex_unlock(&dev->struct_mutex);
		return -EAGAIN;
	}

	return 0;
}

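/* An object is "inactive" when it is bound into the GTT but neither in use
 * by the GPU nor pinned; such objects live on the inactive LRU list and are
 * candidates for eviction.
 */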
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->gtt_space &&
		!obj_priv->active &&
		obj_priv->pin_count == 0;
}

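/* Set up the GEM-managed range of the GTT.  Both ends must be page aligned;
 * the range [start, end) is handed to the drm_mm allocator from which
 * object bindings are carved out.
 */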
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	/* Sink the floating reference from kref_init(handlecount) */
	drm_gem_object_handle_unreference_unlocked(obj);

	args->handle = handle;
	return 0;
}

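/* Copy data out of a kmapped shmem backing page into user space using the
 * atomic, non-faulting copy primitive.  Returns -EFAULT on a fault so the
 * caller can fall back to the slow path.
 */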
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}

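/* Objects tiled on a chipset with bit-17 swizzling cannot be copied with a
 * simple linear memcpy, since the swizzle depends on the physical address
 * of each page; they must take the manual bit-17 copy paths below.
 */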
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}

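/* Plain page-to-page copy via kmap.  Used on the slow paths, where both
 * pages have already been pinned so the copy cannot fault while
 * struct_mutex is held.
 */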
static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

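/* As slow_shmem_copy, but compensating for bit-17 swizzling: when bit 17
 * of the GPU page's physical address is set, bit 6 of the offset is XORed,
 * swapping adjacent 64-byte cachelines, so the copy proceeds one cacheline
 * at a time.
 */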
static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

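/* Grab the object's backing pages, first with a cheap attempt that neither
 * retries nor warns (__GFP_NORETRY | __GFP_NOWARN); on -ENOMEM, evict
 * something suitably sized from the GTT and try again with default flags.
 */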
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;

		ret = i915_gem_evict_something(dev, obj->size,
					       i915_gem_get_gtt_alignment(obj));
		if (ret)
			return ret;

		ret = i915_gem_object_get_pages(obj, 0);
	}

	return ret;
}

/**
 * This is the fallback shmem pread path, which pins the user pages with
 * get_user_pages() up front, so we can copy out of the object's backing
 * pages while holding struct_mutex without taking page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto fail_put_user_pages;

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

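/* Write-direction counterpart of fast_shmem_read: copy user data into a
 * kmapped shmem backing page without faulting, returning -EFAULT so the
 * caller can fall back to the slow path.
 */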
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the user memory and copies it into the GTT through a kernel mapping
 * (kmap plus io_mapping_map_wc), so no faults can occur under struct_mutex.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out_unpin_pages;

	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the user memory and kmap to copy it into the object's backing pages.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto fail_put_user_pages;

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0 &&
		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg =
				&dev_priv->fence_regs[obj_priv->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	/* Maintain LRU order of "inactive" objects */
	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = to_intel_bo(obj);

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, true);
		if (ret)
			goto unlock;
	}

	if (i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure.  Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked.  Removing
 * the mapping will then trigger a page fault on the next user access,
 * allowing fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}

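/* Undo i915_gem_create_mmap_offset: remove the hash-table entry, return the
 * drm_mm block backing the fake offset, and free the map structure.
 */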
static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

Jesse Barnesde151cf2008-11-12 10:03:55 -08001401/**
1402 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1403 * @obj: object to check
1404 *
1405 * Return the required GTT alignment for an object, taking into account
1406 * potential fence register mapping if needed.
1407 */
1408static uint32_t
1409i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1410{
1411 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01001412 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001413 int start, i;
1414
1415 /*
1416 * Minimum alignment is 4k (GTT page size), but might be greater
1417 * if a fence register is needed for the object.
1418 */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001419 if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001420 return 4096;
1421
1422 /*
1423 * Previous chips need to be aligned to the size of the smallest
1424 * fence register that can contain the object.
1425 */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001426 if (INTEL_INFO(dev)->gen == 3)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001427 start = 1024*1024;
1428 else
1429 start = 512*1024;
1430
1431 for (i = start; i < obj->size; i <<= 1)
1432 ;
1433
1434 return i;
1435}
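
/*
 * Worked example (illustrative): on gen3 a 1.5 MiB tiled object starts
 * the search at 1 MiB; the loop doubles once (1 MiB < 1.5 MiB) and
 * returns 2 MiB, the smallest power-of-two fence size that can contain
 * the object. On gen4+ and for untiled objects the 4 KiB GTT page size
 * is always sufficient.
 */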
1436
1437/**
1438 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1439 * @dev: DRM device
1440 * @data: GTT mapping ioctl data
 1441 * @file_priv: DRM file entry of the caller
1442 *
1443 * Simply returns the fake offset to userspace so it can mmap it.
1444 * The mmap call will end up in drm_gem_mmap(), which will set things
1445 * up so we can get faults in the handler above.
1446 *
1447 * The fault handler will take care of binding the object into the GTT
1448 * (since it may have been evicted to make room for something), allocating
1449 * a fence register, and mapping the appropriate aperture address into
1450 * userspace.
1451 */
1452int
1453i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1454 struct drm_file *file_priv)
1455{
1456 struct drm_i915_gem_mmap_gtt *args = data;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001457 struct drm_gem_object *obj;
1458 struct drm_i915_gem_object *obj_priv;
1459 int ret;
1460
1461 if (!(dev->driver->driver_features & DRIVER_GEM))
1462 return -ENODEV;
1463
1464 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1465 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001466 return -ENOENT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001467
Chris Wilson76c1dec2010-09-25 11:22:51 +01001468 ret = i915_mutex_lock_interruptible(dev);
1469 if (ret) {
1470 drm_gem_object_unreference_unlocked(obj);
1471 return ret;
1472 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001473
Daniel Vetter23010e42010-03-08 13:35:02 +01001474 obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001475
Chris Wilsonab182822009-09-22 18:46:17 +01001476 if (obj_priv->madv != I915_MADV_WILLNEED) {
1477 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1478 drm_gem_object_unreference(obj);
1479 mutex_unlock(&dev->struct_mutex);
1480 return -EINVAL;
1481 }
 1482
Jesse Barnesde151cf2008-11-12 10:03:55 -08001484 if (!obj_priv->mmap_offset) {
1485 ret = i915_gem_create_mmap_offset(obj);
Chris Wilson13af1062009-02-11 14:26:31 +00001486 if (ret) {
1487 drm_gem_object_unreference(obj);
1488 mutex_unlock(&dev->struct_mutex);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001489 return ret;
Chris Wilson13af1062009-02-11 14:26:31 +00001490 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001491 }
1492
1493 args->offset = obj_priv->mmap_offset;
1494
Jesse Barnesde151cf2008-11-12 10:03:55 -08001495 /*
1496 * Pull it into the GTT so that we have a page list (makes the
1497 * initial fault faster and any subsequent flushing possible).
1498 */
1499 if (!obj_priv->agp_mem) {
Chris Wilsone67b8ce2009-09-14 16:50:26 +01001500 ret = i915_gem_object_bind_to_gtt(obj, 0);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001501 if (ret) {
1502 drm_gem_object_unreference(obj);
1503 mutex_unlock(&dev->struct_mutex);
1504 return ret;
1505 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001506 }
1507
1508 drm_gem_object_unreference(obj);
1509 mutex_unlock(&dev->struct_mutex);
1510
1511 return 0;
1512}
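
/*
 * Hypothetical userspace sketch (illustrative only, error handling
 * trimmed): the ioctl returns just the fake offset; the mmap() on the
 * DRM fd is what creates the mapping that i915_gem_fault() services.
 * Names follow i915_drm.h and libdrm; fd, bo_handle and bo_size are
 * assumed to exist in the caller.
 */
#if 0
	struct drm_i915_gem_mmap_gtt arg = { .handle = bo_handle };
	void *ptr;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return -errno;
	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, arg.offset);
#endif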
1513
Ben Gamari6911a9b2009-04-02 11:24:54 -07001514void
Eric Anholt856fa192009-03-19 14:10:50 -07001515i915_gem_object_put_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001516{
Daniel Vetter23010e42010-03-08 13:35:02 +01001517 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001518 int page_count = obj->size / PAGE_SIZE;
1519 int i;
1520
Eric Anholt856fa192009-03-19 14:10:50 -07001521 BUG_ON(obj_priv->pages_refcount == 0);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001522 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001523
1524 if (--obj_priv->pages_refcount != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07001525 return;
1526
Eric Anholt280b7132009-03-12 16:56:27 -07001527 if (obj_priv->tiling_mode != I915_TILING_NONE)
1528 i915_gem_object_save_bit_17_swizzle(obj);
1529
Chris Wilson3ef94da2009-09-14 16:50:29 +01001530 if (obj_priv->madv == I915_MADV_DONTNEED)
Chris Wilson13a05fd2009-09-20 23:03:19 +01001531 obj_priv->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001532
1533 for (i = 0; i < page_count; i++) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01001534 if (obj_priv->dirty)
1535 set_page_dirty(obj_priv->pages[i]);
1536
1537 if (obj_priv->madv == I915_MADV_WILLNEED)
Eric Anholt856fa192009-03-19 14:10:50 -07001538 mark_page_accessed(obj_priv->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001539
1540 page_cache_release(obj_priv->pages[i]);
1541 }
Eric Anholt673a3942008-07-30 12:06:12 -07001542 obj_priv->dirty = 0;
1543
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07001544 drm_free_large(obj_priv->pages);
Eric Anholt856fa192009-03-19 14:10:50 -07001545 obj_priv->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001546}
1547
1548static void
Daniel Vetter617dbe22010-02-11 22:16:02 +01001549i915_gem_object_move_to_active(struct drm_gem_object *obj,
Zou Nan hai852835f2010-05-21 09:08:56 +08001550 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001551{
Chris Wilson5c12a07e2010-09-22 11:22:30 +01001552 struct drm_i915_private *dev_priv = obj->dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001553 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001554
Zou Nan hai852835f2010-05-21 09:08:56 +08001555 BUG_ON(ring == NULL);
1556 obj_priv->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001557
1558 /* Add a reference if we're newly entering the active list. */
1559 if (!obj_priv->active) {
1560 drm_gem_object_reference(obj);
1561 obj_priv->active = 1;
1562 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001563
Eric Anholt673a3942008-07-30 12:06:12 -07001564 /* Move from whatever list we were on to the tail of execution. */
Zou Nan hai852835f2010-05-21 09:08:56 +08001565 list_move_tail(&obj_priv->list, &ring->active_list);
Chris Wilson5c12a07e2010-09-22 11:22:30 +01001566 obj_priv->last_rendering_seqno = dev_priv->next_seqno;
Eric Anholt673a3942008-07-30 12:06:12 -07001567}
1568
Eric Anholtce44b0e2008-11-06 16:00:31 -08001569static void
1570i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1571{
1572 struct drm_device *dev = obj->dev;
1573 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001574 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001575
1576 BUG_ON(!obj_priv->active);
1577 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1578 obj_priv->last_rendering_seqno = 0;
1579}
Eric Anholt673a3942008-07-30 12:06:12 -07001580
Chris Wilson963b4832009-09-20 23:03:54 +01001581/* Immediately discard the backing storage */
1582static void
1583i915_gem_object_truncate(struct drm_gem_object *obj)
1584{
Daniel Vetter23010e42010-03-08 13:35:02 +01001585 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001586 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001587
Chris Wilsonae9fed62010-08-07 11:01:30 +01001588	/* Our goal here is to return as much of the memory as
 1589	 * possible back to the system, as we may be called from OOM.
 1590	 * To do this we must instruct the shmfs to drop all of its
 1591	 * backing pages, *now*. Here we mirror the actions taken
 1592	 * by shmem_delete_inode() to release the backing store.
1593 */
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001594 inode = obj->filp->f_path.dentry->d_inode;
Chris Wilsonae9fed62010-08-07 11:01:30 +01001595 truncate_inode_pages(inode->i_mapping, 0);
1596 if (inode->i_op->truncate_range)
1597 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001598
1599 obj_priv->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001600}
1601
1602static inline int
1603i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1604{
1605 return obj_priv->madv == I915_MADV_DONTNEED;
1606}
1607
Eric Anholt673a3942008-07-30 12:06:12 -07001608static void
1609i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1610{
1611 struct drm_device *dev = obj->dev;
1612 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001613 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001614
1615 i915_verify_inactive(dev, __FILE__, __LINE__);
1616 if (obj_priv->pin_count != 0)
Chris Wilsonf13d3f72010-09-20 17:36:15 +01001617 list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001618 else
1619 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1620
Daniel Vetter99fcb762010-02-07 16:20:18 +01001621 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1622
Eric Anholtce44b0e2008-11-06 16:00:31 -08001623 obj_priv->last_rendering_seqno = 0;
Zou Nan hai852835f2010-05-21 09:08:56 +08001624 obj_priv->ring = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001625 if (obj_priv->active) {
1626 obj_priv->active = 0;
1627 drm_gem_object_unreference(obj);
1628 }
1629 i915_verify_inactive(dev, __FILE__, __LINE__);
1630}
1631
Chris Wilson92204342010-09-18 11:02:01 +01001632static void
Daniel Vetter63560392010-02-19 11:51:59 +01001633i915_gem_process_flushing_list(struct drm_device *dev,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001634 uint32_t flush_domains,
Zou Nan hai852835f2010-05-21 09:08:56 +08001635 struct intel_ring_buffer *ring)
Daniel Vetter63560392010-02-19 11:51:59 +01001636{
1637 drm_i915_private_t *dev_priv = dev->dev_private;
1638 struct drm_i915_gem_object *obj_priv, *next;
1639
1640 list_for_each_entry_safe(obj_priv, next,
1641 &dev_priv->mm.gpu_write_list,
1642 gpu_write_list) {
Daniel Vettera8089e82010-04-09 19:05:09 +00001643 struct drm_gem_object *obj = &obj_priv->base;
Daniel Vetter63560392010-02-19 11:51:59 +01001644
Chris Wilson2b6efaa2010-09-14 17:04:02 +01001645 if (obj->write_domain & flush_domains &&
1646 obj_priv->ring == ring) {
Daniel Vetter63560392010-02-19 11:51:59 +01001647 uint32_t old_write_domain = obj->write_domain;
1648
1649 obj->write_domain = 0;
1650 list_del_init(&obj_priv->gpu_write_list);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001651 i915_gem_object_move_to_active(obj, ring);
Daniel Vetter63560392010-02-19 11:51:59 +01001652
1653 /* update the fence lru list */
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001654 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1655 struct drm_i915_fence_reg *reg =
1656 &dev_priv->fence_regs[obj_priv->fence_reg];
1657 list_move_tail(&reg->lru_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001658 &dev_priv->mm.fence_list);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001659 }
Daniel Vetter63560392010-02-19 11:51:59 +01001660
1661 trace_i915_gem_object_change_domain(obj,
1662 obj->read_domains,
1663 old_write_domain);
1664 }
1665 }
1666}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001667
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001668uint32_t
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001669i915_add_request(struct drm_device *dev,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001670 struct drm_file *file,
Chris Wilson8dc5d142010-08-12 12:36:12 +01001671 struct drm_i915_gem_request *request,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001672 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001673{
1674 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001675 struct drm_i915_file_private *file_priv = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001676 uint32_t seqno;
1677 int was_empty;
Eric Anholt673a3942008-07-30 12:06:12 -07001678
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001679 if (file != NULL)
1680 file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001681
Chris Wilson8dc5d142010-08-12 12:36:12 +01001682 if (request == NULL) {
1683 request = kzalloc(sizeof(*request), GFP_KERNEL);
1684 if (request == NULL)
1685 return 0;
1686 }
Eric Anholt673a3942008-07-30 12:06:12 -07001687
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001688 seqno = ring->add_request(dev, ring, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07001689
1690 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001691 request->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001692 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001693 was_empty = list_empty(&ring->request_list);
1694 list_add_tail(&request->list, &ring->request_list);
1695
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001696 if (file_priv) {
1697 mutex_lock(&file_priv->mutex);
1698 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001699 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001700 &file_priv->mm.request_list);
1701 mutex_unlock(&file_priv->mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00001702 }
Eric Anholt673a3942008-07-30 12:06:12 -07001703
Ben Gamarif65d9422009-09-14 17:48:44 -04001704 if (!dev_priv->mm.suspended) {
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001705 mod_timer(&dev_priv->hangcheck_timer,
1706 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
Ben Gamarif65d9422009-09-14 17:48:44 -04001707 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001708 queue_delayed_work(dev_priv->wq,
1709 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001710 }
Eric Anholt673a3942008-07-30 12:06:12 -07001711 return seqno;
1712}
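
/*
 * Typical caller pattern (illustrative sketch; dev, file and ring are
 * assumed from the caller): emit rendering, add a request to bookmark
 * it, then wait on that seqno when the results are needed.
 * i915_do_wait_request() below retires the request list once the
 * seqno has passed.
 */
#if 0
	seqno = i915_add_request(dev, file, NULL, ring);
	if (seqno == 0)
		return -ENOMEM;
	ret = i915_do_wait_request(dev, seqno, true, ring);
#endif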
1713
1714/**
1715 * Command execution barrier
1716 *
1717 * Ensures that all commands in the ring are finished
1718 * before signalling the CPU
1719 */
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001720static void
Zou Nan hai852835f2010-05-21 09:08:56 +08001721i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001722{
Eric Anholt673a3942008-07-30 12:06:12 -07001723 uint32_t flush_domains = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001724
1725 /* The sampler always gets flushed on i965 (sigh) */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001726 if (INTEL_INFO(dev)->gen >= 4)
Eric Anholt673a3942008-07-30 12:06:12 -07001727 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
Zou Nan hai852835f2010-05-21 09:08:56 +08001728
1729 ring->flush(dev, ring,
1730 I915_GEM_DOMAIN_COMMAND, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07001731}
1732
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001733static inline void
1734i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001735{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001736 if (request->file_priv) {
1737 mutex_lock(&request->file_priv->mutex);
1738 list_del(&request->client_list);
1739 mutex_unlock(&request->file_priv->mutex);
1740 }
Eric Anholt673a3942008-07-30 12:06:12 -07001741}
1742
Chris Wilsondfaae392010-09-22 10:31:52 +01001743static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1744 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001745{
Chris Wilsondfaae392010-09-22 10:31:52 +01001746 while (!list_empty(&ring->request_list)) {
1747 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001748
Chris Wilsondfaae392010-09-22 10:31:52 +01001749 request = list_first_entry(&ring->request_list,
1750 struct drm_i915_gem_request,
1751 list);
1752
1753 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001754 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001755 kfree(request);
1756 }
1757
1758 while (!list_empty(&ring->active_list)) {
Chris Wilson9375e442010-09-19 12:21:28 +01001759 struct drm_i915_gem_object *obj_priv;
1760
Chris Wilsondfaae392010-09-22 10:31:52 +01001761 obj_priv = list_first_entry(&ring->active_list,
1762 struct drm_i915_gem_object,
1763 list);
1764
1765 obj_priv->base.write_domain = 0;
1766 list_del_init(&obj_priv->gpu_write_list);
1767 i915_gem_object_move_to_inactive(&obj_priv->base);
1768 }
1769}
1770
1771void i915_gem_reset_lists(struct drm_device *dev)
1772{
1773 struct drm_i915_private *dev_priv = dev->dev_private;
1774 struct drm_i915_gem_object *obj_priv;
1775
1776 i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
1777 if (HAS_BSD(dev))
1778 i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
1779
1780 /* Remove anything from the flushing lists. The GPU cache is likely
1781 * to be lost on reset along with the data, so simply move the
1782 * lost bo to the inactive list.
1783 */
1784 while (!list_empty(&dev_priv->mm.flushing_list)) {
Chris Wilson9375e442010-09-19 12:21:28 +01001785 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1786 struct drm_i915_gem_object,
1787 list);
1788
1789 obj_priv->base.write_domain = 0;
Chris Wilsondfaae392010-09-22 10:31:52 +01001790 list_del_init(&obj_priv->gpu_write_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001791 i915_gem_object_move_to_inactive(&obj_priv->base);
1792 }
Chris Wilson9375e442010-09-19 12:21:28 +01001793
Chris Wilsondfaae392010-09-22 10:31:52 +01001794 /* Move everything out of the GPU domains to ensure we do any
1795 * necessary invalidation upon reuse.
1796 */
Chris Wilson77f01232010-09-19 12:31:36 +01001797 list_for_each_entry(obj_priv,
1798 &dev_priv->mm.inactive_list,
 1799 list) {
1801 obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1802 }
1803}
1804
Eric Anholt673a3942008-07-30 12:06:12 -07001805/**
1806 * This function clears the request list as sequence numbers are passed.
1807 */
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001808static void
1809i915_gem_retire_requests_ring(struct drm_device *dev,
1810 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001811{
1812 drm_i915_private_t *dev_priv = dev->dev_private;
1813 uint32_t seqno;
1814
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001815 if (!ring->status_page.page_addr ||
1816 list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001817 return;
1818
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001819 seqno = ring->get_seqno(dev, ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08001820 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001821 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001822
Zou Nan hai852835f2010-05-21 09:08:56 +08001823 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001824 struct drm_i915_gem_request,
1825 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001826
Chris Wilsondfaae392010-09-22 10:31:52 +01001827 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001828 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001829
1830 trace_i915_gem_request_retire(dev, request->seqno);
1831
1832 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001833 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001834 kfree(request);
1835 }
1836
1837 /* Move any buffers on the active list that are no longer referenced
1838 * by the ringbuffer to the flushing/inactive lists as appropriate.
1839 */
1840 while (!list_empty(&ring->active_list)) {
1841 struct drm_gem_object *obj;
1842 struct drm_i915_gem_object *obj_priv;
1843
1844 obj_priv = list_first_entry(&ring->active_list,
1845 struct drm_i915_gem_object,
1846 list);
1847
Chris Wilsondfaae392010-09-22 10:31:52 +01001848 if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001849 break;
1850
1851 obj = &obj_priv->base;
1852
1853#if WATCH_LRU
 1854 DRM_INFO("%s: retire %d moves to inactive list %p\n",
 1855 __func__, obj_priv->last_rendering_seqno, obj);
1856#endif
1857
1858 if (obj->write_domain != 0)
1859 i915_gem_object_move_to_flushing(obj);
1860 else
1861 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001862 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001863
1864 if (unlikely (dev_priv->trace_irq_seqno &&
1865 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001866 ring->user_irq_put(dev, ring);
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001867 dev_priv->trace_irq_seqno = 0;
1868 }
Eric Anholt673a3942008-07-30 12:06:12 -07001869}
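
/*
 * Note on the seqno comparison (illustrative): i915_seqno_passed()
 * uses signed 32-bit arithmetic so that wraparound is handled; e.g. a
 * current seqno of 0x00000002 "passes" a request seqno of 0xfffffffe
 * because (int32_t)(0x00000002 - 0xfffffffe) == 4 >= 0.
 */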
1870
1871void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001872i915_gem_retire_requests(struct drm_device *dev)
1873{
1874 drm_i915_private_t *dev_priv = dev->dev_private;
1875
Chris Wilsonbe726152010-07-23 23:18:50 +01001876 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1877 struct drm_i915_gem_object *obj_priv, *tmp;
1878
1879 /* We must be careful that during unbind() we do not
1880 * accidentally infinitely recurse into retire requests.
1881 * Currently:
1882 * retire -> free -> unbind -> wait -> retire_ring
1883 */
1884 list_for_each_entry_safe(obj_priv, tmp,
1885 &dev_priv->mm.deferred_free_list,
1886 list)
1887 i915_gem_free_object_tail(&obj_priv->base);
1888 }
1889
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001890 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
1891 if (HAS_BSD(dev))
1892 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
1893}
1894
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001895static void
Eric Anholt673a3942008-07-30 12:06:12 -07001896i915_gem_retire_work_handler(struct work_struct *work)
1897{
1898 drm_i915_private_t *dev_priv;
1899 struct drm_device *dev;
1900
1901 dev_priv = container_of(work, drm_i915_private_t,
1902 mm.retire_work.work);
1903 dev = dev_priv->dev;
1904
1905 mutex_lock(&dev->struct_mutex);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001906 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001907
Keith Packard6dbe2772008-10-14 21:41:13 -07001908 if (!dev_priv->mm.suspended &&
Zou Nan haid1b851f2010-05-21 09:08:57 +08001909 (!list_empty(&dev_priv->render_ring.request_list) ||
1910 (HAS_BSD(dev) &&
1911 !list_empty(&dev_priv->bsd_ring.request_list))))
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001912 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Eric Anholt673a3942008-07-30 12:06:12 -07001913 mutex_unlock(&dev->struct_mutex);
1914}
1915
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001916int
Zou Nan hai852835f2010-05-21 09:08:56 +08001917i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001918 bool interruptible, struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001919{
1920 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001921 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001922 int ret = 0;
1923
1924 BUG_ON(seqno == 0);
1925
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001926 if (atomic_read(&dev_priv->mm.wedged))
1927 return -EAGAIN;
1928
Daniel Vettere35a41d2010-02-11 22:13:59 +01001929 if (seqno == dev_priv->next_seqno) {
Chris Wilson8dc5d142010-08-12 12:36:12 +01001930 seqno = i915_add_request(dev, NULL, NULL, ring);
Daniel Vettere35a41d2010-02-11 22:13:59 +01001931 if (seqno == 0)
1932 return -ENOMEM;
1933 }
1934
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001935 if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
Eric Anholtbad720f2009-10-22 16:11:14 -07001936 if (HAS_PCH_SPLIT(dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001937 ier = I915_READ(DEIER) | I915_READ(GTIER);
1938 else
1939 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001940 if (!ier) {
1941 DRM_ERROR("something (likely vbetool) disabled "
1942 "interrupts, re-enabling\n");
1943 i915_driver_irq_preinstall(dev);
1944 i915_driver_irq_postinstall(dev);
1945 }
1946
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001947 trace_i915_gem_request_wait_begin(dev, seqno);
1948
Zou Nan hai852835f2010-05-21 09:08:56 +08001949 ring->waiting_gem_seqno = seqno;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001950 ring->user_irq_get(dev, ring);
Daniel Vetter48764bf2009-09-15 22:57:32 +02001951 if (interruptible)
Zou Nan hai852835f2010-05-21 09:08:56 +08001952 ret = wait_event_interruptible(ring->irq_queue,
1953 i915_seqno_passed(
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001954 ring->get_seqno(dev, ring), seqno)
Zou Nan hai852835f2010-05-21 09:08:56 +08001955 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001956 else
Zou Nan hai852835f2010-05-21 09:08:56 +08001957 wait_event(ring->irq_queue,
1958 i915_seqno_passed(
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001959 ring->get_seqno(dev, ring), seqno)
Zou Nan hai852835f2010-05-21 09:08:56 +08001960 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001961
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001962 ring->user_irq_put(dev, ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08001963 ring->waiting_gem_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001964
1965 trace_i915_gem_request_wait_end(dev, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001966 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001967 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001968 ret = -EAGAIN;
Eric Anholt673a3942008-07-30 12:06:12 -07001969
1970 if (ret && ret != -ERESTARTSYS)
Daniel Vetter8bff9172010-02-11 22:19:40 +01001971 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001972 __func__, ret, seqno, ring->get_seqno(dev, ring),
Daniel Vetter8bff9172010-02-11 22:19:40 +01001973 dev_priv->next_seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001974
1975 /* Directly dispatch request retiring. While we have the work queue
1976 * to handle this, the waiter on a request often wants an associated
1977 * buffer to have made it to the inactive list, and we would need
1978 * a separate wait queue to handle that.
1979 */
1980 if (ret == 0)
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001981 i915_gem_retire_requests_ring(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001982
1983 return ret;
1984}
1985
Daniel Vetter48764bf2009-09-15 22:57:32 +02001986/**
1987 * Waits for a sequence number to be signaled, and cleans up the
1988 * request and object lists appropriately for that event.
1989 */
1990static int
Zou Nan hai852835f2010-05-21 09:08:56 +08001991i915_wait_request(struct drm_device *dev, uint32_t seqno,
1992 struct intel_ring_buffer *ring)
Daniel Vetter48764bf2009-09-15 22:57:32 +02001993{
Zou Nan hai852835f2010-05-21 09:08:56 +08001994 return i915_do_wait_request(dev, seqno, 1, ring);
Daniel Vetter48764bf2009-09-15 22:57:32 +02001995}
1996
Chris Wilson20f0cd52010-09-23 11:00:38 +01001997static void
Chris Wilson92204342010-09-18 11:02:01 +01001998i915_gem_flush_ring(struct drm_device *dev,
Chris Wilsonc78ec302010-09-20 12:50:23 +01001999 struct drm_file *file_priv,
Chris Wilson92204342010-09-18 11:02:01 +01002000 struct intel_ring_buffer *ring,
2001 uint32_t invalidate_domains,
2002 uint32_t flush_domains)
2003{
2004 ring->flush(dev, ring, invalidate_domains, flush_domains);
2005 i915_gem_process_flushing_list(dev, flush_domains, ring);
2006}
2007
2008static void
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002009i915_gem_flush(struct drm_device *dev,
Chris Wilsonc78ec302010-09-20 12:50:23 +01002010 struct drm_file *file_priv,
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002011 uint32_t invalidate_domains,
Chris Wilson92204342010-09-18 11:02:01 +01002012 uint32_t flush_domains,
2013 uint32_t flush_rings)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002014{
2015 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter8bff9172010-02-11 22:19:40 +01002016
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002017 if (flush_domains & I915_GEM_DOMAIN_CPU)
2018 drm_agp_chipset_flush(dev);
Daniel Vetter8bff9172010-02-11 22:19:40 +01002019
Chris Wilson92204342010-09-18 11:02:01 +01002020 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
2021 if (flush_rings & RING_RENDER)
Chris Wilsonc78ec302010-09-20 12:50:23 +01002022 i915_gem_flush_ring(dev, file_priv,
Chris Wilson92204342010-09-18 11:02:01 +01002023 &dev_priv->render_ring,
2024 invalidate_domains, flush_domains);
2025 if (flush_rings & RING_BSD)
Chris Wilsonc78ec302010-09-20 12:50:23 +01002026 i915_gem_flush_ring(dev, file_priv,
Chris Wilson92204342010-09-18 11:02:01 +01002027 &dev_priv->bsd_ring,
2028 invalidate_domains, flush_domains);
2029 }
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002030}
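
/*
 * Note (illustrative): flush_rings is a bitmask selecting which rings
 * receive the GPU flush, so a caller that dirtied both rings passes
 * RING_RENDER | RING_BSD, while a CPU-domain flush needs only the
 * chipset flush above.
 */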
2031
Eric Anholt673a3942008-07-30 12:06:12 -07002032/**
2033 * Ensures that all rendering to the object has completed and the object is
2034 * safe to unbind from the GTT or access from the CPU.
2035 */
2036static int
Chris Wilson2cf34d72010-09-14 13:03:28 +01002037i915_gem_object_wait_rendering(struct drm_gem_object *obj,
2038 bool interruptible)
Eric Anholt673a3942008-07-30 12:06:12 -07002039{
2040 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002041 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002042 int ret;
2043
Eric Anholte47c68e2008-11-14 13:35:19 -08002044 /* This function only exists to support waiting for existing rendering,
2045 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07002046 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002047 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07002048
2049 /* If there is rendering queued on the buffer being evicted, wait for
2050 * it.
2051 */
2052 if (obj_priv->active) {
2053#if WATCH_BUF
2054 DRM_INFO("%s: object %p wait for seqno %08x\n",
2055 __func__, obj, obj_priv->last_rendering_seqno);
2056#endif
Chris Wilson2cf34d72010-09-14 13:03:28 +01002057 ret = i915_do_wait_request(dev,
2058 obj_priv->last_rendering_seqno,
2059 interruptible,
2060 obj_priv->ring);
2061 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002062 return ret;
2063 }
2064
2065 return 0;
2066}
2067
2068/**
2069 * Unbinds an object from the GTT aperture.
2070 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002071int
Eric Anholt673a3942008-07-30 12:06:12 -07002072i915_gem_object_unbind(struct drm_gem_object *obj)
2073{
2074 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002075 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002076 int ret = 0;
2077
2078#if WATCH_BUF
2079 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
2080 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
2081#endif
2082 if (obj_priv->gtt_space == NULL)
2083 return 0;
2084
2085 if (obj_priv->pin_count != 0) {
2086 DRM_ERROR("Attempting to unbind pinned buffer\n");
2087 return -EINVAL;
2088 }
2089
Eric Anholt5323fd02009-09-09 11:50:45 -07002090 /* blow away mappings if mapped through GTT */
2091 i915_gem_release_mmap(obj);
2092
Eric Anholt673a3942008-07-30 12:06:12 -07002093 /* Move the object to the CPU domain to ensure that
2094 * any possible CPU writes while it's not in the GTT
2095 * are flushed when we go to remap it. This will
2096 * also ensure that all pending GPU writes are finished
2097 * before we unbind.
2098 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002099 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Chris Wilson8dc17752010-07-23 23:18:51 +01002100 if (ret == -ERESTARTSYS)
Eric Anholt673a3942008-07-30 12:06:12 -07002101 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002102 /* Continue on if we fail due to EIO; the GPU is hung, so we
 2103 * should be safe, and we need to clean up or else we might
 2104 * cause memory corruption through use-after-free.
2105 */
Eric Anholt673a3942008-07-30 12:06:12 -07002106
Daniel Vetter96b47b62009-12-15 17:50:00 +01002107 /* release the fence reg _after_ flushing */
2108 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2109 i915_gem_clear_fence_reg(obj);
2110
Eric Anholt673a3942008-07-30 12:06:12 -07002111 if (obj_priv->agp_mem != NULL) {
2112 drm_unbind_agp(obj_priv->agp_mem);
2113 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2114 obj_priv->agp_mem = NULL;
2115 }
2116
Eric Anholt856fa192009-03-19 14:10:50 -07002117 i915_gem_object_put_pages(obj);
Chris Wilsona32808c2009-09-20 21:29:47 +01002118 BUG_ON(obj_priv->pages_refcount);
Eric Anholt673a3942008-07-30 12:06:12 -07002119
2120 if (obj_priv->gtt_space) {
2121 atomic_dec(&dev->gtt_count);
2122 atomic_sub(obj->size, &dev->gtt_memory);
2123
2124 drm_mm_put_block(obj_priv->gtt_space);
2125 obj_priv->gtt_space = NULL;
2126 }
2127
Chris Wilsonf13d3f72010-09-20 17:36:15 +01002128 list_del_init(&obj_priv->list);
Eric Anholt673a3942008-07-30 12:06:12 -07002129
Chris Wilson963b4832009-09-20 23:03:54 +01002130 if (i915_gem_object_is_purgeable(obj_priv))
2131 i915_gem_object_truncate(obj);
2132
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002133 trace_i915_gem_object_unbind(obj);
2134
Chris Wilson8dc17752010-07-23 23:18:51 +01002135 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002136}
2137
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01002138int
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002139i915_gpu_idle(struct drm_device *dev)
2140{
2141 drm_i915_private_t *dev_priv = dev->dev_private;
2142 bool lists_empty;
Chris Wilsonc78ec302010-09-20 12:50:23 +01002143 u32 seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08002144 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002145
Zou Nan haid1b851f2010-05-21 09:08:57 +08002146 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2147 list_empty(&dev_priv->render_ring.active_list) &&
2148 (!HAS_BSD(dev) ||
2149 list_empty(&dev_priv->bsd_ring.active_list)));
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002150 if (lists_empty)
2151 return 0;
2152
2153 /* Flush everything onto the inactive list. */
Chris Wilson5c12a07e2010-09-22 11:22:30 +01002154 seqno = dev_priv->next_seqno;
Chris Wilsonc78ec302010-09-20 12:50:23 +01002155 i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring,
Chris Wilson92204342010-09-18 11:02:01 +01002156 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
Chris Wilsonc78ec302010-09-20 12:50:23 +01002157 ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002158 if (ret)
2159 return ret;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002160
2161 if (HAS_BSD(dev)) {
Chris Wilson5c12a07e2010-09-22 11:22:30 +01002162 seqno = dev_priv->next_seqno;
Chris Wilsonc78ec302010-09-20 12:50:23 +01002163 i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring,
Chris Wilson92204342010-09-18 11:02:01 +01002164 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
Chris Wilsonc78ec302010-09-20 12:50:23 +01002165 ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002166 if (ret)
2167 return ret;
2168 }
2169
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002170 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002171}
2172
Ben Gamari6911a9b2009-04-02 11:24:54 -07002173int
Chris Wilson4bdadb92010-01-27 13:36:32 +00002174i915_gem_object_get_pages(struct drm_gem_object *obj,
2175 gfp_t gfpmask)
Eric Anholt673a3942008-07-30 12:06:12 -07002176{
Daniel Vetter23010e42010-03-08 13:35:02 +01002177 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002178 int page_count, i;
2179 struct address_space *mapping;
2180 struct inode *inode;
2181 struct page *page;
Eric Anholt673a3942008-07-30 12:06:12 -07002182
Daniel Vetter778c3542010-05-13 11:49:44 +02002183 BUG_ON(obj_priv->pages_refcount
2184 == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2185
Eric Anholt856fa192009-03-19 14:10:50 -07002186 if (obj_priv->pages_refcount++ != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002187 return 0;
2188
2189 /* Get the list of pages out of our struct file. They'll be pinned
2190 * at this point until we release them.
2191 */
2192 page_count = obj->size / PAGE_SIZE;
Eric Anholt856fa192009-03-19 14:10:50 -07002193 BUG_ON(obj_priv->pages != NULL);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07002194 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
Eric Anholt856fa192009-03-19 14:10:50 -07002195 if (obj_priv->pages == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002196 obj_priv->pages_refcount--;
Eric Anholt673a3942008-07-30 12:06:12 -07002197 return -ENOMEM;
2198 }
2199
2200 inode = obj->filp->f_path.dentry->d_inode;
2201 mapping = inode->i_mapping;
2202 for (i = 0; i < page_count; i++) {
Chris Wilson4bdadb92010-01-27 13:36:32 +00002203 page = read_cache_page_gfp(mapping, i,
Linus Torvalds985b8232010-07-02 10:04:42 +10002204 GFP_HIGHUSER |
Chris Wilson4bdadb92010-01-27 13:36:32 +00002205 __GFP_COLD |
Linus Torvaldscd9f0402010-07-18 09:44:37 -07002206 __GFP_RECLAIMABLE |
Chris Wilson4bdadb92010-01-27 13:36:32 +00002207 gfpmask);
Chris Wilson1f2b1012010-03-12 19:52:55 +00002208 if (IS_ERR(page))
2209 goto err_pages;
2210
Eric Anholt856fa192009-03-19 14:10:50 -07002211 obj_priv->pages[i] = page;
Eric Anholt673a3942008-07-30 12:06:12 -07002212 }
Eric Anholt280b7132009-03-12 16:56:27 -07002213
2214 if (obj_priv->tiling_mode != I915_TILING_NONE)
2215 i915_gem_object_do_bit_17_swizzle(obj);
2216
Eric Anholt673a3942008-07-30 12:06:12 -07002217 return 0;
Chris Wilson1f2b1012010-03-12 19:52:55 +00002218
2219err_pages:
2220 while (i--)
2221 page_cache_release(obj_priv->pages[i]);
2222
2223 drm_free_large(obj_priv->pages);
2224 obj_priv->pages = NULL;
2225 obj_priv->pages_refcount--;
2226 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07002227}
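
/*
 * Usage note (illustrative sketch): get_pages/put_pages nest through
 * pages_refcount, so a caller needing CPU access to the backing pages
 * brackets that access like this:
 */
#if 0
	ret = i915_gem_object_get_pages(obj, 0);
	if (ret)
		return ret;
	/* ... read or write obj_priv->pages[] ... */
	i915_gem_object_put_pages(obj);
#endif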
2228
Eric Anholt4e901fd2009-10-26 16:44:17 -07002229static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2230{
2231 struct drm_gem_object *obj = reg->obj;
2232 struct drm_device *dev = obj->dev;
2233 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002234 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt4e901fd2009-10-26 16:44:17 -07002235 int regnum = obj_priv->fence_reg;
2236 uint64_t val;
2237
2238 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2239 0xfffff000) << 32;
2240 val |= obj_priv->gtt_offset & 0xfffff000;
2241 val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2242 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2243
2244 if (obj_priv->tiling_mode == I915_TILING_Y)
2245 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2246 val |= I965_FENCE_REG_VALID;
2247
2248 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2249}
2250
Jesse Barnesde151cf2008-11-12 10:03:55 -08002251static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2252{
2253 struct drm_gem_object *obj = reg->obj;
2254 struct drm_device *dev = obj->dev;
2255 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002256 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002257 int regnum = obj_priv->fence_reg;
2258 uint64_t val;
2259
2260 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2261 0xfffff000) << 32;
2262 val |= obj_priv->gtt_offset & 0xfffff000;
2263 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2264 if (obj_priv->tiling_mode == I915_TILING_Y)
2265 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2266 val |= I965_FENCE_REG_VALID;
2267
2268 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2269}
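
/*
 * Worked example (illustrative): a 1 MiB Y-tiled object at GTT offset
 * 0x00100000 with a 512 byte stride packs into the fence as
 *
 *	end  = (0x100000 + 0x100000 - 4096) & 0xfffff000 = 0x1ff000
 *	val  = (end << 32) | 0x100000 (start of the fenced range)
 *	       | ((512 / 128) - 1) << I965_FENCE_PITCH_SHIFT (pitch = 3)
 *	       | 1 << I965_FENCE_TILING_Y_SHIFT | I965_FENCE_REG_VALID
 */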
2270
2271static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2272{
2273 struct drm_gem_object *obj = reg->obj;
2274 struct drm_device *dev = obj->dev;
2275 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002276 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002277 int regnum = obj_priv->fence_reg;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002278 int tile_width;
Eric Anholtdc529a42009-03-10 22:34:49 -07002279 uint32_t fence_reg, val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002280 uint32_t pitch_val;
2281
2282 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2283 (obj_priv->gtt_offset & (obj->size - 1))) {
Linus Torvaldsf06da262009-02-09 08:57:29 -08002284 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002285 __func__, obj_priv->gtt_offset, obj->size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002286 return;
2287 }
2288
Jesse Barnes0f973f22009-01-26 17:10:45 -08002289 if (obj_priv->tiling_mode == I915_TILING_Y &&
2290 HAS_128_BYTE_Y_TILING(dev))
2291 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002292 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002293 tile_width = 512;
2294
2295 /* Note: pitch better be a power of two tile widths */
2296 pitch_val = obj_priv->stride / tile_width;
2297 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002298
Daniel Vetterc36a2a62010-04-17 15:12:03 +02002299 if (obj_priv->tiling_mode == I915_TILING_Y &&
2300 HAS_128_BYTE_Y_TILING(dev))
2301 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2302 else
2303 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2304
Jesse Barnesde151cf2008-11-12 10:03:55 -08002305 val = obj_priv->gtt_offset;
2306 if (obj_priv->tiling_mode == I915_TILING_Y)
2307 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2308 val |= I915_FENCE_SIZE_BITS(obj->size);
2309 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2310 val |= I830_FENCE_REG_VALID;
2311
Eric Anholtdc529a42009-03-10 22:34:49 -07002312 if (regnum < 8)
2313 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2314 else
2315 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2316 I915_WRITE(fence_reg, val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002317}
2318
2319static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2320{
2321 struct drm_gem_object *obj = reg->obj;
2322 struct drm_device *dev = obj->dev;
2323 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002324 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002325 int regnum = obj_priv->fence_reg;
2326 uint32_t val;
2327 uint32_t pitch_val;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002328 uint32_t fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002329
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002330 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
Jesse Barnesde151cf2008-11-12 10:03:55 -08002331 (obj_priv->gtt_offset & (obj->size - 1))) {
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002332 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002333 __func__, obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002334 return;
2335 }
2336
Eric Anholte76a16d2009-05-26 17:44:56 -07002337 pitch_val = obj_priv->stride / 128;
2338 pitch_val = ffs(pitch_val) - 1;
2339 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2340
Jesse Barnesde151cf2008-11-12 10:03:55 -08002341 val = obj_priv->gtt_offset;
2342 if (obj_priv->tiling_mode == I915_TILING_Y)
2343 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002344 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2345 WARN_ON(fence_size_bits & ~0x00000f00);
2346 val |= fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002347 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2348 val |= I830_FENCE_REG_VALID;
2349
2350 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002351}
2352
Chris Wilson2cf34d72010-09-14 13:03:28 +01002353static int i915_find_fence_reg(struct drm_device *dev,
2354 bool interruptible)
Daniel Vetterae3db242010-02-19 11:51:58 +01002355{
2356 struct drm_i915_fence_reg *reg = NULL;
2357 struct drm_i915_gem_object *obj_priv = NULL;
2358 struct drm_i915_private *dev_priv = dev->dev_private;
2359 struct drm_gem_object *obj = NULL;
2360 int i, avail, ret;
2361
2362 /* First try to find a free reg */
2363 avail = 0;
2364 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2365 reg = &dev_priv->fence_regs[i];
2366 if (!reg->obj)
2367 return i;
2368
Daniel Vetter23010e42010-03-08 13:35:02 +01002369 obj_priv = to_intel_bo(reg->obj);
Daniel Vetterae3db242010-02-19 11:51:58 +01002370 if (!obj_priv->pin_count)
2371 avail++;
2372 }
2373
2374 if (avail == 0)
2375 return -ENOSPC;
2376
2377 /* None available, try to steal one or wait for a user to finish */
2378 i = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002379 list_for_each_entry(reg, &dev_priv->mm.fence_list,
2380 lru_list) {
2381 obj = reg->obj;
2382 obj_priv = to_intel_bo(obj);
Daniel Vetterae3db242010-02-19 11:51:58 +01002383
2384 if (obj_priv->pin_count)
2385 continue;
2386
2387 /* found one! */
2388 i = obj_priv->fence_reg;
2389 break;
2390 }
2391
2392 BUG_ON(i == I915_FENCE_REG_NONE);
2393
2394 /* We only have a reference on obj from the active list. put_fence_reg
 2395 * might drop that one, causing a use-after-free of obj. So hold a
2396 * private reference to obj like the other callers of put_fence_reg
2397 * (set_tiling ioctl) do. */
2398 drm_gem_object_reference(obj);
Chris Wilson2cf34d72010-09-14 13:03:28 +01002399 ret = i915_gem_object_put_fence_reg(obj, interruptible);
Daniel Vetterae3db242010-02-19 11:51:58 +01002400 drm_gem_object_unreference(obj);
2401 if (ret != 0)
2402 return ret;
2403
2404 return i;
2405}
2406
Jesse Barnesde151cf2008-11-12 10:03:55 -08002407/**
2408 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2409 * @obj: object to map through a fence reg
2410 *
2411 * When mapping objects through the GTT, userspace wants to be able to write
2412 * to them without having to worry about swizzling if the object is tiled.
2413 *
2414 * This function walks the fence regs looking for a free one for @obj,
2415 * stealing one if it can't find any.
2416 *
2417 * It then sets up the reg based on the object's properties: address, pitch
2418 * and tiling format.
2419 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002420int
Chris Wilson2cf34d72010-09-14 13:03:28 +01002421i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
2422 bool interruptible)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002423{
2424 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002425 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002426 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002427 struct drm_i915_fence_reg *reg = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002428 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002429
Eric Anholta09ba7f2009-08-29 12:49:51 -07002430 /* Just update our place in the LRU if our fence is getting used. */
2431 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002432 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2433 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002434 return 0;
2435 }
2436
Jesse Barnesde151cf2008-11-12 10:03:55 -08002437 switch (obj_priv->tiling_mode) {
2438 case I915_TILING_NONE:
2439 WARN(1, "allocating a fence for non-tiled object?\n");
2440 break;
2441 case I915_TILING_X:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002442 if (!obj_priv->stride)
2443 return -EINVAL;
2444 WARN((obj_priv->stride & (512 - 1)),
2445 "object 0x%08x is X tiled but has non-512B pitch\n",
2446 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002447 break;
2448 case I915_TILING_Y:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002449 if (!obj_priv->stride)
2450 return -EINVAL;
2451 WARN((obj_priv->stride & (128 - 1)),
2452 "object 0x%08x is Y tiled but has non-128B pitch\n",
2453 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002454 break;
2455 }
2456
Chris Wilson2cf34d72010-09-14 13:03:28 +01002457 ret = i915_find_fence_reg(dev, interruptible);
Daniel Vetterae3db242010-02-19 11:51:58 +01002458 if (ret < 0)
2459 return ret;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002460
Daniel Vetterae3db242010-02-19 11:51:58 +01002461 obj_priv->fence_reg = ret;
2462 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002463 list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002464
Jesse Barnesde151cf2008-11-12 10:03:55 -08002465 reg->obj = obj;
2466
Chris Wilsone259bef2010-09-17 00:32:02 +01002467 switch (INTEL_INFO(dev)->gen) {
2468 case 6:
Eric Anholt4e901fd2009-10-26 16:44:17 -07002469 sandybridge_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002470 break;
2471 case 5:
2472 case 4:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002473 i965_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002474 break;
2475 case 3:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002476 i915_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002477 break;
2478 case 2:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002479 i830_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002480 break;
2481 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002482
Daniel Vetterae3db242010-02-19 11:51:58 +01002483 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2484 obj_priv->tiling_mode);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002485
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002486 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002487}
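
/*
 * Sketch of a typical call site (illustrative): a tiled object about
 * to be accessed through a GTT mapping needs its fence set up first,
 * with an interruptible wait when called from an ioctl:
 */
#if 0
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, true);
		if (ret)
			return ret;
	}
#endif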
2488
2489/**
2490 * i915_gem_clear_fence_reg - clear out fence register info
2491 * @obj: object to clear
2492 *
2493 * Zeroes out the fence register itself and clears out the associated
2494 * data structures in dev_priv and obj_priv.
2495 */
2496static void
2497i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2498{
2499 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002500 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002501 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002502 struct drm_i915_fence_reg *reg =
2503 &dev_priv->fence_regs[obj_priv->fence_reg];
Chris Wilsone259bef2010-09-17 00:32:02 +01002504 uint32_t fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002505
Chris Wilsone259bef2010-09-17 00:32:02 +01002506 switch (INTEL_INFO(dev)->gen) {
2507 case 6:
Eric Anholt4e901fd2009-10-26 16:44:17 -07002508 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2509 (obj_priv->fence_reg * 8), 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002510 break;
2511 case 5:
2512 case 4:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002513 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002514 break;
2515 case 3:
 2516 if (obj_priv->fence_reg >= 8)
2517 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002518 else
Chris Wilsone259bef2010-09-17 00:32:02 +01002519 case 2:
2520 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002521
2522 I915_WRITE(fence_reg, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002523 break;
Eric Anholtdc529a42009-03-10 22:34:49 -07002524 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002525
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002526 reg->obj = NULL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002527 obj_priv->fence_reg = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002528 list_del_init(&reg->lru_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002529}
2530
Eric Anholt673a3942008-07-30 12:06:12 -07002531/**
Chris Wilson52dc7d32009-06-06 09:46:01 +01002532 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2533 * to the buffer to finish, and then resets the fence register.
2534 * @obj: tiled object holding a fence register.
Chris Wilson2cf34d72010-09-14 13:03:28 +01002535 * @interruptible: whether the wait upon the fence is interruptible
Chris Wilson52dc7d32009-06-06 09:46:01 +01002536 *
2537 * Zeroes out the fence register itself and clears out the associated
2538 * data structures in dev_priv and obj_priv.
2539 */
2540int
Chris Wilson2cf34d72010-09-14 13:03:28 +01002541i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
2542 bool interruptible)
Chris Wilson52dc7d32009-06-06 09:46:01 +01002543{
2544 struct drm_device *dev = obj->dev;
Chris Wilson53640e12010-09-20 11:40:50 +01002545 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002546 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson53640e12010-09-20 11:40:50 +01002547 struct drm_i915_fence_reg *reg;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002548
2549 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2550 return 0;
2551
Daniel Vetter10ae9bd2010-02-01 13:59:17 +01002552 /* If we've changed tiling, GTT-mappings of the object
2553 * need to re-fault to ensure that the correct fence register
2554 * setup is in place.
2555 */
2556 i915_gem_release_mmap(obj);
2557
Chris Wilson52dc7d32009-06-06 09:46:01 +01002558 /* On the i915, GPU access to tiled buffers is via a fence,
2559 * therefore we must wait for any outstanding access to complete
2560 * before clearing the fence.
2561 */
Chris Wilson53640e12010-09-20 11:40:50 +01002562 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2563 if (reg->gpu) {
Chris Wilson52dc7d32009-06-06 09:46:01 +01002564 int ret;
2565
Chris Wilson2cf34d72010-09-14 13:03:28 +01002566 ret = i915_gem_object_flush_gpu_write_domain(obj, true);
Chris Wilson0bc23aa2010-09-14 10:22:23 +01002567 if (ret)
2568 return ret;
2569
Chris Wilson2cf34d72010-09-14 13:03:28 +01002570 ret = i915_gem_object_wait_rendering(obj, interruptible);
Chris Wilson0bc23aa2010-09-14 10:22:23 +01002571 if (ret)
Chris Wilson52dc7d32009-06-06 09:46:01 +01002572 return ret;
Chris Wilson53640e12010-09-20 11:40:50 +01002573
2574 reg->gpu = false;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002575 }
2576
Daniel Vetter4a726612010-02-01 13:59:16 +01002577 i915_gem_object_flush_gtt_write_domain(obj);
Chris Wilson0bc23aa2010-09-14 10:22:23 +01002578 i915_gem_clear_fence_reg(obj);
Chris Wilson52dc7d32009-06-06 09:46:01 +01002579
2580 return 0;
2581}
2582
2583/**
Eric Anholt673a3942008-07-30 12:06:12 -07002584 * Finds free space in the GTT aperture and binds the object there.
2585 */
2586static int
2587i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2588{
2589 struct drm_device *dev = obj->dev;
2590 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002591 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002592 struct drm_mm_node *free_space;
Chris Wilson4bdadb92010-01-27 13:36:32 +00002593 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Chris Wilson07f73f62009-09-14 16:50:30 +01002594 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002595
Chris Wilsonbb6baf72009-09-22 14:24:13 +01002596 if (obj_priv->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002597 DRM_ERROR("Attempting to bind a purgeable object\n");
2598 return -EINVAL;
2599 }
2600
Eric Anholt673a3942008-07-30 12:06:12 -07002601 if (alignment == 0)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002602 alignment = i915_gem_get_gtt_alignment(obj);
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002603 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002604 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2605 return -EINVAL;
2606 }
2607
Chris Wilson654fc602010-05-27 13:18:21 +01002608 /* If the object is bigger than the entire aperture, reject it early
2609 * before evicting everything in a vain attempt to find space.
2610 */
2611 if (obj->size > dev->gtt_total) {
2612 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2613 return -E2BIG;
2614 }
2615
Eric Anholt673a3942008-07-30 12:06:12 -07002616 search_free:
2617 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2618 obj->size, alignment, 0);
2619 if (free_space != NULL) {
2620 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2621 alignment);
Daniel Vetterdb3307a2010-07-02 15:02:12 +01002622 if (obj_priv->gtt_space != NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002623 obj_priv->gtt_offset = obj_priv->gtt_space->start;
Eric Anholt673a3942008-07-30 12:06:12 -07002624 }
2625 if (obj_priv->gtt_space == NULL) {
2626 /* If the gtt is empty and we're still having trouble
2627 * fitting our object in, we're out of memory.
2628 */
2629#if WATCH_LRU
2630 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2631#endif
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002632 ret = i915_gem_evict_something(dev, obj->size, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01002633 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002634 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002635
Eric Anholt673a3942008-07-30 12:06:12 -07002636 goto search_free;
2637 }
2638
2639#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02002640 DRM_INFO("Binding object of size %zd at 0x%08x\n",
Eric Anholt673a3942008-07-30 12:06:12 -07002641 obj->size, obj_priv->gtt_offset);
2642#endif
Chris Wilson4bdadb92010-01-27 13:36:32 +00002643 ret = i915_gem_object_get_pages(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002644 if (ret) {
2645 drm_mm_put_block(obj_priv->gtt_space);
2646 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002647
2648 if (ret == -ENOMEM) {
2649 /* first try to clear up some space from the GTT */
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002650 ret = i915_gem_evict_something(dev, obj->size,
2651 alignment);
Chris Wilson07f73f62009-09-14 16:50:30 +01002652 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002653 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002654 if (gfpmask) {
2655 gfpmask = 0;
2656 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002657 }
2658
2659 return ret;
2660 }
2661
2662 goto search_free;
2663 }
2664
Eric Anholt673a3942008-07-30 12:06:12 -07002665 return ret;
2666 }
2667
Eric Anholt673a3942008-07-30 12:06:12 -07002668 /* Create an AGP memory structure pointing at our pages, and bind it
2669 * into the GTT.
2670 */
2671 obj_priv->agp_mem = drm_agp_bind_pages(dev,
Eric Anholt856fa192009-03-19 14:10:50 -07002672 obj_priv->pages,
Chris Wilson07f73f62009-09-14 16:50:30 +01002673 obj->size >> PAGE_SHIFT,
Keith Packardba1eb1d2008-10-14 19:55:10 -07002674 obj_priv->gtt_offset,
2675 obj_priv->agp_type);
Eric Anholt673a3942008-07-30 12:06:12 -07002676 if (obj_priv->agp_mem == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002677 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002678 drm_mm_put_block(obj_priv->gtt_space);
2679 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002680
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002681 ret = i915_gem_evict_something(dev, obj->size, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01002682 if (ret)
Chris Wilson07f73f62009-09-14 16:50:30 +01002683 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002684
2685 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002686 }
2687 atomic_inc(&dev->gtt_count);
2688 atomic_add(obj->size, &dev->gtt_memory);
2689
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002690 /* keep track of the bound object by adding it to the inactive list */
2691 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
2692
Eric Anholt673a3942008-07-30 12:06:12 -07002693 /* Assert that the object is not currently in any GPU domain. As it
2694 * wasn't in the GTT, there shouldn't be any way it could have been in
2695 * a GPU cache
2696 */
Chris Wilson21d509e2009-06-06 09:46:02 +01002697 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2698 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002699
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002700 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2701
Eric Anholt673a3942008-07-30 12:06:12 -07002702 return 0;
2703}
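/*
 * Editor's sketch (not part of the driver): the retry ladder implemented
 * by i915_gem_object_bind_to_gtt() above, in outline. Names match the
 * function; the control flow is paraphrased.
 *
 *	search_free:
 *		free_space = drm_mm_search_free(&gtt_space, size, alignment, 0);
 *		if (no node found)
 *			evict something, then goto search_free;
 *		if (i915_gem_object_get_pages() == -ENOMEM) {
 *			evict something and goto search_free; if eviction
 *			fails while gfpmask is still __GFP_NORETRY |
 *			__GFP_NOWARN, drop gfpmask to 0 so the allocator may
 *			block and reclaim, and goto search_free once more;
 *		}
 *		if (drm_agp_bind_pages() fails)
 *			evict something, then goto search_free;
 *
 * Each failure path releases the partial state it created (the drm_mm
 * node, the page list) before looping, so every retry starts clean.
 */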
2704
2705void
2706i915_gem_clflush_object(struct drm_gem_object *obj)
2707{
Daniel Vetter23010e42010-03-08 13:35:02 +01002708 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002709
2710 /* If we don't have a page list set up, then we're not pinned
2711 * to GPU, and we can ignore the cache flush because it'll happen
2712 * again at bind time.
2713 */
Eric Anholt856fa192009-03-19 14:10:50 -07002714 if (obj_priv->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002715 return;
2716
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002717 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002718
Eric Anholt856fa192009-03-19 14:10:50 -07002719 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002720}
2721
Eric Anholte47c68e2008-11-14 13:35:19 -08002722/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002723static int
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002724i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
2725 bool pipelined)
Eric Anholte47c68e2008-11-14 13:35:19 -08002726{
2727 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002728 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002729
2730 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002731 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002732
2733 /* Queue the GPU write cache flushing we need. */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002734 old_write_domain = obj->write_domain;
Chris Wilsonc78ec302010-09-20 12:50:23 +01002735 i915_gem_flush_ring(dev, NULL,
Chris Wilson92204342010-09-18 11:02:01 +01002736 to_intel_bo(obj)->ring,
2737 0, obj->write_domain);
Chris Wilson48b956c2010-09-14 12:50:34 +01002738 BUG_ON(obj->write_domain);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002739
2740 trace_i915_gem_object_change_domain(obj,
2741 obj->read_domains,
2742 old_write_domain);
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002743
2744 if (pipelined)
2745 return 0;
2746
Chris Wilson2cf34d72010-09-14 13:03:28 +01002747 return i915_gem_object_wait_rendering(obj, true);
Eric Anholte47c68e2008-11-14 13:35:19 -08002748}
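/*
 * Editor's note (illustrative): the two call patterns for
 * i915_gem_object_flush_gpu_write_domain() above, as used by the
 * domain-setting routines later in this file:
 *
 *	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
 *		queues the flush on the object's ring and then blocks in
 *		i915_gem_object_wait_rendering() until it has executed;
 *
 *	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
 *		only queues the flush; the caller keeps the work pipelined
 *		on the ring and synchronizes against it later.
 */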
2749
2750/** Flushes the GTT write domain for the object if it's dirty. */
2751static void
2752i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2753{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002754 uint32_t old_write_domain;
2755
Eric Anholte47c68e2008-11-14 13:35:19 -08002756 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2757 return;
2758
2759 /* No actual flushing is required for the GTT write domain. Writes
2760 * to it immediately go to main memory as far as we know, so there's
2761 * no chipset flush. It also doesn't land in render cache.
2762 */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002763 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002764 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002765
2766 trace_i915_gem_object_change_domain(obj,
2767 obj->read_domains,
2768 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002769}
2770
2771/** Flushes the CPU write domain for the object if it's dirty. */
2772static void
2773i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2774{
2775 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002776 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002777
2778 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2779 return;
2780
2781 i915_gem_clflush_object(obj);
2782 drm_agp_chipset_flush(dev);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002783 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002784 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002785
2786 trace_i915_gem_object_change_domain(obj,
2787 obj->read_domains,
2788 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002789}
2790
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002791/**
2792 * Moves a single object to the GTT read, and possibly write domain.
2793 *
2794 * This function returns when the move is complete, including waiting on
2795 * flushes to occur.
2796 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002797int
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002798i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2799{
Daniel Vetter23010e42010-03-08 13:35:02 +01002800 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002801 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002802 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002803
Eric Anholt02354392008-11-26 13:58:13 -08002804 /* Not valid to be called on unbound objects. */
2805 if (obj_priv->gtt_space == NULL)
2806 return -EINVAL;
2807
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002808 ret = i915_gem_object_flush_gpu_write_domain(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08002809 if (ret != 0)
2810 return ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002811
Chris Wilson72133422010-09-13 23:56:38 +01002812 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002813
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002814 if (write) {
Chris Wilson2cf34d72010-09-14 13:03:28 +01002815 ret = i915_gem_object_wait_rendering(obj, true);
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002816 if (ret)
2817 return ret;
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002818 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002819
Chris Wilson72133422010-09-13 23:56:38 +01002820 old_write_domain = obj->write_domain;
2821 old_read_domains = obj->read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002822
2823 /* It should now be out of any other write domains, and we can update
2824 * the domain values for our changes.
2825 */
2826 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2827 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002828 if (write) {
Chris Wilson72133422010-09-13 23:56:38 +01002829 obj->read_domains = I915_GEM_DOMAIN_GTT;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002830 obj->write_domain = I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002831 obj_priv->dirty = 1;
2832 }
2833
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002834 trace_i915_gem_object_change_domain(obj,
2835 old_read_domains,
2836 old_write_domain);
2837
Eric Anholte47c68e2008-11-14 13:35:19 -08002838 return 0;
2839}
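/*
 * Editor's sketch of a typical caller of the function above (hypothetical;
 * the real callers live in the pwrite/fault paths of this file and in the
 * modesetting code):
 *
 *	ret = i915_gem_object_pin(obj, 0);
 *	if (ret == 0) {
 *		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 *		if (ret == 0)
 *			... write through the GTT mapping ...
 *		i915_gem_object_unpin(obj);
 *	}
 *
 * Passing write = 1 makes GTT the sole read domain, sets the GTT write
 * domain and marks the object dirty so its pages are written back when
 * it is eventually unbound.
 */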
2840
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002841/*
2842 * Prepare a buffer for use on a display plane. Use an uninterruptible
2843 * wait for any needed flush, as modesetting must not be interrupted.
2844 */
2845int
Chris Wilson48b956c2010-09-14 12:50:34 +01002846i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
2847 bool pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002848{
Daniel Vetter23010e42010-03-08 13:35:02 +01002849 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002850 uint32_t old_read_domains;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002851 int ret;
2852
2853 /* Not valid to be called on unbound objects. */
2854 if (obj_priv->gtt_space == NULL)
2855 return -EINVAL;
2856
Chris Wilson48b956c2010-09-14 12:50:34 +01002857 ret = i915_gem_object_flush_gpu_write_domain(obj, pipelined);
2858 if (ret)
Daniel Vettere35a41d2010-02-11 22:13:59 +01002859 return ret;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002860
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002861 i915_gem_object_flush_cpu_write_domain(obj);
2862
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002863 old_read_domains = obj->read_domains;
Chris Wilsonc78ec302010-09-20 12:50:23 +01002864 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002865
2866 trace_i915_gem_object_change_domain(obj,
2867 old_read_domains,
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002868 obj->write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002869
2870 return 0;
2871}
2872
Eric Anholte47c68e2008-11-14 13:35:19 -08002873/**
2874 * Moves a single object to the CPU read, and possibly write domain.
2875 *
2876 * This function returns when the move is complete, including waiting on
2877 * flushes to occur.
2878 */
2879static int
2880i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2881{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002882 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002883 int ret;
2884
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002885 ret = i915_gem_object_flush_gpu_write_domain(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08002886 if (ret != 0)
2887 return ret;
2888
2889 i915_gem_object_flush_gtt_write_domain(obj);
2890
2891 /* If we have a partially-valid cache of the object in the CPU,
2892 * finish invalidating it and free the per-page flags.
2893 */
2894 i915_gem_object_set_to_full_cpu_read_domain(obj);
2895
Chris Wilson72133422010-09-13 23:56:38 +01002896 if (write) {
Chris Wilson2cf34d72010-09-14 13:03:28 +01002897 ret = i915_gem_object_wait_rendering(obj, true);
Chris Wilson72133422010-09-13 23:56:38 +01002898 if (ret)
2899 return ret;
2900 }
2901
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002902 old_write_domain = obj->write_domain;
2903 old_read_domains = obj->read_domains;
2904
Eric Anholte47c68e2008-11-14 13:35:19 -08002905 /* Flush the CPU cache if it's still invalid. */
2906 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2907 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002908
2909 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2910 }
2911
2912 /* It should now be out of any other write domains, and we can update
2913 * the domain values for our changes.
2914 */
2915 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2916
2917 /* If we're writing through the CPU, then the GPU read domains will
2918 * need to be invalidated at next use.
2919 */
2920 if (write) {
Chris Wilsonc78ec302010-09-20 12:50:23 +01002921 obj->read_domains = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002922 obj->write_domain = I915_GEM_DOMAIN_CPU;
2923 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002924
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002925 trace_i915_gem_object_change_domain(obj,
2926 old_read_domains,
2927 old_write_domain);
2928
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002929 return 0;
2930}
2931
Eric Anholt673a3942008-07-30 12:06:12 -07002932/*
2933 * Set the next domain for the specified object. This
2934 * may not actually perform the necessary flushing/invalidating though,
2935 * as that may want to be batched with other set_domain operations.
2936 *
2937 * This is (we hope) the only really tricky part of gem. The goal
2938 * is fairly simple -- track which caches hold bits of the object
2939 * and make sure they remain coherent. A few concrete examples may
2940 * help to explain how it works. For shorthand, we use the notation
2941 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2942 * a pair of read and write domain masks.
2943 *
2944 * Case 1: the batch buffer
2945 *
2946 * 1. Allocated
2947 * 2. Written by CPU
2948 * 3. Mapped to GTT
2949 * 4. Read by GPU
2950 * 5. Unmapped from GTT
2951 * 6. Freed
2952 *
2953 * Let's take these a step at a time
2954 *
2955 * 1. Allocated
2956 * Pages allocated from the kernel may still have
2957 * cache contents, so we set them to (CPU, CPU) always.
2958 * 2. Written by CPU (using pwrite)
2959 * The pwrite function calls set_domain (CPU, CPU) and
2960 * this function does nothing (as nothing changes)
2961 * 3. Mapped to GTT
2962 * This function asserts that the object is not
2963 * currently in any GPU-based read or write domains
2964 * 4. Read by GPU
2965 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2966 * As write_domain is zero, this function adds in the
2967 * current read domains (CPU+COMMAND, 0).
2968 * flush_domains is set to CPU.
2969 * invalidate_domains is set to COMMAND
2970 * clflush is run to get data out of the CPU caches
2971 * then i915_dev_set_domain calls i915_gem_flush to
2972 * emit an MI_FLUSH and drm_agp_chipset_flush
2973 * 5. Unmapped from GTT
2974 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2975 * flush_domains and invalidate_domains end up both zero
2976 * so no flushing/invalidating happens
2977 * 6. Freed
2978 * yay, done
2979 *
2980 * Case 2: The shared render buffer
2981 *
2982 * 1. Allocated
2983 * 2. Mapped to GTT
2984 * 3. Read/written by GPU
2985 * 4. set_domain to (CPU,CPU)
2986 * 5. Read/written by CPU
2987 * 6. Read/written by GPU
2988 *
2989 * 1. Allocated
2990 * Same as last example, (CPU, CPU)
2991 * 2. Mapped to GTT
2992 * Nothing changes (assertions find that it is not in the GPU)
2993 * 3. Read/written by GPU
2994 * execbuffer calls set_domain (RENDER, RENDER)
2995 * flush_domains gets CPU
2996 * invalidate_domains gets GPU
2997 * clflush (obj)
2998 * MI_FLUSH and drm_agp_chipset_flush
2999 * 4. set_domain (CPU, CPU)
3000 * flush_domains gets GPU
3001 * invalidate_domains gets CPU
3002 * wait_rendering (obj) to make sure all drawing is complete.
3003 * This will include an MI_FLUSH to get the data from GPU
3004 * to memory
3005 * clflush (obj) to invalidate the CPU cache
3006 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3007 * 5. Read/written by CPU
3008 * cache lines are loaded and dirtied
3009 * 6. Read/written by GPU
3010 * Same as last GPU access
3011 *
3012 * Case 3: The constant buffer
3013 *
3014 * 1. Allocated
3015 * 2. Written by CPU
3016 * 3. Read by GPU
3017 * 4. Updated (written) by CPU again
3018 * 5. Read by GPU
3019 *
3020 * 1. Allocated
3021 * (CPU, CPU)
3022 * 2. Written by CPU
3023 * (CPU, CPU)
3024 * 3. Read by GPU
3025 * (CPU+RENDER, 0)
3026 * flush_domains = CPU
3027 * invalidate_domains = RENDER
3028 * clflush (obj)
3029 * MI_FLUSH
3030 * drm_agp_chipset_flush
3031 * 4. Updated (written) by CPU again
3032 * (CPU, CPU)
3033 * flush_domains = 0 (no previous write domain)
3034 * invalidate_domains = 0 (no new read domains)
3035 * 5. Read by GPU
3036 * (CPU+RENDER, 0)
3037 * flush_domains = CPU
3038 * invalidate_domains = RENDER
3039 * clflush (obj)
3040 * MI_FLUSH
3041 * drm_agp_chipset_flush
3042 */
Keith Packardc0d90822008-11-20 23:11:08 -08003043static void
Eric Anholt8b0e3782009-02-19 14:40:50 -08003044i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003045{
3046 struct drm_device *dev = obj->dev;
Chris Wilson92204342010-09-18 11:02:01 +01003047 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01003048 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003049 uint32_t invalidate_domains = 0;
3050 uint32_t flush_domains = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003051 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003052
Eric Anholt8b0e3782009-02-19 14:40:50 -08003053 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
3054 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
Eric Anholt673a3942008-07-30 12:06:12 -07003055
Jesse Barnes652c3932009-08-17 13:31:43 -07003056 intel_mark_busy(dev, obj);
3057
Eric Anholt673a3942008-07-30 12:06:12 -07003058#if WATCH_BUF
3059 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
3060 __func__, obj,
Eric Anholt8b0e3782009-02-19 14:40:50 -08003061 obj->read_domains, obj->pending_read_domains,
3062 obj->write_domain, obj->pending_write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003063#endif
3064 /*
3065 * If the object isn't moving to a new write domain,
3066 * let the object stay in multiple read domains
3067 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003068 if (obj->pending_write_domain == 0)
3069 obj->pending_read_domains |= obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003070 else
3071 obj_priv->dirty = 1;
3072
3073 /*
3074 * Flush the current write domain if
3075 * the new read domains don't match. Invalidate
3076 * any read domains which differ from the old
3077 * write domain
3078 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003079 if (obj->write_domain &&
3080 obj->write_domain != obj->pending_read_domains) {
Eric Anholt673a3942008-07-30 12:06:12 -07003081 flush_domains |= obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003082 invalidate_domains |=
3083 obj->pending_read_domains & ~obj->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07003084 }
3085 /*
3086 * Invalidate any read caches which may have
3087 * stale data. That is, any new read domains.
3088 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003089 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003090 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3091#if WATCH_BUF
3092 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3093 __func__, flush_domains, invalidate_domains);
3094#endif
Eric Anholt673a3942008-07-30 12:06:12 -07003095 i915_gem_clflush_object(obj);
3096 }
3097
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003098 old_read_domains = obj->read_domains;
3099
Eric Anholtefbeed92009-02-19 14:54:51 -08003100 /* The actual obj->write_domain will be updated with
3101 * pending_write_domain after we emit the accumulated flush for all
3102 * of our domain changes in execbuffers (which clears objects'
3103 * write_domains). So if we have a current write domain that we
3104 * aren't changing, set pending_write_domain to that.
3105 */
3106 if (flush_domains == 0 && obj->pending_write_domain == 0)
3107 obj->pending_write_domain = obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003108 obj->read_domains = obj->pending_read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003109
3110 dev->invalidate_domains |= invalidate_domains;
3111 dev->flush_domains |= flush_domains;
Chris Wilson92204342010-09-18 11:02:01 +01003112 if (obj_priv->ring)
3113 dev_priv->mm.flush_rings |= obj_priv->ring->id;
Eric Anholt673a3942008-07-30 12:06:12 -07003114#if WATCH_BUF
3115 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3116 __func__,
3117 obj->read_domains, obj->write_domain,
3118 dev->invalidate_domains, dev->flush_domains);
3119#endif
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003120
3121 trace_i915_gem_object_change_domain(obj,
3122 old_read_domains,
3123 obj->write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003124}
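/*
 * Editor's worked example for i915_gem_object_set_to_gpu_domain() above,
 * following Case 3 of the preceding comment (a constant buffer written by
 * the CPU, then read by the render ring):
 *
 *	on entry:	read_domains = CPU, write_domain = CPU
 *	pending:	pending_read_domains = RENDER,
 *			pending_write_domain = 0
 *
 *	pending_write_domain == 0, so pending_read_domains |= CPU
 *	write_domain (CPU) != pending_read_domains:
 *		flush_domains      |= CPU
 *		invalidate_domains |= RENDER
 *	CPU appears in the combined mask, so the object is clflushed
 *
 * The accumulated dev->invalidate_domains/flush_domains are then emitted
 * once for the whole execbuffer rather than once per object.
 */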
3125
3126/**
Eric Anholte47c68e2008-11-14 13:35:19 -08003127 * Moves the object from a partially valid CPU read domain to a fully valid one.
Eric Anholt673a3942008-07-30 12:06:12 -07003128 *
Eric Anholte47c68e2008-11-14 13:35:19 -08003129 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3130 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3131 */
3132static void
3133i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3134{
Daniel Vetter23010e42010-03-08 13:35:02 +01003135 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003136
3137 if (!obj_priv->page_cpu_valid)
3138 return;
3139
3140 /* If we're partially in the CPU read domain, finish moving it in.
3141 */
3142 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3143 int i;
3144
3145 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3146 if (obj_priv->page_cpu_valid[i])
3147 continue;
Eric Anholt856fa192009-03-19 14:10:50 -07003148 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08003149 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003150 }
3151
3152 /* Free the page_cpu_valid mappings which are now stale, whether
3153 * or not we've got I915_GEM_DOMAIN_CPU.
3154 */
Eric Anholt9a298b22009-03-24 12:23:04 -07003155 kfree(obj_priv->page_cpu_valid);
Eric Anholte47c68e2008-11-14 13:35:19 -08003156 obj_priv->page_cpu_valid = NULL;
3157}
3158
3159/**
3160 * Set the CPU read domain on a range of the object.
3161 *
3162 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3163 * not entirely valid. The page_cpu_valid member of the object records which
3164 * pages have been flushed, and will be respected by
3165 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3166 * of the whole object.
3167 *
3168 * This function returns when the move is complete, including waiting on
3169 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07003170 */
3171static int
Eric Anholte47c68e2008-11-14 13:35:19 -08003172i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3173 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07003174{
Daniel Vetter23010e42010-03-08 13:35:02 +01003175 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003176 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003177 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003178
Eric Anholte47c68e2008-11-14 13:35:19 -08003179 if (offset == 0 && size == obj->size)
3180 return i915_gem_object_set_to_cpu_domain(obj, 0);
3181
Daniel Vetterba3d8d72010-02-11 22:37:04 +01003182 ret = i915_gem_object_flush_gpu_write_domain(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003183 if (ret != 0)
3184 return ret;
3185 i915_gem_object_flush_gtt_write_domain(obj);
3186
3187 /* If we're already fully in the CPU read domain, we're done. */
3188 if (obj_priv->page_cpu_valid == NULL &&
3189 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003190 return 0;
3191
Eric Anholte47c68e2008-11-14 13:35:19 -08003192 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3193 * newly adding I915_GEM_DOMAIN_CPU
3194 */
Eric Anholt673a3942008-07-30 12:06:12 -07003195 if (obj_priv->page_cpu_valid == NULL) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003196 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3197 GFP_KERNEL);
Eric Anholte47c68e2008-11-14 13:35:19 -08003198 if (obj_priv->page_cpu_valid == NULL)
3199 return -ENOMEM;
3200 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3201 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003202
3203 /* Flush the cache on any pages that are still invalid from the CPU's
3204 * perspective.
3205 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003206 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3207 i++) {
Eric Anholt673a3942008-07-30 12:06:12 -07003208 if (obj_priv->page_cpu_valid[i])
3209 continue;
3210
Eric Anholt856fa192009-03-19 14:10:50 -07003211 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003212
3213 obj_priv->page_cpu_valid[i] = 1;
3214 }
3215
Eric Anholte47c68e2008-11-14 13:35:19 -08003216 /* It should now be out of any other write domains, and we can update
3217 * the domain values for our changes.
3218 */
3219 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3220
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003221 old_read_domains = obj->read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003222 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3223
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003224 trace_i915_gem_object_change_domain(obj,
3225 old_read_domains,
3226 obj->write_domain);
3227
Eric Anholt673a3942008-07-30 12:06:12 -07003228 return 0;
3229}
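/*
 * Editor's worked example for the range flush above (PAGE_SIZE = 4096;
 * the numbers are illustrative): offset = 5000, size = 10000 covers bytes
 * 5000..14999, so
 *
 *	first page = offset / PAGE_SIZE              = 1
 *	last page  = (offset + size - 1) / PAGE_SIZE = 14999 / 4096 = 3
 *
 * and pages 1..3 are clflushed (unless already marked in page_cpu_valid),
 * while page 0 and pages 4 upwards are left untouched.
 */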
3230
3231/**
Eric Anholt673a3942008-07-30 12:06:12 -07003232 * Pin an object to the GTT and evaluate the relocations landing in it.
3233 */
3234static int
3235i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3236 struct drm_file *file_priv,
Jesse Barnes76446ca2009-12-17 22:05:42 -05003237 struct drm_i915_gem_exec_object2 *entry,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003238 struct drm_i915_gem_relocation_entry *relocs)
Eric Anholt673a3942008-07-30 12:06:12 -07003239{
3240 struct drm_device *dev = obj->dev;
Keith Packard0839ccb2008-10-30 19:38:48 -07003241 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01003242 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003243 int i, ret;
Keith Packard0839ccb2008-10-30 19:38:48 -07003244 void __iomem *reloc_page;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003245 bool need_fence;
3246
3247 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3248 obj_priv->tiling_mode != I915_TILING_NONE;
3249
3250 /* Check fence reg constraints and rebind if necessary */
Chris Wilson808b24d62010-05-27 13:18:15 +01003251 if (need_fence &&
3252 !i915_gem_object_fence_offset_ok(obj,
3253 obj_priv->tiling_mode)) {
3254 ret = i915_gem_object_unbind(obj);
3255 if (ret)
3256 return ret;
3257 }
Eric Anholt673a3942008-07-30 12:06:12 -07003258
3259 /* Choose the GTT offset for our buffer and put it there. */
3260 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3261 if (ret)
3262 return ret;
3263
Jesse Barnes76446ca2009-12-17 22:05:42 -05003264 /*
3265 * Pre-965 chips need a fence register set up in order to
3266 * properly handle blits to/from tiled surfaces.
3267 */
3268 if (need_fence) {
Chris Wilson53640e12010-09-20 11:40:50 +01003269 ret = i915_gem_object_get_fence_reg(obj, true);
Jesse Barnes76446ca2009-12-17 22:05:42 -05003270 if (ret != 0) {
Jesse Barnes76446ca2009-12-17 22:05:42 -05003271 i915_gem_object_unpin(obj);
3272 return ret;
3273 }
Chris Wilson53640e12010-09-20 11:40:50 +01003274
3275 dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003276 }
3277
Eric Anholt673a3942008-07-30 12:06:12 -07003278 entry->offset = obj_priv->gtt_offset;
3279
Eric Anholt673a3942008-07-30 12:06:12 -07003280 /* Apply the relocations, using the GTT aperture to avoid cache
3281 * flushing requirements.
3282 */
3283 for (i = 0; i < entry->relocation_count; i++) {
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003284 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003285 struct drm_gem_object *target_obj;
3286 struct drm_i915_gem_object *target_obj_priv;
Eric Anholt3043c602008-10-02 12:24:47 -07003287 uint32_t reloc_val, reloc_offset;
3288 uint32_t __iomem *reloc_entry;
Eric Anholt673a3942008-07-30 12:06:12 -07003289
Eric Anholt673a3942008-07-30 12:06:12 -07003290 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003291 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003292 if (target_obj == NULL) {
3293 i915_gem_object_unpin(obj);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01003294 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07003295 }
Daniel Vetter23010e42010-03-08 13:35:02 +01003296 target_obj_priv = to_intel_bo(target_obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003297
Chris Wilson8542a0b2009-09-09 21:15:15 +01003298#if WATCH_RELOC
3299 DRM_INFO("%s: obj %p offset %08x target %d "
3300 "read %08x write %08x gtt %08x "
3301 "presumed %08x delta %08x\n",
3302 __func__,
3303 obj,
3304 (int) reloc->offset,
3305 (int) reloc->target_handle,
3306 (int) reloc->read_domains,
3307 (int) reloc->write_domain,
3308 (int) target_obj_priv->gtt_offset,
3309 (int) reloc->presumed_offset,
3310 reloc->delta);
3311#endif
3312
Eric Anholt673a3942008-07-30 12:06:12 -07003313 /* The target buffer should have appeared before us in the
3314 * exec_object list, so it should have a GTT space bound by now.
3315 */
3316 if (target_obj_priv->gtt_space == NULL) {
3317 DRM_ERROR("No GTT space found for object %d\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003318 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003319 drm_gem_object_unreference(target_obj);
3320 i915_gem_object_unpin(obj);
3321 return -EINVAL;
3322 }
3323
Chris Wilson8542a0b2009-09-09 21:15:15 +01003324 /* Validate that the target is in a valid r/w GPU domain */
Daniel Vetter16edd552010-02-19 11:52:02 +01003325 if (reloc->write_domain & (reloc->write_domain - 1)) {
3326 DRM_ERROR("reloc with multiple write domains: "
3327 "obj %p target %d offset %d "
3328 "read %08x write %08x",
3329 obj, reloc->target_handle,
3330 (int) reloc->offset,
3331 reloc->read_domains,
3332 reloc->write_domain);
 /* don't leak the lookup reference or our pin on this error path */
 drm_gem_object_unreference(target_obj);
 i915_gem_object_unpin(obj);
3333 return -EINVAL;
3334 }
Chris Wilson8542a0b2009-09-09 21:15:15 +01003335 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3336 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3337 DRM_ERROR("reloc with read/write CPU domains: "
3338 "obj %p target %d offset %d "
3339 "read %08x write %08x",
3340 obj, reloc->target_handle,
3341 (int) reloc->offset,
3342 reloc->read_domains,
3343 reloc->write_domain);
3344 drm_gem_object_unreference(target_obj);
3345 i915_gem_object_unpin(obj);
3346 return -EINVAL;
3347 }
3348 if (reloc->write_domain && target_obj->pending_write_domain &&
3349 reloc->write_domain != target_obj->pending_write_domain) {
3350 DRM_ERROR("Write domain conflict: "
3351 "obj %p target %d offset %d "
3352 "new %08x old %08x\n",
3353 obj, reloc->target_handle,
3354 (int) reloc->offset,
3355 reloc->write_domain,
3356 target_obj->pending_write_domain);
3357 drm_gem_object_unreference(target_obj);
3358 i915_gem_object_unpin(obj);
3359 return -EINVAL;
3360 }
3361
3362 target_obj->pending_read_domains |= reloc->read_domains;
3363 target_obj->pending_write_domain |= reloc->write_domain;
3364
3365 /* If the relocation already has the right value in it, no
3366 * more work needs to be done.
3367 */
3368 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3369 drm_gem_object_unreference(target_obj);
3370 continue;
3371 }
3372
3373 /* Check that the relocation address is valid... */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003374 if (reloc->offset > obj->size - 4) {
Eric Anholt673a3942008-07-30 12:06:12 -07003375 DRM_ERROR("Relocation beyond object bounds: "
3376 "obj %p target %d offset %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003377 obj, reloc->target_handle,
3378 (int) reloc->offset, (int) obj->size);
Eric Anholt673a3942008-07-30 12:06:12 -07003379 drm_gem_object_unreference(target_obj);
3380 i915_gem_object_unpin(obj);
3381 return -EINVAL;
3382 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003383 if (reloc->offset & 3) {
Eric Anholt673a3942008-07-30 12:06:12 -07003384 DRM_ERROR("Relocation not 4-byte aligned: "
3385 "obj %p target %d offset %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003386 obj, reloc->target_handle,
3387 (int) reloc->offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003388 drm_gem_object_unreference(target_obj);
3389 i915_gem_object_unpin(obj);
3390 return -EINVAL;
3391 }
3392
Chris Wilson8542a0b2009-09-09 21:15:15 +01003393 /* and points to somewhere within the target object. */
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003394 if (reloc->delta >= target_obj->size) {
3395 DRM_ERROR("Relocation beyond target object bounds: "
3396 "obj %p target %d delta %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003397 obj, reloc->target_handle,
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003398 (int) reloc->delta, (int) target_obj->size);
Chris Wilson491152b2009-02-11 14:26:32 +00003399 drm_gem_object_unreference(target_obj);
3400 i915_gem_object_unpin(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003401 return -EINVAL;
3402 }
3403
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003404 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3405 if (ret != 0) {
3406 drm_gem_object_unreference(target_obj);
3407 i915_gem_object_unpin(obj);
3408 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003409 }
3410
3411 /* Map the page containing the relocation we're going to
3412 * perform.
3413 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003414 reloc_offset = obj_priv->gtt_offset + reloc->offset;
Keith Packard0839ccb2008-10-30 19:38:48 -07003415 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3416 (reloc_offset &
Chris Wilsonfca3ec02010-08-04 14:34:24 +01003417 ~(PAGE_SIZE - 1)),
3418 KM_USER0);
Eric Anholt3043c602008-10-02 12:24:47 -07003419 reloc_entry = (uint32_t __iomem *)(reloc_page +
Keith Packard0839ccb2008-10-30 19:38:48 -07003420 (reloc_offset & (PAGE_SIZE - 1)));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003421 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
Eric Anholt673a3942008-07-30 12:06:12 -07003422
3423#if WATCH_BUF
3424 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003425 obj, (unsigned int) reloc->offset,
Eric Anholt673a3942008-07-30 12:06:12 -07003426 readl(reloc_entry), reloc_val);
3427#endif
3428 writel(reloc_val, reloc_entry);
Chris Wilsonfca3ec02010-08-04 14:34:24 +01003429 io_mapping_unmap_atomic(reloc_page, KM_USER0);
Eric Anholt673a3942008-07-30 12:06:12 -07003430
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003431 /* The updated presumed offset for this entry will be
3432 * copied back out to the user.
Eric Anholt673a3942008-07-30 12:06:12 -07003433 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003434 reloc->presumed_offset = target_obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003435
3436 drm_gem_object_unreference(target_obj);
3437 }
3438
Eric Anholt673a3942008-07-30 12:06:12 -07003439#if WATCH_BUF
3440 if (0)
3441 i915_gem_dump_object(obj, 128, __func__, ~0);
3442#endif
3443 return 0;
3444}
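/*
 * Editor's worked example of a single relocation write performed above
 * (all addresses invented): the object sits at gtt_offset 0x100000 with a
 * relocation at reloc->offset 0x1004, targeting an object at gtt_offset
 * 0x200000 with reloc->delta 0x10:
 *
 *	reloc_offset = 0x100000 + 0x1004 = 0x101004
 *	reloc_page   = io_mapping_map_atomic_wc(gtt_mapping,
 *						0x101000, KM_USER0)
 *	reloc_entry  = reloc_page + 0x4
 *	writel(0x200000 + 0x10, reloc_entry)	-> stores 0x200010
 *
 * The store goes through the write-combined GTT aperture, which is why no
 * CPU cache flushing is required for the object being relocated.
 */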
3445
Eric Anholt673a3942008-07-30 12:06:12 -07003446/* Throttle our rendering by waiting until the ring has completed our requests
3447 * emitted over 20 msec ago.
3448 *
Eric Anholtb9624422009-06-03 07:27:35 +00003449 * Note that if we were to use the current jiffies each time around the loop,
3450 * we wouldn't escape the function with any frames outstanding if the time to
3451 * render a frame was over 20ms.
3452 *
Eric Anholt673a3942008-07-30 12:06:12 -07003453 * This should get us reasonable parallelism between CPU and GPU but also
3454 * relatively low latency when blocking on a particular request to finish.
3455 */
3456static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003457i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003458{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003459 struct drm_i915_private *dev_priv = dev->dev_private;
3460 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003461 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003462 struct drm_i915_gem_request *request;
3463 struct intel_ring_buffer *ring = NULL;
3464 u32 seqno = 0;
3465 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003466
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003467 mutex_lock(&file_priv->mutex);
3468 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003469 if (time_after_eq(request->emitted_jiffies, recent_enough))
3470 break;
3471
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003472 ring = request->ring;
3473 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003474 }
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003475 mutex_unlock(&file_priv->mutex);
3476
3477 if (seqno == 0)
3478 return 0;
3479
3480 ret = 0;
3481 if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
3482 /* And wait for the seqno passing without holding any locks and
3483 * causing extra latency for others. This is safe as the irq
3484 * generation is designed to be run atomically and so is
3485 * lockless.
3486 */
3487 ring->user_irq_get(dev, ring);
3488 ret = wait_event_interruptible(ring->irq_queue,
3489 i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
3490 || atomic_read(&dev_priv->mm.wedged));
3491 ring->user_irq_put(dev, ring);
3492
3493 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3494 ret = -EIO;
3495 }
3496
3497 if (ret == 0)
3498 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003499
Eric Anholt673a3942008-07-30 12:06:12 -07003500 return ret;
3501}
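/*
 * Editor's illustration of the throttle window above (times invented):
 * with requests emitted by this client at now-30ms, now-25ms and now-10ms,
 * recent_enough = now-20ms, so the list walk (oldest first) records the
 * now-25ms request -- the newest one older than the window -- and the
 * function blocks until that seqno has passed. Requests inside the 20ms
 * window are never waited on, keeping at most ~20ms of this client's work
 * queued on the GPU.
 */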
3502
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003503static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003504i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003505 uint32_t buffer_count,
3506 struct drm_i915_gem_relocation_entry **relocs)
3507{
3508 uint32_t reloc_count = 0, reloc_index = 0, i;
3509 int ret;
3510
3511 *relocs = NULL;
3512 for (i = 0; i < buffer_count; i++) {
3513 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3514 return -EINVAL;
3515 reloc_count += exec_list[i].relocation_count;
3516 }
3517
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003518 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
Jesse Barnes76446ca2009-12-17 22:05:42 -05003519 if (*relocs == NULL) {
3520 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003521 return -ENOMEM;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003522 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003523
3524 for (i = 0; i < buffer_count; i++) {
3525 struct drm_i915_gem_relocation_entry __user *user_relocs;
3526
3527 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3528
3529 ret = copy_from_user(&(*relocs)[reloc_index],
3530 user_relocs,
3531 exec_list[i].relocation_count *
3532 sizeof(**relocs));
3533 if (ret != 0) {
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003534 drm_free_large(*relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003535 *relocs = NULL;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003536 return -EFAULT;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003537 }
3538
3539 reloc_index += exec_list[i].relocation_count;
3540 }
3541
Florian Mickler2bc43b52009-04-06 22:55:41 +02003542 return 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003543}
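/*
 * Editor's note on the overflow test above: reloc_count and
 * relocation_count are uint32_t, so a wrapping sum is caught by the
 * "sum < addend" idiom. With a hypothetical hostile value:
 *
 *	reloc_count                    = 0xffffff00
 *	exec_list[i].relocation_count  = 0x00000200
 *	reloc_count + relocation_count = 0x00000100	(wrapped)
 *
 * 0x100 < 0xffffff00, so the function bails with -EINVAL instead of
 * under-allocating the relocation array and copying past its end.
 */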
3544
3545static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003546i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003547 uint32_t buffer_count,
3548 struct drm_i915_gem_relocation_entry *relocs)
3549{
3550 uint32_t reloc_count = 0, i;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003551 int ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003552
Chris Wilson93533c22010-01-31 10:40:48 +00003553 if (relocs == NULL)
3554 return 0;
3555
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003556 for (i = 0; i < buffer_count; i++) {
3557 struct drm_i915_gem_relocation_entry __user *user_relocs;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003558 int unwritten;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003559
3560 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3561
Florian Mickler2bc43b52009-04-06 22:55:41 +02003562 unwritten = copy_to_user(user_relocs,
3563 &relocs[reloc_count],
3564 exec_list[i].relocation_count *
3565 sizeof(*relocs));
3566
3567 if (unwritten) {
3568 ret = -EFAULT;
3569 goto err;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003570 }
3571
3572 reloc_count += exec_list[i].relocation_count;
3573 }
3574
Florian Mickler2bc43b52009-04-06 22:55:41 +02003575err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003576 drm_free_large(relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003577
3578 return ret;
3579}
3580
Chris Wilson83d60792009-06-06 09:45:57 +01003581static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003582i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
Chris Wilson83d60792009-06-06 09:45:57 +01003583 uint64_t exec_offset)
3584{
3585 uint32_t exec_start, exec_len;
3586
3587 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3588 exec_len = (uint32_t) exec->batch_len;
3589
3590 if ((exec_start | exec_len) & 0x7)
3591 return -EINVAL;
3592
3593 if (!exec_start)
3594 return -EINVAL;
3595
3596 return 0;
3597}
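/*
 * Editor's worked example for the sanity check above: batch start and
 * length must both be 8-byte aligned, enforced in a single test by OR-ing
 * them. With exec_offset = 0x10000, batch_start_offset = 0x40 and
 * batch_len = 0x88:
 *
 *	exec_start = 0x10040, exec_len = 0x88
 *	(0x10040 | 0x88) & 0x7 = 0	-> accepted
 *
 * whereas batch_len = 0x8a would leave bit 1 set and be rejected, as
 * would exec_start == 0.
 */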
3598
Chris Wilsone6c3a2a2010-09-23 23:04:43 +01003599static int
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003600i915_gem_wait_for_pending_flip(struct drm_device *dev,
3601 struct drm_gem_object **object_list,
3602 int count)
3603{
3604 drm_i915_private_t *dev_priv = dev->dev_private;
3605 struct drm_i915_gem_object *obj_priv;
3606 DEFINE_WAIT(wait);
3607 int i, ret = 0;
3608
3609 for (;;) {
3610 prepare_to_wait(&dev_priv->pending_flip_queue,
3611 &wait, TASK_INTERRUPTIBLE);
3612 for (i = 0; i < count; i++) {
Daniel Vetter23010e42010-03-08 13:35:02 +01003613 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003614 if (atomic_read(&obj_priv->pending_flip) > 0)
3615 break;
3616 }
3617 if (i == count)
3618 break;
3619
3620 if (!signal_pending(current)) {
3621 mutex_unlock(&dev->struct_mutex);
3622 schedule();
3623 mutex_lock(&dev->struct_mutex);
3624 continue;
3625 }
3626 ret = -ERESTARTSYS;
3627 break;
3628 }
3629 finish_wait(&dev_priv->pending_flip_queue, &wait);
3630
3631 return ret;
3632}
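/*
 * Editor's note (illustrative): the loop above is the standard open-coded
 * waitqueue pattern, here with dev->struct_mutex dropped across the sleep:
 *
 *	for (;;) {
 *		prepare_to_wait(&q, &wait, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current)) {
 *			ret = -ERESTARTSYS;
 *			break;
 *		}
 *		unlock; schedule(); relock;
 *	}
 *	finish_wait(&q, &wait);
 *
 * prepare_to_wait() must run before the condition is checked so that a
 * wakeup arriving between the check and schedule() is not lost.
 */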
3633
Chris Wilson8dc5d142010-08-12 12:36:12 +01003634static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003635i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3636 struct drm_file *file_priv,
3637 struct drm_i915_gem_execbuffer2 *args,
3638 struct drm_i915_gem_exec_object2 *exec_list)
Eric Anholt673a3942008-07-30 12:06:12 -07003639{
3640 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003641 struct drm_gem_object **object_list = NULL;
3642 struct drm_gem_object *batch_obj;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003643 struct drm_i915_gem_object *obj_priv;
Eric Anholt201361a2009-03-11 12:30:04 -07003644 struct drm_clip_rect *cliprects = NULL;
Chris Wilson93533c22010-01-31 10:40:48 +00003645 struct drm_i915_gem_relocation_entry *relocs = NULL;
Chris Wilson8dc5d142010-08-12 12:36:12 +01003646 struct drm_i915_gem_request *request = NULL;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003647 int ret, ret2, i, pinned = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003648 uint64_t exec_offset;
Chris Wilson5c12a07e2010-09-22 11:22:30 +01003649 uint32_t reloc_index;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003650 int pin_tries, flips;
Eric Anholt673a3942008-07-30 12:06:12 -07003651
Zou Nan hai852835f2010-05-21 09:08:56 +08003652 struct intel_ring_buffer *ring = NULL;
3653
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003654 ret = i915_gem_check_is_wedged(dev);
3655 if (ret)
3656 return ret;
3657
Eric Anholt673a3942008-07-30 12:06:12 -07003658#if WATCH_EXEC
3659 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3660 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3661#endif
Zou Nan haid1b851f2010-05-21 09:08:57 +08003662 if (args->flags & I915_EXEC_BSD) {
3663 if (!HAS_BSD(dev)) {
3664 DRM_ERROR("execbuf with wrong flag\n");
3665 return -EINVAL;
3666 }
3667 ring = &dev_priv->bsd_ring;
3668 } else {
3669 ring = &dev_priv->render_ring;
3670 }
3671
Eric Anholt4f481ed2008-09-10 14:22:49 -07003672 if (args->buffer_count < 1) {
3673 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3674 return -EINVAL;
3675 }
Eric Anholtc8e0f932009-11-22 03:49:37 +01003676 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
Jesse Barnes76446ca2009-12-17 22:05:42 -05003677 if (object_list == NULL) {
3678 DRM_ERROR("Failed to allocate object list for %d buffers\n",
Eric Anholt673a3942008-07-30 12:06:12 -07003679 args->buffer_count);
3680 ret = -ENOMEM;
3681 goto pre_mutex_err;
3682 }
Eric Anholt673a3942008-07-30 12:06:12 -07003683
Eric Anholt201361a2009-03-11 12:30:04 -07003684 if (args->num_cliprects != 0) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003685 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3686 GFP_KERNEL);
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003687 if (cliprects == NULL) {
3688 ret = -ENOMEM;
Eric Anholt201361a2009-03-11 12:30:04 -07003689 goto pre_mutex_err;
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003690 }
Eric Anholt201361a2009-03-11 12:30:04 -07003691
3692 ret = copy_from_user(cliprects,
3693 (struct drm_clip_rect __user *)
3694 (uintptr_t) args->cliprects_ptr,
3695 sizeof(*cliprects) * args->num_cliprects);
3696 if (ret != 0) {
3697 DRM_ERROR("copy %d cliprects failed: %d\n",
3698 args->num_cliprects, ret);
Dan Carpenterc877cdc2010-06-23 19:03:01 +02003699 ret = -EFAULT;
Eric Anholt201361a2009-03-11 12:30:04 -07003700 goto pre_mutex_err;
3701 }
3702 }
3703
Chris Wilson8dc5d142010-08-12 12:36:12 +01003704 request = kzalloc(sizeof(*request), GFP_KERNEL);
3705 if (request == NULL) {
3706 ret = -ENOMEM;
3707 goto pre_mutex_err;
3708 }
3709
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003710 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3711 &relocs);
3712 if (ret != 0)
3713 goto pre_mutex_err;
3714
Chris Wilson76c1dec2010-09-25 11:22:51 +01003715 ret = i915_mutex_lock_interruptible(dev);
3716 if (ret)
3717 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003718
3719 i915_verify_inactive(dev, __FILE__, __LINE__);
3720
Eric Anholt673a3942008-07-30 12:06:12 -07003721 if (dev_priv->mm.suspended) {
Eric Anholt673a3942008-07-30 12:06:12 -07003722 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003723 ret = -EBUSY;
3724 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003725 }
3726
Keith Packardac94a962008-11-20 23:30:27 -08003727 /* Look up object handles */
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003728 flips = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003729 for (i = 0; i < args->buffer_count; i++) {
3730 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3731 exec_list[i].handle);
3732 if (object_list[i] == NULL) {
3733 DRM_ERROR("Invalid object handle %d at index %d\n",
3734 exec_list[i].handle, i);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003735 /* prevent error path from reading uninitialized data */
3736 args->buffer_count = i + 1;
Chris Wilsonbf79cb92010-08-04 14:19:46 +01003737 ret = -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07003738 goto err;
3739 }
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003740
Daniel Vetter23010e42010-03-08 13:35:02 +01003741 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003742 if (obj_priv->in_execbuffer) {
3743 DRM_ERROR("Object %p appears more than once in object list\n",
3744 object_list[i]);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003745 /* prevent error path from reading uninitialized data */
3746 args->buffer_count = i + 1;
Chris Wilsonbf79cb92010-08-04 14:19:46 +01003747 ret = -EINVAL;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003748 goto err;
3749 }
3750 obj_priv->in_execbuffer = true;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003751 flips += atomic_read(&obj_priv->pending_flip);
3752 }
3753
3754 if (flips > 0) {
3755 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3756 args->buffer_count);
3757 if (ret)
3758 goto err;
Keith Packardac94a962008-11-20 23:30:27 -08003759 }
Eric Anholt673a3942008-07-30 12:06:12 -07003760
Keith Packardac94a962008-11-20 23:30:27 -08003761 /* Pin and relocate */
3762 for (pin_tries = 0; ; pin_tries++) {
3763 ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003764 reloc_index = 0;
3765
Keith Packardac94a962008-11-20 23:30:27 -08003766 for (i = 0; i < args->buffer_count; i++) {
3767 object_list[i]->pending_read_domains = 0;
3768 object_list[i]->pending_write_domain = 0;
3769 ret = i915_gem_object_pin_and_relocate(object_list[i],
3770 file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003771 &exec_list[i],
3772 &relocs[reloc_index]);
Keith Packardac94a962008-11-20 23:30:27 -08003773 if (ret)
3774 break;
3775 pinned = i + 1;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003776 reloc_index += exec_list[i].relocation_count;
Keith Packardac94a962008-11-20 23:30:27 -08003777 }
3778 /* success */
3779 if (ret == 0)
3780 break;
3781
3782 /* error other than GTT full, or we've already tried again */
Chris Wilson2939e1f2009-06-06 09:46:03 +01003783 if (ret != -ENOSPC || pin_tries >= 1) {
Chris Wilson07f73f62009-09-14 16:50:30 +01003784 if (ret != -ERESTARTSYS) {
3785 unsigned long long total_size = 0;
Chris Wilson3d1cc472010-05-27 13:18:19 +01003786 int num_fences = 0;
3787 for (i = 0; i < args->buffer_count; i++) {
Chris Wilson43b27f42010-07-02 08:57:15 +01003788 obj_priv = to_intel_bo(object_list[i]);
Chris Wilson3d1cc472010-05-27 13:18:19 +01003789
Chris Wilson07f73f62009-09-14 16:50:30 +01003790 total_size += object_list[i]->size;
Chris Wilson3d1cc472010-05-27 13:18:19 +01003791 num_fences +=
3792 exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
3793 obj_priv->tiling_mode != I915_TILING_NONE;
3794 }
3795 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
Chris Wilson07f73f62009-09-14 16:50:30 +01003796 pinned+1, args->buffer_count,
Chris Wilson3d1cc472010-05-27 13:18:19 +01003797 total_size, num_fences,
3798 ret);
Chris Wilson07f73f62009-09-14 16:50:30 +01003799 DRM_ERROR("%d objects [%d pinned], "
3800 "%d object bytes [%d pinned], "
3801 "%d/%d gtt bytes\n",
3802 atomic_read(&dev->object_count),
3803 atomic_read(&dev->pin_count),
3804 atomic_read(&dev->object_memory),
3805 atomic_read(&dev->pin_memory),
3806 atomic_read(&dev->gtt_memory),
3807 dev->gtt_total);
3808 }
Eric Anholt673a3942008-07-30 12:06:12 -07003809 goto err;
3810 }
Keith Packardac94a962008-11-20 23:30:27 -08003811
3812 /* unpin all of our buffers */
3813 for (i = 0; i < pinned; i++)
3814 i915_gem_object_unpin(object_list[i]);
Eric Anholtb1177632008-12-10 10:09:41 -08003815 pinned = 0;
Keith Packardac94a962008-11-20 23:30:27 -08003816
3817 /* evict everyone we can from the aperture */
3818 ret = i915_gem_evict_everything(dev);
Chris Wilson07f73f62009-09-14 16:50:30 +01003819 if (ret && ret != -ENOSPC)
Keith Packardac94a962008-11-20 23:30:27 -08003820 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003821 }
3822
3823 /* Set the pending read domains for the batch buffer to COMMAND */
3824 batch_obj = object_list[args->buffer_count-1];
Chris Wilson5f26a2c2009-06-06 09:45:58 +01003825 if (batch_obj->pending_write_domain) {
3826 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3827 ret = -EINVAL;
3828 goto err;
3829 }
3830 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
Eric Anholt673a3942008-07-30 12:06:12 -07003831
Chris Wilson83d60792009-06-06 09:45:57 +01003832 /* Sanity check the batch buffer, prior to moving objects */
3833 exec_offset = exec_list[args->buffer_count - 1].offset;
3834 ret = i915_gem_check_execbuffer(args, exec_offset);
3835 if (ret != 0) {
3836 DRM_ERROR("execbuf with invalid offset/length\n");
3837 goto err;
3838 }
3839
Eric Anholt673a3942008-07-30 12:06:12 -07003840 i915_verify_inactive(dev, __FILE__, __LINE__);
3841
Keith Packard646f0f62008-11-20 23:23:03 -08003842 /* Zero the global flush/invalidate flags. These
3843 * will be modified as new domains are computed
3844 * for each object
3845 */
3846 dev->invalidate_domains = 0;
3847 dev->flush_domains = 0;
Chris Wilson92204342010-09-18 11:02:01 +01003848 dev_priv->mm.flush_rings = 0;
Keith Packard646f0f62008-11-20 23:23:03 -08003849
Eric Anholt673a3942008-07-30 12:06:12 -07003850 for (i = 0; i < args->buffer_count; i++) {
3851 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003852
Keith Packard646f0f62008-11-20 23:23:03 -08003853 /* Compute new gpu domains and update invalidate/flush */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003854 i915_gem_object_set_to_gpu_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003855 }
3856
3857 i915_verify_inactive(dev, __FILE__, __LINE__);
3858
Keith Packard646f0f62008-11-20 23:23:03 -08003859 if (dev->invalidate_domains | dev->flush_domains) {
3860#if WATCH_EXEC
3861 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3862 __func__,
3863 dev->invalidate_domains,
3864 dev->flush_domains);
3865#endif
Chris Wilsonc78ec302010-09-20 12:50:23 +01003866 i915_gem_flush(dev, file_priv,
Keith Packard646f0f62008-11-20 23:23:03 -08003867 dev->invalidate_domains,
Chris Wilson92204342010-09-18 11:02:01 +01003868 dev->flush_domains,
3869 dev_priv->mm.flush_rings);
Daniel Vettera6910432010-02-02 17:08:37 +01003870 }
3871
Eric Anholtefbeed92009-02-19 14:54:51 -08003872 for (i = 0; i < args->buffer_count; i++) {
3873 struct drm_gem_object *obj = object_list[i];
Daniel Vetter23010e42010-03-08 13:35:02 +01003874 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003875 uint32_t old_write_domain = obj->write_domain;
Eric Anholtefbeed92009-02-19 14:54:51 -08003876
3877 obj->write_domain = obj->pending_write_domain;
Daniel Vetter99fcb762010-02-07 16:20:18 +01003878 if (obj->write_domain)
3879 list_move_tail(&obj_priv->gpu_write_list,
3880 &dev_priv->mm.gpu_write_list);
3881 else
3882 list_del_init(&obj_priv->gpu_write_list);
3883
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003884 trace_i915_gem_object_change_domain(obj,
3885 obj->read_domains,
3886 old_write_domain);
Eric Anholtefbeed92009-02-19 14:54:51 -08003887 }
3888
Eric Anholt673a3942008-07-30 12:06:12 -07003889 i915_verify_inactive(dev, __FILE__, __LINE__);
3890
3891#if WATCH_COHERENCY
3892 for (i = 0; i < args->buffer_count; i++) {
3893 i915_gem_object_check_coherency(object_list[i],
3894 exec_list[i].handle);
3895 }
3896#endif
3897
Eric Anholt673a3942008-07-30 12:06:12 -07003898#if WATCH_EXEC
Ben Gamari6911a9b2009-04-02 11:24:54 -07003899 i915_gem_dump_object(batch_obj,
Eric Anholt673a3942008-07-30 12:06:12 -07003900 args->batch_len,
3901 __func__,
3902 ~0);
3903#endif
3904
Eric Anholt673a3942008-07-30 12:06:12 -07003905 /* Exec the batchbuffer */
Zou Nan hai852835f2010-05-21 09:08:56 +08003906 ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3907 cliprects, exec_offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003908 if (ret) {
3909 DRM_ERROR("dispatch failed %d\n", ret);
3910 goto err;
3911 }
3912
3913 /*
3914 * Ensure that the commands in the batch buffer are
3915 * finished before the interrupt fires
3916 */
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01003917 i915_retire_commands(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07003918
3919 i915_verify_inactive(dev, __FILE__, __LINE__);
3920
Daniel Vetter617dbe22010-02-11 22:16:02 +01003921 for (i = 0; i < args->buffer_count; i++) {
3922 struct drm_gem_object *obj = object_list[i];
3923 obj_priv = to_intel_bo(obj);
3924
3925 i915_gem_object_move_to_active(obj, ring);
3926#if WATCH_LRU
3927 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3928#endif
3929 }
Chris Wilson5c12a07e2010-09-22 11:22:30 +01003930 i915_add_request(dev, file_priv, request, ring);
Chris Wilson8dc5d142010-08-12 12:36:12 +01003931 request = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07003932
Eric Anholt673a3942008-07-30 12:06:12 -07003933#if WATCH_LRU
3934 i915_dump_lru(dev, __func__);
3935#endif
3936
3937 i915_verify_inactive(dev, __FILE__, __LINE__);
3938
Eric Anholt673a3942008-07-30 12:06:12 -07003939err:
Julia Lawallaad87df2008-12-21 16:28:47 +01003940 for (i = 0; i < pinned; i++)
3941 i915_gem_object_unpin(object_list[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07003942
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003943 for (i = 0; i < args->buffer_count; i++) {
3944 if (object_list[i]) {
Daniel Vetter23010e42010-03-08 13:35:02 +01003945 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003946 obj_priv->in_execbuffer = false;
3947 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003948 drm_gem_object_unreference(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003949 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003950
Eric Anholt673a3942008-07-30 12:06:12 -07003951 mutex_unlock(&dev->struct_mutex);
3952
Chris Wilson93533c22010-01-31 10:40:48 +00003953pre_mutex_err:
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003954 /* Copy the updated relocations out regardless of current error
3955 * state. Failure to update the relocs would mean that the next
3956 * time userland calls execbuf, it would do so with presumed offset
3957 * state that didn't match the actual object state.
3958 */
3959 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3960 relocs);
3961 if (ret2 != 0) {
3962 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3963
3964 if (ret == 0)
3965 ret = ret2;
3966 }
3967
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003968 drm_free_large(object_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07003969 kfree(cliprects);
Chris Wilson8dc5d142010-08-12 12:36:12 +01003970 kfree(request);
Eric Anholt673a3942008-07-30 12:06:12 -07003971
3972 return ret;
3973}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;

	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}
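
/*
 * Native exec2 entry point.  Unlike the legacy wrapper above, the object
 * list already arrives in exec2 form, with per-object flags and the target
 * ring selected in args->flags, so it is handed to i915_gem_do_execbuffer()
 * unmodified and only the relocated offsets are copied back out.
 */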
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}
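
/*
 * Pin an object into the GTT, binding it first if necessary.  An alignment
 * of 0 means "use the tiling-derived alignment"; an already-bound but
 * misaligned object is unbound and rebound.  Pins nest via pin_count, and
 * the first pin also moves an inactive object onto the pinned list.
 */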
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (obj_priv->gtt_space != NULL) {
		if (alignment == 0)
			alignment = i915_gem_get_gtt_alignment(obj);
		if (obj_priv->gtt_offset & (alignment - 1)) {
			WARN(obj_priv->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%x, req.alignment=%x\n",
			     obj_priv->gtt_offset, alignment);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret)
			return ret;
	}

	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.pinned_list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
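
/*
 * Drop one pin reference.  Once the last pin is released, an inactive
 * object is returned to the inactive list and the global pin accounting
 * is updated.
 */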
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
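
/*
 * Userspace pin interface, used by clients (historically the X server,
 * which doesn't manage domains yet, per the XXX below) that need a stable
 * GTT offset.  Purgeable buffers may not be pinned, and a buffer pinned by
 * one file may not be pinned by another.
 */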
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		return -ENOENT;
	}
	obj_priv = to_intel_bo(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		return -ENOENT;
	}

	obj_priv = to_intel_bo(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
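
/*
 * Report whether an object is still in use by the GPU.  As the comments
 * below explain, pending writes are flushed and completed requests retired
 * here, so userspace that polls busy_ioctl eventually sees the object go
 * idle without having to take further action.
 */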
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		return -ENOENT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	obj_priv = to_intel_bo(obj);
	args->busy = obj_priv->active;
	if (args->busy) {
		/* Unconditionally flush objects, even when the gpu still uses this
		 * object. Userspace calling this function indicates that it wants to
		 * use this buffer rather sooner than later, so issuing the required
		 * flush earlier is beneficial.
		 */
		if (obj->write_domain & I915_GEM_GPU_DOMAINS)
			i915_gem_flush_ring(dev, file_priv,
					    obj_priv->ring,
					    0, obj->write_domain);

		/* Update the active list for the hardware's current position.
		 * Otherwise this only updates on a delayed timer or when irqs
		 * are actually unmasked, and our working set ends up being
		 * larger than required.
		 */
		i915_gem_retire_requests_ring(dev, obj_priv->ring);

		args->busy = obj_priv->active;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
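
/*
 * I915_MADV_DONTNEED marks an object's backing storage as reclaimable by
 * the shrinker; unbound purgeable objects are truncated right away, and
 * args->retained reports whether the pages are still there.  A userspace
 * buffer cache would use it roughly like this (an illustrative sketch,
 * not code from this tree; regenerate_contents() stands in for whatever
 * refill path the caller has):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = bo_handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		regenerate_contents(bo_handle);
 */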
4358
Chris Wilson3ef94da2009-09-14 16:50:29 +01004359int
4360i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4361 struct drm_file *file_priv)
4362{
4363 struct drm_i915_gem_madvise *args = data;
4364 struct drm_gem_object *obj;
4365 struct drm_i915_gem_object *obj_priv;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004366 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004367
4368 switch (args->madv) {
4369 case I915_MADV_DONTNEED:
4370 case I915_MADV_WILLNEED:
4371 break;
4372 default:
4373 return -EINVAL;
4374 }
4375
4376 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4377 if (obj == NULL) {
4378 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4379 args->handle);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01004380 return -ENOENT;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004381 }
Daniel Vetter23010e42010-03-08 13:35:02 +01004382 obj_priv = to_intel_bo(obj);
Chris Wilson3ef94da2009-09-14 16:50:29 +01004383
Chris Wilson76c1dec2010-09-25 11:22:51 +01004384 ret = i915_mutex_lock_interruptible(dev);
4385 if (ret) {
4386 drm_gem_object_unreference_unlocked(obj);
4387 return ret;
4388 }
4389
Chris Wilson3ef94da2009-09-14 16:50:29 +01004390 if (obj_priv->pin_count) {
4391 drm_gem_object_unreference(obj);
4392 mutex_unlock(&dev->struct_mutex);
4393
4394 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4395 return -EINVAL;
4396 }
4397
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004398 if (obj_priv->madv != __I915_MADV_PURGED)
4399 obj_priv->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004400
Chris Wilson2d7ef392009-09-20 23:13:10 +01004401 /* if the object is no longer bound, discard its backing storage */
4402 if (i915_gem_object_is_purgeable(obj_priv) &&
4403 obj_priv->gtt_space == NULL)
4404 i915_gem_object_truncate(obj);
4405
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004406 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4407
Chris Wilson3ef94da2009-09-14 16:50:29 +01004408 drm_gem_object_unreference(obj);
4409 mutex_unlock(&dev->struct_mutex);
4410
4411 return 0;
4412}
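
/*
 * Allocate a GEM object of @size bytes.  Fresh objects start fully in the
 * CPU domain, unbound, with no fence register and WILLNEED backing store.
 */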
struct drm_gem_object *i915_gem_alloc_object(struct drm_device *dev,
					     size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->agp_type = AGP_USER_MEMORY;
	obj->base.driver_private = NULL;
	obj->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj->list);
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;

	trace_i915_gem_object_create(&obj->base);

	return &obj->base;
}
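
/*
 * The drm core calls this hook for objects it creates itself, but all i915
 * objects must come from i915_gem_alloc_object(), so getting here is a bug.
 */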
int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

static void i915_gem_free_object_tail(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	ret = i915_gem_object_unbind(obj);
	if (ret == -ERESTARTSYS) {
		list_move(&obj_priv->list,
			  &dev_priv->mm.deferred_free_list);
		return;
	}

	if (obj_priv->mmap_offset)
		i915_gem_free_mmap_offset(obj);

	drm_gem_object_release(obj);

	kfree(obj_priv->page_cpu_valid);
	kfree(obj_priv->bit_17);
	kfree(obj_priv);
}

void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	trace_i915_gem_object_destroy(obj);

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_free_object_tail(obj);
}
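
/*
 * Quiesce the GPU and flag the device suspended: wait for outstanding
 * rendering, evict everything under UMS, stop the hangcheck timer and tear
 * the rings down.  Called from the VT-switch and lastclose paths below.
 */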
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended ||
	    (dev_priv->render_ring.gem_object == NULL) ||
	    (HAS_BSD(dev) &&
	     dev_priv->bsd_ring.gem_object == NULL)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_gem_evict_inactive(dev);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer_sync(&dev_priv->hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
static int
i915_gem_init_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret)
		goto err_unref;

	dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
	dev_priv->seqno_page = kmap(obj_priv->pages[0]);
	if (dev_priv->seqno_page == NULL) {
		/* previously fell through with ret still 0 from the pin */
		ret = -ENOMEM;
		goto err_unpin;
	}

	dev_priv->seqno_obj = obj;
	memset(dev_priv->seqno_page, 0, PAGE_SIZE);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

static void
i915_gem_cleanup_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = dev_priv->seqno_obj;
	obj_priv = to_intel_bo(obj);
	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->seqno_obj = NULL;

	dev_priv->seqno_page = NULL;
}
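
/*
 * Bring up the command rings: the PIPE_CONTROL seqno page on 965+, the
 * render ring, and the BSD ring where the hardware has one, then start
 * sequence numbering over from 1.
 */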
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (HAS_PIPE_CONTROL(dev)) {
		ret = i915_gem_init_pipe_control(dev);
		if (ret)
			return ret;
	}

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		goto cleanup_pipe_control;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	dev_priv->next_seqno = 1;

	return 0;

cleanup_render_ring:
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
cleanup_pipe_control:
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
	return ret;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	if (HAS_BSD(dev))
		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
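
/*
 * One-time GEM setup at driver load: initialize the memory-manager and
 * per-ring lists, the retire worker and shrinker registration, apply the
 * GEN3 ARB C3 workaround, and size and clear the fence registers per
 * generation.
 */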
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
	if (HAS_BSD(dev)) {
		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
	}
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_completion(&dev_priv->error_completion);
	spin_lock(&shrink_list_lock);
	list_add(&dev_priv->mm.shrink_list, &shrink_list);
	spin_unlock(&shrink_list_lock);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
		/* fall through: registers 0-7 live at the 830 offsets */
	case 2:
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		break;
	}
	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
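
/*
 * Copy the contents of a phys object back into the object's shmem pages
 * and sever the binding, flushing CPU caches and the chipset so neither
 * side is left with a stale copy.
 */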
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = to_intel_bo(obj);
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

	i915_gem_object_put_pages(obj);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}
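
/*
 * Bind an object to one of the fixed phys-object slots (cursor and overlay
 * registers), allocating the contiguous backing store on first use and
 * seeding it with the object's current page contents.
 */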
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj,
			    int id,
			    int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = to_intel_bo(obj);

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	i915_gem_object_put_pages(obj);

	return 0;
out:
	return ret;
}

static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	mutex_lock(&dev->struct_mutex);
	mutex_lock(&file_priv->mutex);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	mutex_unlock(&file_priv->mutex);
	mutex_unlock(&dev->struct_mutex);
}

static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->render_ring.active_list);
	if (HAS_BSD(dev))
		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);

	return !lists_empty;
}
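
/*
 * Shrinker callback, run under memory pressure.  With nr_to_scan == 0 it
 * only estimates how much could be reclaimed; otherwise it unbinds
 * purgeable buffers first, then anything inactive, and as a last resort
 * idles the GPU and rescans.  Returns -1 while no device lock could be
 * taken (reclaiming here would deadlock), else the remaining object count
 * scaled by sysctl_vfs_cache_pressure.
 */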
static int
i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

rescan:
	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);
		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				i915_gem_object_unbind(&obj_priv->base);
				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (nr_to_scan > 0) {
				i915_gem_object_unbind(&obj_priv->base);
				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;
	}

	if (nr_to_scan) {
		int active = 0;

		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This has a dramatic impact to reduce the number of
		 * OOM-killer events whilst running the GPU aggressively.
		 */
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (!mutex_trylock(&dev->struct_mutex))
				continue;

			spin_unlock(&shrink_list_lock);

			if (i915_gpu_is_active(dev)) {
				i915_gpu_idle(dev);
				active++;
			}

			spin_lock(&shrink_list_lock);
			mutex_unlock(&dev->struct_mutex);
		}

		if (active)
			goto rescan;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;
	else if (cnt > 0)
		return (cnt / 100) * sysctl_vfs_cache_pressure;
	else
		return 0;
}

static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};

__init void
i915_gem_shrinker_init(void)
{
	register_shrinker(&shrinker);
}

__exit void
i915_gem_shrinker_exit(void)
{
	unregister_shrinker(&shrinker);
}