/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/intel-gtt.h>

static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);

static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
						  bool pipelined);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
					   bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

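/* Check for a hung GPU before starting an operation.  If a reset is pending,
 * wait for it to complete: returns 0 when the GPU is usable (possibly after a
 * successful reset), -EIO if it is still wedged, or -ERESTARTSYS if the wait
 * was interrupted by a signal.
 */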
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	/* Success, we reset the GPU! */
	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/* GPU is hung, bump the completion count to account for
	 * the token we just consumed so that we never hit zero and
	 * end up waiting upon a subsequent completion event that
	 * will never happen.
	 */
	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return -EIO;
}

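/* Take struct_mutex, but bail out early if the GPU is wedged.  The wedged
 * flag is re-checked once the lock is held so that a hang declared while we
 * slept still bounces the caller out with -EAGAIN.
 */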
static int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		mutex_unlock(&dev->struct_mutex);
		return -EAGAIN;
	}

	return 0;
}

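/* An object counts as "inactive" when it is bound into the GTT but neither in
 * use by the GPU nor pinned; such objects are the ones kept on the inactive
 * LRU list.
 */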
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->gtt_space &&
		!obj_priv->active &&
		obj_priv->pin_count == 0;
}

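/* Carve out the [start, end) range of the aperture for GEM.  Both ends must
 * be page aligned; the span is handed to the drm_mm allocator that backs all
 * GTT bindings, and dev->gtt_total records the managed size.
 */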
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}


/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	/* Sink the floating reference from kref_init(handlecount) */
	drm_gem_object_handle_unreference_unlocked(obj);

	args->handle = handle;
	return 0;
}

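/* Copy out of the object's shmem backing pages under an atomic kmap.  Page
 * faults are not allowed in this context, so a fault-prone destination makes
 * __copy_to_user_inatomic() fail and the caller falls back to the slow path.
 */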
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}

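/* Manual swizzle fixups are only needed when the platform's swizzle mode
 * depends on physical address bit 17 of each page, which userspace cannot
 * know, and only for tiled objects; pread/pwrite compensate for it here.
 */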
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}

static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which copies directly from the backing
 * pages of the object into the user's address space.  On a fault it fails,
 * so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

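/* First try to grab the backing pages opportunistically (__GFP_NORETRY |
 * __GFP_NOWARN); if that fails with -ENOMEM, evict something of the required
 * size from the GTT and retry the allocation without those flags.
 */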
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;

		ret = i915_gem_evict_something(dev, obj->size,
					       i915_gem_get_gtt_alignment(obj));
		if (ret)
			return ret;

		ret = i915_gem_object_get_pages(obj, 0);
	}

	return ret;
}

/**
 * This is the fallback shmem pread path, which pins the user pages with
 * get_user_pages() ahead of time, so we can copy out of the object's backing
 * pages while holding the struct_mutex without taking page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto fail_put_user_pages;

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

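/* Counterpart of fast_shmem_read(): copy from user space into the object's
 * shmem pages under an atomic kmap; a fault makes it bail with -EFAULT so the
 * slow path can take over.
 */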
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the user memory and then writes it into the GTT through a kernel mapping
 * of the aperture.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out_unpin_pages;

	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the user memory and kmap to copy between the pinned pages and the object's
 * backing pages.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto fail_put_user_pages;

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0 &&
		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg =
				&dev_priv->fence_regs[obj_priv->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	/* Maintain LRU order of "inactive" objects */
	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = to_intel_bo(obj);

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, true);
		if (ret)
			goto unlock;
	}

	if (i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure.  Similarly, if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked.  Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}

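/* Undo i915_gem_create_mmap_offset(): drop the hash table entry, return the
 * offset range to the offset manager and free the map structure.
 */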
static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

Jesse Barnesde151cf2008-11-12 10:03:55 -08001401/**
1402 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1403 * @obj: object to check
1404 *
1405 * Return the required GTT alignment for an object, taking into account
1406 * potential fence register mapping if needed.
1407 */
1408static uint32_t
1409i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1410{
1411 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01001412 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001413 int start, i;
1414
1415 /*
1416 * Minimum alignment is 4k (GTT page size), but might be greater
1417 * if a fence register is needed for the object.
1418 */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001419 if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001420 return 4096;
1421
1422 /*
1423 * Previous chips need to be aligned to the size of the smallest
1424 * fence register that can contain the object.
1425 */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001426 if (INTEL_INFO(dev)->gen == 3)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001427 start = 1024*1024;
1428 else
1429 start = 512*1024;
1430
1431 for (i = start; i < obj->size; i <<= 1)
1432 ;
1433
1434 return i;
1435}
1436
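/*
 * Worked examples (editorial, not from the source): the loop above picks
 * the smallest power-of-two fence size, starting from the per-generation
 * minimum, that covers the object.  A hypothetical 700KB tiled object
 * would therefore need 1MB alignment on gen3 (start = 1MB >= 700KB) and
 * 1MB on gen2 as well (512KB doubled once), while a 3MB tiled object on
 * gen3 would need 4MB (1MB -> 2MB -> 4MB).  Untiled objects, and any
 * object on gen4+, only need the 4KB GTT page alignment.
 */
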
1437/**
1438 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1439 * @dev: DRM device
1440 * @data: GTT mapping ioctl data
 1441 * @file_priv: DRM file private of the caller
1442 *
1443 * Simply returns the fake offset to userspace so it can mmap it.
1444 * The mmap call will end up in drm_gem_mmap(), which will set things
1445 * up so we can get faults in the handler above.
1446 *
1447 * The fault handler will take care of binding the object into the GTT
1448 * (since it may have been evicted to make room for something), allocating
1449 * a fence register, and mapping the appropriate aperture address into
1450 * userspace.
1451 */
1452int
1453i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1454 struct drm_file *file_priv)
1455{
1456 struct drm_i915_gem_mmap_gtt *args = data;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001457 struct drm_gem_object *obj;
1458 struct drm_i915_gem_object *obj_priv;
1459 int ret;
1460
1461 if (!(dev->driver->driver_features & DRIVER_GEM))
1462 return -ENODEV;
1463
1464 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1465 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001466 return -ENOENT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001467
Chris Wilson76c1dec2010-09-25 11:22:51 +01001468 ret = i915_mutex_lock_interruptible(dev);
1469 if (ret) {
1470 drm_gem_object_unreference_unlocked(obj);
1471 return ret;
1472 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001473
Daniel Vetter23010e42010-03-08 13:35:02 +01001474 obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001475
Chris Wilsonab182822009-09-22 18:46:17 +01001476 if (obj_priv->madv != I915_MADV_WILLNEED) {
1477 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1478 drm_gem_object_unreference(obj);
1479 mutex_unlock(&dev->struct_mutex);
1480 return -EINVAL;
1481 }
1482
1483
Jesse Barnesde151cf2008-11-12 10:03:55 -08001484 if (!obj_priv->mmap_offset) {
1485 ret = i915_gem_create_mmap_offset(obj);
Chris Wilson13af1062009-02-11 14:26:31 +00001486 if (ret) {
1487 drm_gem_object_unreference(obj);
1488 mutex_unlock(&dev->struct_mutex);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001489 return ret;
Chris Wilson13af1062009-02-11 14:26:31 +00001490 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001491 }
1492
1493 args->offset = obj_priv->mmap_offset;
1494
Jesse Barnesde151cf2008-11-12 10:03:55 -08001495 /*
1496 * Pull it into the GTT so that we have a page list (makes the
1497 * initial fault faster and any subsequent flushing possible).
1498 */
1499 if (!obj_priv->agp_mem) {
Chris Wilsone67b8ce2009-09-14 16:50:26 +01001500 ret = i915_gem_object_bind_to_gtt(obj, 0);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001501 if (ret) {
1502 drm_gem_object_unreference(obj);
1503 mutex_unlock(&dev->struct_mutex);
1504 return ret;
1505 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001506 }
1507
1508 drm_gem_object_unreference(obj);
1509 mutex_unlock(&dev->struct_mutex);
1510
1511 return 0;
1512}
1513
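/*
 * Illustrative userspace sketch (editorial, not part of the driver):
 * how a client would typically consume the fake offset returned by the
 * ioctl above.  "fd" is an open DRM device fd and "handle"/"size"
 * describe a previously created GEM object; error handling is trimmed.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *map_bo_through_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;

	/* Ask the kernel for the fake offset set up by the driver. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return NULL;

	/* mmap() of the DRM fd at that offset faults into i915_gem_fault(). */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}
#endif
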
Ben Gamari6911a9b2009-04-02 11:24:54 -07001514void
Eric Anholt856fa192009-03-19 14:10:50 -07001515i915_gem_object_put_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001516{
Daniel Vetter23010e42010-03-08 13:35:02 +01001517 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001518 int page_count = obj->size / PAGE_SIZE;
1519 int i;
1520
Eric Anholt856fa192009-03-19 14:10:50 -07001521 BUG_ON(obj_priv->pages_refcount == 0);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001522 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001523
1524 if (--obj_priv->pages_refcount != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07001525 return;
1526
Eric Anholt280b7132009-03-12 16:56:27 -07001527 if (obj_priv->tiling_mode != I915_TILING_NONE)
1528 i915_gem_object_save_bit_17_swizzle(obj);
1529
Chris Wilson3ef94da2009-09-14 16:50:29 +01001530 if (obj_priv->madv == I915_MADV_DONTNEED)
Chris Wilson13a05fd2009-09-20 23:03:19 +01001531 obj_priv->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001532
1533 for (i = 0; i < page_count; i++) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01001534 if (obj_priv->dirty)
1535 set_page_dirty(obj_priv->pages[i]);
1536
1537 if (obj_priv->madv == I915_MADV_WILLNEED)
Eric Anholt856fa192009-03-19 14:10:50 -07001538 mark_page_accessed(obj_priv->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001539
1540 page_cache_release(obj_priv->pages[i]);
1541 }
Eric Anholt673a3942008-07-30 12:06:12 -07001542 obj_priv->dirty = 0;
1543
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07001544 drm_free_large(obj_priv->pages);
Eric Anholt856fa192009-03-19 14:10:50 -07001545 obj_priv->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001546}
1547
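/*
 * Editorial note (not from the source): on release, pages the GPU or a
 * GTT mapping may have written are marked dirty so shmem writes them
 * back if they are reclaimed, and WILLNEED pages are marked accessed to
 * keep them warm in the page cache; DONTNEED objects skip the dirty
 * step because their contents are disposable.
 */
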
Chris Wilsona56ba562010-09-28 10:07:56 +01001548static uint32_t
1549i915_gem_next_request_seqno(struct drm_device *dev,
1550 struct intel_ring_buffer *ring)
1551{
1552 drm_i915_private_t *dev_priv = dev->dev_private;
1553
1554 ring->outstanding_lazy_request = true;
1555 return dev_priv->next_seqno;
1556}
1557
Eric Anholt673a3942008-07-30 12:06:12 -07001558static void
Daniel Vetter617dbe22010-02-11 22:16:02 +01001559i915_gem_object_move_to_active(struct drm_gem_object *obj,
Zou Nan hai852835f2010-05-21 09:08:56 +08001560 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001561{
Chris Wilsona56ba562010-09-28 10:07:56 +01001562 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01001563 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilsona56ba562010-09-28 10:07:56 +01001564 uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001565
Zou Nan hai852835f2010-05-21 09:08:56 +08001566 BUG_ON(ring == NULL);
1567 obj_priv->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001568
1569 /* Add a reference if we're newly entering the active list. */
1570 if (!obj_priv->active) {
1571 drm_gem_object_reference(obj);
1572 obj_priv->active = 1;
1573 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001574
Eric Anholt673a3942008-07-30 12:06:12 -07001575 /* Move from whatever list we were on to the tail of execution. */
Zou Nan hai852835f2010-05-21 09:08:56 +08001576 list_move_tail(&obj_priv->list, &ring->active_list);
Chris Wilsona56ba562010-09-28 10:07:56 +01001577 obj_priv->last_rendering_seqno = seqno;
Eric Anholt673a3942008-07-30 12:06:12 -07001578}
1579
Eric Anholtce44b0e2008-11-06 16:00:31 -08001580static void
1581i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1582{
1583 struct drm_device *dev = obj->dev;
1584 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001585 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001586
1587 BUG_ON(!obj_priv->active);
1588 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1589 obj_priv->last_rendering_seqno = 0;
1590}
Eric Anholt673a3942008-07-30 12:06:12 -07001591
Chris Wilson963b4832009-09-20 23:03:54 +01001592/* Immediately discard the backing storage */
1593static void
1594i915_gem_object_truncate(struct drm_gem_object *obj)
1595{
Daniel Vetter23010e42010-03-08 13:35:02 +01001596 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001597 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001598
Chris Wilsonae9fed62010-08-07 11:01:30 +01001599 /* Our goal here is to return as much of the memory as
1600 * is possible back to the system as we are called from OOM.
1601 * To do this we must instruct the shmfs to drop all of its
1602 * backing pages, *now*. Here we mirror the actions taken
 1603 * by shmem_delete_inode() to release the backing store.
1604 */
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001605 inode = obj->filp->f_path.dentry->d_inode;
Chris Wilsonae9fed62010-08-07 11:01:30 +01001606 truncate_inode_pages(inode->i_mapping, 0);
1607 if (inode->i_op->truncate_range)
1608 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001609
1610 obj_priv->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001611}
1612
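/*
 * Editorial summary of the madv states used above (not from the source):
 * objects start as I915_MADV_WILLNEED; userspace may mark them
 * I915_MADV_DONTNEED, making them eligible for purging under memory
 * pressure; once i915_gem_object_truncate() has run they become
 * __I915_MADV_PURGED, their backing store is gone, and attempts to bind
 * or GTT-map them are rejected for the rest of the object's life.
 */
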
1613static inline int
1614i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1615{
1616 return obj_priv->madv == I915_MADV_DONTNEED;
1617}
1618
Eric Anholt673a3942008-07-30 12:06:12 -07001619static void
1620i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1621{
1622 struct drm_device *dev = obj->dev;
1623 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001624 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001625
1626 i915_verify_inactive(dev, __FILE__, __LINE__);
1627 if (obj_priv->pin_count != 0)
Chris Wilsonf13d3f72010-09-20 17:36:15 +01001628 list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001629 else
1630 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1631
Daniel Vetter99fcb762010-02-07 16:20:18 +01001632 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1633
Eric Anholtce44b0e2008-11-06 16:00:31 -08001634 obj_priv->last_rendering_seqno = 0;
Zou Nan hai852835f2010-05-21 09:08:56 +08001635 obj_priv->ring = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001636 if (obj_priv->active) {
1637 obj_priv->active = 0;
1638 drm_gem_object_unreference(obj);
1639 }
1640 i915_verify_inactive(dev, __FILE__, __LINE__);
1641}
1642
Chris Wilson92204342010-09-18 11:02:01 +01001643static void
Daniel Vetter63560392010-02-19 11:51:59 +01001644i915_gem_process_flushing_list(struct drm_device *dev,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001645 uint32_t flush_domains,
Zou Nan hai852835f2010-05-21 09:08:56 +08001646 struct intel_ring_buffer *ring)
Daniel Vetter63560392010-02-19 11:51:59 +01001647{
1648 drm_i915_private_t *dev_priv = dev->dev_private;
1649 struct drm_i915_gem_object *obj_priv, *next;
1650
1651 list_for_each_entry_safe(obj_priv, next,
1652 &dev_priv->mm.gpu_write_list,
1653 gpu_write_list) {
Daniel Vettera8089e82010-04-09 19:05:09 +00001654 struct drm_gem_object *obj = &obj_priv->base;
Daniel Vetter63560392010-02-19 11:51:59 +01001655
Chris Wilson2b6efaa2010-09-14 17:04:02 +01001656 if (obj->write_domain & flush_domains &&
1657 obj_priv->ring == ring) {
Daniel Vetter63560392010-02-19 11:51:59 +01001658 uint32_t old_write_domain = obj->write_domain;
1659
1660 obj->write_domain = 0;
1661 list_del_init(&obj_priv->gpu_write_list);
Daniel Vetter617dbe22010-02-11 22:16:02 +01001662 i915_gem_object_move_to_active(obj, ring);
Daniel Vetter63560392010-02-19 11:51:59 +01001663
1664 /* update the fence lru list */
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001665 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1666 struct drm_i915_fence_reg *reg =
1667 &dev_priv->fence_regs[obj_priv->fence_reg];
1668 list_move_tail(&reg->lru_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001669 &dev_priv->mm.fence_list);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001670 }
Daniel Vetter63560392010-02-19 11:51:59 +01001671
1672 trace_i915_gem_object_change_domain(obj,
1673 obj->read_domains,
1674 old_write_domain);
1675 }
1676 }
1677}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001678
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001679uint32_t
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001680i915_add_request(struct drm_device *dev,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001681 struct drm_file *file,
Chris Wilson8dc5d142010-08-12 12:36:12 +01001682 struct drm_i915_gem_request *request,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001683 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001684{
1685 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001686 struct drm_i915_file_private *file_priv = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001687 uint32_t seqno;
1688 int was_empty;
Eric Anholt673a3942008-07-30 12:06:12 -07001689
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001690 if (file != NULL)
1691 file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001692
Chris Wilson8dc5d142010-08-12 12:36:12 +01001693 if (request == NULL) {
1694 request = kzalloc(sizeof(*request), GFP_KERNEL);
1695 if (request == NULL)
1696 return 0;
1697 }
Eric Anholt673a3942008-07-30 12:06:12 -07001698
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001699 seqno = ring->add_request(dev, ring, 0);
Chris Wilsona56ba562010-09-28 10:07:56 +01001700 ring->outstanding_lazy_request = false;
Eric Anholt673a3942008-07-30 12:06:12 -07001701
1702 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001703 request->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001704 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001705 was_empty = list_empty(&ring->request_list);
1706 list_add_tail(&request->list, &ring->request_list);
1707
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001708 if (file_priv) {
Chris Wilson1c255952010-09-26 11:03:27 +01001709 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001710 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001711 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001712 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01001713 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00001714 }
Eric Anholt673a3942008-07-30 12:06:12 -07001715
Ben Gamarif65d9422009-09-14 17:48:44 -04001716 if (!dev_priv->mm.suspended) {
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001717 mod_timer(&dev_priv->hangcheck_timer,
1718 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
Ben Gamarif65d9422009-09-14 17:48:44 -04001719 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001720 queue_delayed_work(dev_priv->wq,
1721 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001722 }
Eric Anholt673a3942008-07-30 12:06:12 -07001723 return seqno;
1724}
1725
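/*
 * Editorial summary (not from the source): a request added here is
 * retired in three steps.  The ring writes the request's seqno to its
 * status page once the preceding commands complete,
 * i915_gem_retire_requests_ring() compares that value against each
 * queued request, and objects whose last_rendering_seqno has been
 * passed are moved off the active list.  The hangcheck timer armed
 * above fires if the seqno stops advancing.
 */
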
1726/**
1727 * Command execution barrier
1728 *
1729 * Ensures that all commands in the ring are finished
1730 * before signalling the CPU
1731 */
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001732static void
Zou Nan hai852835f2010-05-21 09:08:56 +08001733i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001734{
Eric Anholt673a3942008-07-30 12:06:12 -07001735 uint32_t flush_domains = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001736
1737 /* The sampler always gets flushed on i965 (sigh) */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001738 if (INTEL_INFO(dev)->gen >= 4)
Eric Anholt673a3942008-07-30 12:06:12 -07001739 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
Zou Nan hai852835f2010-05-21 09:08:56 +08001740
1741 ring->flush(dev, ring,
1742 I915_GEM_DOMAIN_COMMAND, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07001743}
1744
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001745static inline void
1746i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001747{
Chris Wilson1c255952010-09-26 11:03:27 +01001748 struct drm_i915_file_private *file_priv = request->file_priv;
1749
1750 if (!file_priv)
1751 return;
1752
1753 spin_lock(&file_priv->mm.lock);
1754 list_del(&request->client_list);
1755 request->file_priv = NULL;
1756 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001757}
1758
Chris Wilsondfaae392010-09-22 10:31:52 +01001759static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1760 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001761{
Chris Wilsondfaae392010-09-22 10:31:52 +01001762 while (!list_empty(&ring->request_list)) {
1763 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001764
Chris Wilsondfaae392010-09-22 10:31:52 +01001765 request = list_first_entry(&ring->request_list,
1766 struct drm_i915_gem_request,
1767 list);
1768
1769 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001770 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001771 kfree(request);
1772 }
1773
1774 while (!list_empty(&ring->active_list)) {
Chris Wilson9375e442010-09-19 12:21:28 +01001775 struct drm_i915_gem_object *obj_priv;
1776
Chris Wilsondfaae392010-09-22 10:31:52 +01001777 obj_priv = list_first_entry(&ring->active_list,
1778 struct drm_i915_gem_object,
1779 list);
1780
1781 obj_priv->base.write_domain = 0;
1782 list_del_init(&obj_priv->gpu_write_list);
1783 i915_gem_object_move_to_inactive(&obj_priv->base);
1784 }
1785}
1786
1787void i915_gem_reset_lists(struct drm_device *dev)
1788{
1789 struct drm_i915_private *dev_priv = dev->dev_private;
1790 struct drm_i915_gem_object *obj_priv;
1791
1792 i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
1793 if (HAS_BSD(dev))
1794 i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
1795
1796 /* Remove anything from the flushing lists. The GPU cache is likely
1797 * to be lost on reset along with the data, so simply move the
1798 * lost bo to the inactive list.
1799 */
1800 while (!list_empty(&dev_priv->mm.flushing_list)) {
Chris Wilson9375e442010-09-19 12:21:28 +01001801 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1802 struct drm_i915_gem_object,
1803 list);
1804
1805 obj_priv->base.write_domain = 0;
Chris Wilsondfaae392010-09-22 10:31:52 +01001806 list_del_init(&obj_priv->gpu_write_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001807 i915_gem_object_move_to_inactive(&obj_priv->base);
1808 }
Chris Wilson9375e442010-09-19 12:21:28 +01001809
Chris Wilsondfaae392010-09-22 10:31:52 +01001810 /* Move everything out of the GPU domains to ensure we do any
1811 * necessary invalidation upon reuse.
1812 */
Chris Wilson77f01232010-09-19 12:31:36 +01001813 list_for_each_entry(obj_priv,
1814 &dev_priv->mm.inactive_list,
1815 list)
1816 {
1817 obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1818 }
1819}
1820
Eric Anholt673a3942008-07-30 12:06:12 -07001821/**
1822 * This function clears the request list as sequence numbers are passed.
1823 */
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001824static void
1825i915_gem_retire_requests_ring(struct drm_device *dev,
1826 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001827{
1828 drm_i915_private_t *dev_priv = dev->dev_private;
1829 uint32_t seqno;
1830
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001831 if (!ring->status_page.page_addr ||
1832 list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001833 return;
1834
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001835 seqno = ring->get_seqno(dev, ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08001836 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001837 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001838
Zou Nan hai852835f2010-05-21 09:08:56 +08001839 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001840 struct drm_i915_gem_request,
1841 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001842
Chris Wilsondfaae392010-09-22 10:31:52 +01001843 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001844 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001845
1846 trace_i915_gem_request_retire(dev, request->seqno);
1847
1848 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001849 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001850 kfree(request);
1851 }
1852
1853 /* Move any buffers on the active list that are no longer referenced
1854 * by the ringbuffer to the flushing/inactive lists as appropriate.
1855 */
1856 while (!list_empty(&ring->active_list)) {
1857 struct drm_gem_object *obj;
1858 struct drm_i915_gem_object *obj_priv;
1859
1860 obj_priv = list_first_entry(&ring->active_list,
1861 struct drm_i915_gem_object,
1862 list);
1863
Chris Wilsondfaae392010-09-22 10:31:52 +01001864 if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001865 break;
1866
1867 obj = &obj_priv->base;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001868 if (obj->write_domain != 0)
1869 i915_gem_object_move_to_flushing(obj);
1870 else
1871 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001872 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001873
 1874	if (unlikely(dev_priv->trace_irq_seqno &&
1875 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001876 ring->user_irq_put(dev, ring);
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001877 dev_priv->trace_irq_seqno = 0;
1878 }
Eric Anholt673a3942008-07-30 12:06:12 -07001879}
1880
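/*
 * Editorial sketch (the helper is defined outside this excerpt): the
 * i915_seqno_passed() check used above has to tolerate the 32-bit seqno
 * wrapping, so it is conventionally written as a signed subtraction
 * rather than a plain ">=" compare, e.g.:
 *
 *	static int i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 *	{
 *		return (int32_t)(seq1 - seq2) >= 0;
 *	}
 *
 * With that form, a seqno issued just after wraparound still counts as
 * having passed one issued just before it.
 */
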
1881void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001882i915_gem_retire_requests(struct drm_device *dev)
1883{
1884 drm_i915_private_t *dev_priv = dev->dev_private;
1885
Chris Wilsonbe726152010-07-23 23:18:50 +01001886 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1887 struct drm_i915_gem_object *obj_priv, *tmp;
1888
1889 /* We must be careful that during unbind() we do not
1890 * accidentally infinitely recurse into retire requests.
1891 * Currently:
1892 * retire -> free -> unbind -> wait -> retire_ring
1893 */
1894 list_for_each_entry_safe(obj_priv, tmp,
1895 &dev_priv->mm.deferred_free_list,
1896 list)
1897 i915_gem_free_object_tail(&obj_priv->base);
1898 }
1899
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001900 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
1901 if (HAS_BSD(dev))
1902 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
1903}
1904
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001905static void
Eric Anholt673a3942008-07-30 12:06:12 -07001906i915_gem_retire_work_handler(struct work_struct *work)
1907{
1908 drm_i915_private_t *dev_priv;
1909 struct drm_device *dev;
1910
1911 dev_priv = container_of(work, drm_i915_private_t,
1912 mm.retire_work.work);
1913 dev = dev_priv->dev;
1914
1915 mutex_lock(&dev->struct_mutex);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001916 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001917
Keith Packard6dbe2772008-10-14 21:41:13 -07001918 if (!dev_priv->mm.suspended &&
Zou Nan haid1b851f2010-05-21 09:08:57 +08001919 (!list_empty(&dev_priv->render_ring.request_list) ||
1920 (HAS_BSD(dev) &&
1921 !list_empty(&dev_priv->bsd_ring.request_list))))
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001922 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Eric Anholt673a3942008-07-30 12:06:12 -07001923 mutex_unlock(&dev->struct_mutex);
1924}
1925
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001926int
Zou Nan hai852835f2010-05-21 09:08:56 +08001927i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01001928 bool interruptible, struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001929{
1930 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001931 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001932 int ret = 0;
1933
1934 BUG_ON(seqno == 0);
1935
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001936 if (atomic_read(&dev_priv->mm.wedged))
1937 return -EAGAIN;
1938
Chris Wilsona56ba562010-09-28 10:07:56 +01001939 if (ring->outstanding_lazy_request) {
Chris Wilson8dc5d142010-08-12 12:36:12 +01001940 seqno = i915_add_request(dev, NULL, NULL, ring);
Daniel Vettere35a41d2010-02-11 22:13:59 +01001941 if (seqno == 0)
1942 return -ENOMEM;
1943 }
Chris Wilsona56ba562010-09-28 10:07:56 +01001944 BUG_ON(seqno == dev_priv->next_seqno);
Daniel Vettere35a41d2010-02-11 22:13:59 +01001945
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001946 if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
Eric Anholtbad720f2009-10-22 16:11:14 -07001947 if (HAS_PCH_SPLIT(dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001948 ier = I915_READ(DEIER) | I915_READ(GTIER);
1949 else
1950 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001951 if (!ier) {
1952 DRM_ERROR("something (likely vbetool) disabled "
1953 "interrupts, re-enabling\n");
1954 i915_driver_irq_preinstall(dev);
1955 i915_driver_irq_postinstall(dev);
1956 }
1957
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001958 trace_i915_gem_request_wait_begin(dev, seqno);
1959
Zou Nan hai852835f2010-05-21 09:08:56 +08001960 ring->waiting_gem_seqno = seqno;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001961 ring->user_irq_get(dev, ring);
Daniel Vetter48764bf2009-09-15 22:57:32 +02001962 if (interruptible)
Zou Nan hai852835f2010-05-21 09:08:56 +08001963 ret = wait_event_interruptible(ring->irq_queue,
1964 i915_seqno_passed(
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001965 ring->get_seqno(dev, ring), seqno)
Zou Nan hai852835f2010-05-21 09:08:56 +08001966 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001967 else
Zou Nan hai852835f2010-05-21 09:08:56 +08001968 wait_event(ring->irq_queue,
1969 i915_seqno_passed(
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001970 ring->get_seqno(dev, ring), seqno)
Zou Nan hai852835f2010-05-21 09:08:56 +08001971 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001972
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001973 ring->user_irq_put(dev, ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08001974 ring->waiting_gem_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001975
1976 trace_i915_gem_request_wait_end(dev, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001977 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001978 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001979 ret = -EAGAIN;
Eric Anholt673a3942008-07-30 12:06:12 -07001980
1981 if (ret && ret != -ERESTARTSYS)
Daniel Vetter8bff9172010-02-11 22:19:40 +01001982 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001983 __func__, ret, seqno, ring->get_seqno(dev, ring),
Daniel Vetter8bff9172010-02-11 22:19:40 +01001984 dev_priv->next_seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001985
1986 /* Directly dispatch request retiring. While we have the work queue
1987 * to handle this, the waiter on a request often wants an associated
1988 * buffer to have made it to the inactive list, and we would need
1989 * a separate wait queue to handle that.
1990 */
1991 if (ret == 0)
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001992 i915_gem_retire_requests_ring(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001993
1994 return ret;
1995}
1996
Daniel Vetter48764bf2009-09-15 22:57:32 +02001997/**
1998 * Waits for a sequence number to be signaled, and cleans up the
1999 * request and object lists appropriately for that event.
2000 */
2001static int
Zou Nan hai852835f2010-05-21 09:08:56 +08002002i915_wait_request(struct drm_device *dev, uint32_t seqno,
Chris Wilsona56ba562010-09-28 10:07:56 +01002003 struct intel_ring_buffer *ring)
Daniel Vetter48764bf2009-09-15 22:57:32 +02002004{
Zou Nan hai852835f2010-05-21 09:08:56 +08002005 return i915_do_wait_request(dev, seqno, 1, ring);
Daniel Vetter48764bf2009-09-15 22:57:32 +02002006}
2007
Chris Wilson20f0cd52010-09-23 11:00:38 +01002008static void
Chris Wilson92204342010-09-18 11:02:01 +01002009i915_gem_flush_ring(struct drm_device *dev,
Chris Wilsonc78ec302010-09-20 12:50:23 +01002010 struct drm_file *file_priv,
Chris Wilson92204342010-09-18 11:02:01 +01002011 struct intel_ring_buffer *ring,
2012 uint32_t invalidate_domains,
2013 uint32_t flush_domains)
2014{
2015 ring->flush(dev, ring, invalidate_domains, flush_domains);
2016 i915_gem_process_flushing_list(dev, flush_domains, ring);
2017}
2018
2019static void
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002020i915_gem_flush(struct drm_device *dev,
Chris Wilsonc78ec302010-09-20 12:50:23 +01002021 struct drm_file *file_priv,
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002022 uint32_t invalidate_domains,
Chris Wilson92204342010-09-18 11:02:01 +01002023 uint32_t flush_domains,
2024 uint32_t flush_rings)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002025{
2026 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter8bff9172010-02-11 22:19:40 +01002027
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002028 if (flush_domains & I915_GEM_DOMAIN_CPU)
2029 drm_agp_chipset_flush(dev);
Daniel Vetter8bff9172010-02-11 22:19:40 +01002030
Chris Wilson92204342010-09-18 11:02:01 +01002031 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
2032 if (flush_rings & RING_RENDER)
Chris Wilsonc78ec302010-09-20 12:50:23 +01002033 i915_gem_flush_ring(dev, file_priv,
Chris Wilson92204342010-09-18 11:02:01 +01002034 &dev_priv->render_ring,
2035 invalidate_domains, flush_domains);
2036 if (flush_rings & RING_BSD)
Chris Wilsonc78ec302010-09-20 12:50:23 +01002037 i915_gem_flush_ring(dev, file_priv,
Chris Wilson92204342010-09-18 11:02:01 +01002038 &dev_priv->bsd_ring,
2039 invalidate_domains, flush_domains);
2040 }
Zou Nan hai8187a2b2010-05-21 09:08:55 +08002041}
2042
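/*
 * Editorial note (not from the source): flush_rings is a bitmask naming
 * which rings need a GPU flush (RING_RENDER and/or RING_BSD), while a
 * CPU-domain flush only needs the chipset flush above.  Each per-ring
 * flush also walks the gpu_write_list so freshly flushed objects get
 * moved onto that ring's active list.
 */
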
Eric Anholt673a3942008-07-30 12:06:12 -07002043/**
2044 * Ensures that all rendering to the object has completed and the object is
2045 * safe to unbind from the GTT or access from the CPU.
2046 */
2047static int
Chris Wilson2cf34d72010-09-14 13:03:28 +01002048i915_gem_object_wait_rendering(struct drm_gem_object *obj,
2049 bool interruptible)
Eric Anholt673a3942008-07-30 12:06:12 -07002050{
2051 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002052 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002053 int ret;
2054
Eric Anholte47c68e2008-11-14 13:35:19 -08002055 /* This function only exists to support waiting for existing rendering,
2056 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07002057 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002058 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07002059
2060 /* If there is rendering queued on the buffer being evicted, wait for
2061 * it.
2062 */
2063 if (obj_priv->active) {
2064#if WATCH_BUF
2065 DRM_INFO("%s: object %p wait for seqno %08x\n",
2066 __func__, obj, obj_priv->last_rendering_seqno);
2067#endif
Chris Wilson2cf34d72010-09-14 13:03:28 +01002068 ret = i915_do_wait_request(dev,
2069 obj_priv->last_rendering_seqno,
2070 interruptible,
2071 obj_priv->ring);
2072 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002073 return ret;
2074 }
2075
2076 return 0;
2077}
2078
2079/**
2080 * Unbinds an object from the GTT aperture.
2081 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002082int
Eric Anholt673a3942008-07-30 12:06:12 -07002083i915_gem_object_unbind(struct drm_gem_object *obj)
2084{
2085 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002086 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002087 int ret = 0;
2088
2089#if WATCH_BUF
2090 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
2091 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
2092#endif
2093 if (obj_priv->gtt_space == NULL)
2094 return 0;
2095
2096 if (obj_priv->pin_count != 0) {
2097 DRM_ERROR("Attempting to unbind pinned buffer\n");
2098 return -EINVAL;
2099 }
2100
Eric Anholt5323fd02009-09-09 11:50:45 -07002101 /* blow away mappings if mapped through GTT */
2102 i915_gem_release_mmap(obj);
2103
Eric Anholt673a3942008-07-30 12:06:12 -07002104 /* Move the object to the CPU domain to ensure that
2105 * any possible CPU writes while it's not in the GTT
2106 * are flushed when we go to remap it. This will
2107 * also ensure that all pending GPU writes are finished
2108 * before we unbind.
2109 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002110 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Chris Wilson8dc17752010-07-23 23:18:51 +01002111 if (ret == -ERESTARTSYS)
Eric Anholt673a3942008-07-30 12:06:12 -07002112 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002113 /* Continue on if we fail due to EIO, the GPU is hung so we
2114 * should be safe and we need to cleanup or else we might
2115 * cause memory corruption through use-after-free.
2116 */
Eric Anholt673a3942008-07-30 12:06:12 -07002117
Daniel Vetter96b47b62009-12-15 17:50:00 +01002118 /* release the fence reg _after_ flushing */
2119 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2120 i915_gem_clear_fence_reg(obj);
2121
Eric Anholt673a3942008-07-30 12:06:12 -07002122 if (obj_priv->agp_mem != NULL) {
2123 drm_unbind_agp(obj_priv->agp_mem);
2124 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2125 obj_priv->agp_mem = NULL;
2126 }
2127
Eric Anholt856fa192009-03-19 14:10:50 -07002128 i915_gem_object_put_pages(obj);
Chris Wilsona32808c2009-09-20 21:29:47 +01002129 BUG_ON(obj_priv->pages_refcount);
Eric Anholt673a3942008-07-30 12:06:12 -07002130
2131 if (obj_priv->gtt_space) {
2132 atomic_dec(&dev->gtt_count);
2133 atomic_sub(obj->size, &dev->gtt_memory);
2134
2135 drm_mm_put_block(obj_priv->gtt_space);
2136 obj_priv->gtt_space = NULL;
2137 }
2138
Chris Wilsonf13d3f72010-09-20 17:36:15 +01002139 list_del_init(&obj_priv->list);
Eric Anholt673a3942008-07-30 12:06:12 -07002140
Chris Wilson963b4832009-09-20 23:03:54 +01002141 if (i915_gem_object_is_purgeable(obj_priv))
2142 i915_gem_object_truncate(obj);
2143
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002144 trace_i915_gem_object_unbind(obj);
2145
Chris Wilson8dc17752010-07-23 23:18:51 +01002146 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002147}
2148
Chris Wilsona56ba562010-09-28 10:07:56 +01002149static int i915_ring_idle(struct drm_device *dev,
2150 struct intel_ring_buffer *ring)
2151{
2152 i915_gem_flush_ring(dev, NULL, ring,
2153 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2154 return i915_wait_request(dev,
2155 i915_gem_next_request_seqno(dev, ring),
2156 ring);
2157}
2158
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01002159int
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002160i915_gpu_idle(struct drm_device *dev)
2161{
2162 drm_i915_private_t *dev_priv = dev->dev_private;
2163 bool lists_empty;
Zou Nan hai852835f2010-05-21 09:08:56 +08002164 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002165
Zou Nan haid1b851f2010-05-21 09:08:57 +08002166 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2167 list_empty(&dev_priv->render_ring.active_list) &&
2168 (!HAS_BSD(dev) ||
2169 list_empty(&dev_priv->bsd_ring.active_list)));
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002170 if (lists_empty)
2171 return 0;
2172
2173 /* Flush everything onto the inactive list. */
Chris Wilsona56ba562010-09-28 10:07:56 +01002174 ret = i915_ring_idle(dev, &dev_priv->render_ring);
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002175 if (ret)
2176 return ret;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002177
2178 if (HAS_BSD(dev)) {
Chris Wilsona56ba562010-09-28 10:07:56 +01002179 ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002180 if (ret)
2181 return ret;
2182 }
2183
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002184 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002185}
2186
Ben Gamari6911a9b2009-04-02 11:24:54 -07002187int
Chris Wilson4bdadb92010-01-27 13:36:32 +00002188i915_gem_object_get_pages(struct drm_gem_object *obj,
2189 gfp_t gfpmask)
Eric Anholt673a3942008-07-30 12:06:12 -07002190{
Daniel Vetter23010e42010-03-08 13:35:02 +01002191 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002192 int page_count, i;
2193 struct address_space *mapping;
2194 struct inode *inode;
2195 struct page *page;
Eric Anholt673a3942008-07-30 12:06:12 -07002196
Daniel Vetter778c3542010-05-13 11:49:44 +02002197 BUG_ON(obj_priv->pages_refcount
2198 == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2199
Eric Anholt856fa192009-03-19 14:10:50 -07002200 if (obj_priv->pages_refcount++ != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002201 return 0;
2202
2203 /* Get the list of pages out of our struct file. They'll be pinned
2204 * at this point until we release them.
2205 */
2206 page_count = obj->size / PAGE_SIZE;
Eric Anholt856fa192009-03-19 14:10:50 -07002207 BUG_ON(obj_priv->pages != NULL);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07002208 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
Eric Anholt856fa192009-03-19 14:10:50 -07002209 if (obj_priv->pages == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002210 obj_priv->pages_refcount--;
Eric Anholt673a3942008-07-30 12:06:12 -07002211 return -ENOMEM;
2212 }
2213
2214 inode = obj->filp->f_path.dentry->d_inode;
2215 mapping = inode->i_mapping;
2216 for (i = 0; i < page_count; i++) {
Chris Wilson4bdadb92010-01-27 13:36:32 +00002217 page = read_cache_page_gfp(mapping, i,
Linus Torvalds985b8232010-07-02 10:04:42 +10002218 GFP_HIGHUSER |
Chris Wilson4bdadb92010-01-27 13:36:32 +00002219 __GFP_COLD |
Linus Torvaldscd9f0402010-07-18 09:44:37 -07002220 __GFP_RECLAIMABLE |
Chris Wilson4bdadb92010-01-27 13:36:32 +00002221 gfpmask);
Chris Wilson1f2b1012010-03-12 19:52:55 +00002222 if (IS_ERR(page))
2223 goto err_pages;
2224
Eric Anholt856fa192009-03-19 14:10:50 -07002225 obj_priv->pages[i] = page;
Eric Anholt673a3942008-07-30 12:06:12 -07002226 }
Eric Anholt280b7132009-03-12 16:56:27 -07002227
2228 if (obj_priv->tiling_mode != I915_TILING_NONE)
2229 i915_gem_object_do_bit_17_swizzle(obj);
2230
Eric Anholt673a3942008-07-30 12:06:12 -07002231 return 0;
Chris Wilson1f2b1012010-03-12 19:52:55 +00002232
2233err_pages:
2234 while (i--)
2235 page_cache_release(obj_priv->pages[i]);
2236
2237 drm_free_large(obj_priv->pages);
2238 obj_priv->pages = NULL;
2239 obj_priv->pages_refcount--;
2240 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07002241}
2242
Eric Anholt4e901fd2009-10-26 16:44:17 -07002243static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2244{
2245 struct drm_gem_object *obj = reg->obj;
2246 struct drm_device *dev = obj->dev;
2247 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002248 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt4e901fd2009-10-26 16:44:17 -07002249 int regnum = obj_priv->fence_reg;
2250 uint64_t val;
2251
2252 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2253 0xfffff000) << 32;
2254 val |= obj_priv->gtt_offset & 0xfffff000;
2255 val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2256 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2257
2258 if (obj_priv->tiling_mode == I915_TILING_Y)
2259 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2260 val |= I965_FENCE_REG_VALID;
2261
2262 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2263}
2264
Jesse Barnesde151cf2008-11-12 10:03:55 -08002265static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2266{
2267 struct drm_gem_object *obj = reg->obj;
2268 struct drm_device *dev = obj->dev;
2269 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002270 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002271 int regnum = obj_priv->fence_reg;
2272 uint64_t val;
2273
2274 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2275 0xfffff000) << 32;
2276 val |= obj_priv->gtt_offset & 0xfffff000;
2277 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2278 if (obj_priv->tiling_mode == I915_TILING_Y)
2279 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2280 val |= I965_FENCE_REG_VALID;
2281
2282 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2283}
2284
2285static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2286{
2287 struct drm_gem_object *obj = reg->obj;
2288 struct drm_device *dev = obj->dev;
2289 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002290 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002291 int regnum = obj_priv->fence_reg;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002292 int tile_width;
Eric Anholtdc529a42009-03-10 22:34:49 -07002293 uint32_t fence_reg, val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002294 uint32_t pitch_val;
2295
2296 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2297 (obj_priv->gtt_offset & (obj->size - 1))) {
Linus Torvaldsf06da262009-02-09 08:57:29 -08002298 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002299 __func__, obj_priv->gtt_offset, obj->size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002300 return;
2301 }
2302
Jesse Barnes0f973f22009-01-26 17:10:45 -08002303 if (obj_priv->tiling_mode == I915_TILING_Y &&
2304 HAS_128_BYTE_Y_TILING(dev))
2305 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002306 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002307 tile_width = 512;
2308
2309 /* Note: pitch better be a power of two tile widths */
2310 pitch_val = obj_priv->stride / tile_width;
2311 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002312
Daniel Vetterc36a2a62010-04-17 15:12:03 +02002313 if (obj_priv->tiling_mode == I915_TILING_Y &&
2314 HAS_128_BYTE_Y_TILING(dev))
2315 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2316 else
2317 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2318
Jesse Barnesde151cf2008-11-12 10:03:55 -08002319 val = obj_priv->gtt_offset;
2320 if (obj_priv->tiling_mode == I915_TILING_Y)
2321 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2322 val |= I915_FENCE_SIZE_BITS(obj->size);
2323 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2324 val |= I830_FENCE_REG_VALID;
2325
Eric Anholtdc529a42009-03-10 22:34:49 -07002326 if (regnum < 8)
2327 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2328 else
2329 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2330 I915_WRITE(fence_reg, val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002331}
2332
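/*
 * Editorial worked example (not from the source): for a hypothetical
 * X-tiled object with a 2048-byte stride on a chip using 512-byte tile
 * widths, pitch_val above is 2048 / 512 = 4 tiles, encoded as
 * ffs(4) - 1 = 2, i.e. log2 of the pitch in tiles.  This encoding is
 * why the pitch must be a power-of-two number of tile widths.
 */
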
2333static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2334{
2335 struct drm_gem_object *obj = reg->obj;
2336 struct drm_device *dev = obj->dev;
2337 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002338 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002339 int regnum = obj_priv->fence_reg;
2340 uint32_t val;
2341 uint32_t pitch_val;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002342 uint32_t fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002343
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002344 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
Jesse Barnesde151cf2008-11-12 10:03:55 -08002345 (obj_priv->gtt_offset & (obj->size - 1))) {
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002346 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002347 __func__, obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002348 return;
2349 }
2350
Eric Anholte76a16d2009-05-26 17:44:56 -07002351 pitch_val = obj_priv->stride / 128;
2352 pitch_val = ffs(pitch_val) - 1;
2353 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2354
Jesse Barnesde151cf2008-11-12 10:03:55 -08002355 val = obj_priv->gtt_offset;
2356 if (obj_priv->tiling_mode == I915_TILING_Y)
2357 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002358 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2359 WARN_ON(fence_size_bits & ~0x00000f00);
2360 val |= fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002361 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2362 val |= I830_FENCE_REG_VALID;
2363
2364 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002365}
2366
Chris Wilson2cf34d72010-09-14 13:03:28 +01002367static int i915_find_fence_reg(struct drm_device *dev,
2368 bool interruptible)
Daniel Vetterae3db242010-02-19 11:51:58 +01002369{
2370 struct drm_i915_fence_reg *reg = NULL;
2371 struct drm_i915_gem_object *obj_priv = NULL;
2372 struct drm_i915_private *dev_priv = dev->dev_private;
2373 struct drm_gem_object *obj = NULL;
2374 int i, avail, ret;
2375
2376 /* First try to find a free reg */
2377 avail = 0;
2378 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2379 reg = &dev_priv->fence_regs[i];
2380 if (!reg->obj)
2381 return i;
2382
Daniel Vetter23010e42010-03-08 13:35:02 +01002383 obj_priv = to_intel_bo(reg->obj);
Daniel Vetterae3db242010-02-19 11:51:58 +01002384 if (!obj_priv->pin_count)
2385 avail++;
2386 }
2387
2388 if (avail == 0)
2389 return -ENOSPC;
2390
2391 /* None available, try to steal one or wait for a user to finish */
2392 i = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002393 list_for_each_entry(reg, &dev_priv->mm.fence_list,
2394 lru_list) {
2395 obj = reg->obj;
2396 obj_priv = to_intel_bo(obj);
Daniel Vetterae3db242010-02-19 11:51:58 +01002397
2398 if (obj_priv->pin_count)
2399 continue;
2400
2401 /* found one! */
2402 i = obj_priv->fence_reg;
2403 break;
2404 }
2405
2406 BUG_ON(i == I915_FENCE_REG_NONE);
2407
2408 /* We only have a reference on obj from the active list. put_fence_reg
 2409	 * might drop that one, causing a use-after-free. So hold a
2410 * private reference to obj like the other callers of put_fence_reg
2411 * (set_tiling ioctl) do. */
2412 drm_gem_object_reference(obj);
Chris Wilson2cf34d72010-09-14 13:03:28 +01002413 ret = i915_gem_object_put_fence_reg(obj, interruptible);
Daniel Vetterae3db242010-02-19 11:51:58 +01002414 drm_gem_object_unreference(obj);
2415 if (ret != 0)
2416 return ret;
2417
2418 return i;
2419}
2420
Jesse Barnesde151cf2008-11-12 10:03:55 -08002421/**
2422 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2423 * @obj: object to map through a fence reg
2424 *
2425 * When mapping objects through the GTT, userspace wants to be able to write
2426 * to them without having to worry about swizzling if the object is tiled.
2427 *
2428 * This function walks the fence regs looking for a free one for @obj,
2429 * stealing one if it can't find any.
2430 *
2431 * It then sets up the reg based on the object's properties: address, pitch
2432 * and tiling format.
2433 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002434int
Chris Wilson2cf34d72010-09-14 13:03:28 +01002435i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
2436 bool interruptible)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002437{
2438 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002439 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002440 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002441 struct drm_i915_fence_reg *reg = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002442 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002443
Eric Anholta09ba7f2009-08-29 12:49:51 -07002444 /* Just update our place in the LRU if our fence is getting used. */
2445 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002446 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2447 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002448 return 0;
2449 }
2450
Jesse Barnesde151cf2008-11-12 10:03:55 -08002451 switch (obj_priv->tiling_mode) {
2452 case I915_TILING_NONE:
2453 WARN(1, "allocating a fence for non-tiled object?\n");
2454 break;
2455 case I915_TILING_X:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002456 if (!obj_priv->stride)
2457 return -EINVAL;
2458 WARN((obj_priv->stride & (512 - 1)),
2459 "object 0x%08x is X tiled but has non-512B pitch\n",
2460 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002461 break;
2462 case I915_TILING_Y:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002463 if (!obj_priv->stride)
2464 return -EINVAL;
2465 WARN((obj_priv->stride & (128 - 1)),
2466 "object 0x%08x is Y tiled but has non-128B pitch\n",
2467 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002468 break;
2469 }
2470
Chris Wilson2cf34d72010-09-14 13:03:28 +01002471 ret = i915_find_fence_reg(dev, interruptible);
Daniel Vetterae3db242010-02-19 11:51:58 +01002472 if (ret < 0)
2473 return ret;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002474
Daniel Vetterae3db242010-02-19 11:51:58 +01002475 obj_priv->fence_reg = ret;
2476 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002477 list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002478
Jesse Barnesde151cf2008-11-12 10:03:55 -08002479 reg->obj = obj;
2480
Chris Wilsone259bef2010-09-17 00:32:02 +01002481 switch (INTEL_INFO(dev)->gen) {
2482 case 6:
Eric Anholt4e901fd2009-10-26 16:44:17 -07002483 sandybridge_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002484 break;
2485 case 5:
2486 case 4:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002487 i965_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002488 break;
2489 case 3:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002490 i915_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002491 break;
2492 case 2:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002493 i830_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002494 break;
2495 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002496
Daniel Vetterae3db242010-02-19 11:51:58 +01002497 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2498 obj_priv->tiling_mode);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002499
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002500 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002501}
2502
2503/**
2504 * i915_gem_clear_fence_reg - clear out fence register info
2505 * @obj: object to clear
2506 *
2507 * Zeroes out the fence register itself and clears out the associated
2508 * data structures in dev_priv and obj_priv.
2509 */
2510static void
2511i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2512{
2513 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002514 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002515 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002516 struct drm_i915_fence_reg *reg =
2517 &dev_priv->fence_regs[obj_priv->fence_reg];
Chris Wilsone259bef2010-09-17 00:32:02 +01002518 uint32_t fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002519
Chris Wilsone259bef2010-09-17 00:32:02 +01002520 switch (INTEL_INFO(dev)->gen) {
2521 case 6:
Eric Anholt4e901fd2009-10-26 16:44:17 -07002522 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2523 (obj_priv->fence_reg * 8), 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002524 break;
2525 case 5:
2526 case 4:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002527 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002528 break;
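	/*
	 * Note (editorial): the "case 2:" label below sits inside the else
	 * branch on purpose.  Gen2 always takes the FENCE_REG_830_0 path,
	 * while gen3 reaches it only for fence registers 0-7 and uses
	 * FENCE_REG_945_8 for registers 8 and above.
	 */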
2529 case 3:
Chris Wilson9b74f732010-09-22 19:10:44 +01002530 if (obj_priv->fence_reg >= 8)
Chris Wilsone259bef2010-09-17 00:32:02 +01002531 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002532 else
Chris Wilsone259bef2010-09-17 00:32:02 +01002533 case 2:
2534 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002535
2536 I915_WRITE(fence_reg, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002537 break;
Eric Anholtdc529a42009-03-10 22:34:49 -07002538 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002539
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002540 reg->obj = NULL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002541 obj_priv->fence_reg = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002542 list_del_init(&reg->lru_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002543}
2544
Eric Anholt673a3942008-07-30 12:06:12 -07002545/**
Chris Wilson52dc7d32009-06-06 09:46:01 +01002546 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2547 * to the buffer to finish, and then resets the fence register.
2548 * @obj: tiled object holding a fence register.
Chris Wilson2cf34d72010-09-14 13:03:28 +01002549 * @interruptible: whether the wait upon the fence is interruptible
Chris Wilson52dc7d32009-06-06 09:46:01 +01002550 *
2551 * Zeroes out the fence register itself and clears out the associated
2552 * data structures in dev_priv and obj_priv.
2553 */
2554int
Chris Wilson2cf34d72010-09-14 13:03:28 +01002555i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
2556 bool interruptible)
Chris Wilson52dc7d32009-06-06 09:46:01 +01002557{
2558 struct drm_device *dev = obj->dev;
Chris Wilson53640e12010-09-20 11:40:50 +01002559 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002560 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson53640e12010-09-20 11:40:50 +01002561 struct drm_i915_fence_reg *reg;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002562
2563 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2564 return 0;
2565
Daniel Vetter10ae9bd2010-02-01 13:59:17 +01002566 /* If we've changed tiling, GTT-mappings of the object
2567 * need to re-fault to ensure that the correct fence register
2568 * setup is in place.
2569 */
2570 i915_gem_release_mmap(obj);
2571
Chris Wilson52dc7d32009-06-06 09:46:01 +01002572 /* On the i915, GPU access to tiled buffers is via a fence,
2573 * therefore we must wait for any outstanding access to complete
2574 * before clearing the fence.
2575 */
Chris Wilson53640e12010-09-20 11:40:50 +01002576 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2577 if (reg->gpu) {
Chris Wilson52dc7d32009-06-06 09:46:01 +01002578 int ret;
2579
Chris Wilson2cf34d72010-09-14 13:03:28 +01002580 ret = i915_gem_object_flush_gpu_write_domain(obj, true);
Chris Wilson0bc23aa2010-09-14 10:22:23 +01002581 if (ret)
2582 return ret;
2583
Chris Wilson2cf34d72010-09-14 13:03:28 +01002584 ret = i915_gem_object_wait_rendering(obj, interruptible);
Chris Wilson0bc23aa2010-09-14 10:22:23 +01002585 if (ret)
Chris Wilson52dc7d32009-06-06 09:46:01 +01002586 return ret;
Chris Wilson53640e12010-09-20 11:40:50 +01002587
2588 reg->gpu = false;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002589 }
2590
Daniel Vetter4a726612010-02-01 13:59:16 +01002591 i915_gem_object_flush_gtt_write_domain(obj);
Chris Wilson0bc23aa2010-09-14 10:22:23 +01002592 i915_gem_clear_fence_reg(obj);
Chris Wilson52dc7d32009-06-06 09:46:01 +01002593
2594 return 0;
2595}
2596
2597/**
Eric Anholt673a3942008-07-30 12:06:12 -07002598 * Finds free space in the GTT aperture and binds the object there.
2599 */
2600static int
2601i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2602{
2603 struct drm_device *dev = obj->dev;
2604 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002605 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002606 struct drm_mm_node *free_space;
Chris Wilson4bdadb92010-01-27 13:36:32 +00002607 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Chris Wilson07f73f62009-09-14 16:50:30 +01002608 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002609
Chris Wilsonbb6baf72009-09-22 14:24:13 +01002610 if (obj_priv->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002611 DRM_ERROR("Attempting to bind a purgeable object\n");
2612 return -EINVAL;
2613 }
2614
Eric Anholt673a3942008-07-30 12:06:12 -07002615 if (alignment == 0)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002616 alignment = i915_gem_get_gtt_alignment(obj);
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002617 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002618 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2619 return -EINVAL;
2620 }
2621
Chris Wilson654fc602010-05-27 13:18:21 +01002622 /* If the object is bigger than the entire aperture, reject it early
2623 * before evicting everything in a vain attempt to find space.
2624 */
2625 if (obj->size > dev->gtt_total) {
2626 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2627 return -E2BIG;
2628 }
2629
Eric Anholt673a3942008-07-30 12:06:12 -07002630 search_free:
2631 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2632 obj->size, alignment, 0);
2633 if (free_space != NULL) {
2634 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2635 alignment);
Daniel Vetterdb3307a2010-07-02 15:02:12 +01002636 if (obj_priv->gtt_space != NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002637 obj_priv->gtt_offset = obj_priv->gtt_space->start;
Eric Anholt673a3942008-07-30 12:06:12 -07002638 }
2639 if (obj_priv->gtt_space == NULL) {
2640 /* If the gtt is empty and we're still having trouble
2641 * fitting our object in, we're out of memory.
2642 */
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002643 ret = i915_gem_evict_something(dev, obj->size, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01002644 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002645 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002646
Eric Anholt673a3942008-07-30 12:06:12 -07002647 goto search_free;
2648 }
2649
2650#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02002651 DRM_INFO("Binding object of size %zd at 0x%08x\n",
Eric Anholt673a3942008-07-30 12:06:12 -07002652 obj->size, obj_priv->gtt_offset);
2653#endif
Chris Wilson4bdadb92010-01-27 13:36:32 +00002654 ret = i915_gem_object_get_pages(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002655 if (ret) {
2656 drm_mm_put_block(obj_priv->gtt_space);
2657 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002658
2659 if (ret == -ENOMEM) {
2660 /* first try to clear up some space from the GTT */
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002661 ret = i915_gem_evict_something(dev, obj->size,
2662 alignment);
Chris Wilson07f73f62009-09-14 16:50:30 +01002663 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002664 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002665 if (gfpmask) {
2666 gfpmask = 0;
2667 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002668 }
2669
2670 return ret;
2671 }
2672
2673 goto search_free;
2674 }
2675
Eric Anholt673a3942008-07-30 12:06:12 -07002676 return ret;
2677 }
2678
Eric Anholt673a3942008-07-30 12:06:12 -07002679 /* Create an AGP memory structure pointing at our pages, and bind it
2680 * into the GTT.
2681 */
2682 obj_priv->agp_mem = drm_agp_bind_pages(dev,
Eric Anholt856fa192009-03-19 14:10:50 -07002683 obj_priv->pages,
Chris Wilson07f73f62009-09-14 16:50:30 +01002684 obj->size >> PAGE_SHIFT,
Keith Packardba1eb1d2008-10-14 19:55:10 -07002685 obj_priv->gtt_offset,
2686 obj_priv->agp_type);
Eric Anholt673a3942008-07-30 12:06:12 -07002687 if (obj_priv->agp_mem == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002688 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002689 drm_mm_put_block(obj_priv->gtt_space);
2690 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002691
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002692 ret = i915_gem_evict_something(dev, obj->size, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01002693 if (ret)
Chris Wilson07f73f62009-09-14 16:50:30 +01002694 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002695
2696 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002697 }
2698 atomic_inc(&dev->gtt_count);
2699 atomic_add(obj->size, &dev->gtt_memory);
2700
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002701	/* keep track of the bound object by adding it to the inactive list */
2702 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
2703
Eric Anholt673a3942008-07-30 12:06:12 -07002704 /* Assert that the object is not currently in any GPU domain. As it
2705 * wasn't in the GTT, there shouldn't be any way it could have been in
2706 * a GPU cache
2707 */
Chris Wilson21d509e2009-06-06 09:46:02 +01002708 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2709 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002710
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002711 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2712
Eric Anholt673a3942008-07-30 12:06:12 -07002713 return 0;
2714}
2715
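/*
 * Sketch of how the bind above is normally reached (an assumption drawn from
 * the pin path in this file, shown purely for illustration): pinning an
 * object that has no GTT space yet triggers the bind, e.g.
 *
 *	if (obj_priv->gtt_space == NULL) {
 *		ret = i915_gem_object_bind_to_gtt(obj, alignment);
 *		if (ret)
 *			return ret;
 *	}
 */
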
2716void
2717i915_gem_clflush_object(struct drm_gem_object *obj)
2718{
Daniel Vetter23010e42010-03-08 13:35:02 +01002719 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002720
2721 /* If we don't have a page list set up, then we're not pinned
2722 * to GPU, and we can ignore the cache flush because it'll happen
2723 * again at bind time.
2724 */
Eric Anholt856fa192009-03-19 14:10:50 -07002725 if (obj_priv->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002726 return;
2727
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002728 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002729
Eric Anholt856fa192009-03-19 14:10:50 -07002730 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002731}
2732
Eric Anholte47c68e2008-11-14 13:35:19 -08002733/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002734static int
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002735i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
2736 bool pipelined)
Eric Anholte47c68e2008-11-14 13:35:19 -08002737{
2738 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002739 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002740
2741 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002742 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002743
2744 /* Queue the GPU write cache flushing we need. */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002745 old_write_domain = obj->write_domain;
Chris Wilsonc78ec302010-09-20 12:50:23 +01002746 i915_gem_flush_ring(dev, NULL,
Chris Wilson92204342010-09-18 11:02:01 +01002747 to_intel_bo(obj)->ring,
2748 0, obj->write_domain);
Chris Wilson48b956c2010-09-14 12:50:34 +01002749 BUG_ON(obj->write_domain);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002750
2751 trace_i915_gem_object_change_domain(obj,
2752 obj->read_domains,
2753 old_write_domain);
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002754
2755 if (pipelined)
2756 return 0;
2757
Chris Wilson2cf34d72010-09-14 13:03:28 +01002758 return i915_gem_object_wait_rendering(obj, true);
Eric Anholte47c68e2008-11-14 13:35:19 -08002759}
2760
2761/** Flushes the GTT write domain for the object if it's dirty. */
2762static void
2763i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2764{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002765 uint32_t old_write_domain;
2766
Eric Anholte47c68e2008-11-14 13:35:19 -08002767 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2768 return;
2769
2770 /* No actual flushing is required for the GTT write domain. Writes
2771 * to it immediately go to main memory as far as we know, so there's
2772 * no chipset flush. It also doesn't land in render cache.
2773 */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002774 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002775 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002776
2777 trace_i915_gem_object_change_domain(obj,
2778 obj->read_domains,
2779 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002780}
2781
2782/** Flushes the CPU write domain for the object if it's dirty. */
2783static void
2784i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2785{
2786 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002787 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002788
2789 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2790 return;
2791
2792 i915_gem_clflush_object(obj);
2793 drm_agp_chipset_flush(dev);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002794 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002795 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002796
2797 trace_i915_gem_object_change_domain(obj,
2798 obj->read_domains,
2799 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002800}
2801
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002802/**
2803 * Moves a single object to the GTT read, and possibly write domain.
2804 *
2805 * This function returns when the move is complete, including waiting on
2806 * flushes to occur.
2807 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002808int
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002809i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2810{
Daniel Vetter23010e42010-03-08 13:35:02 +01002811 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002812 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002813 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002814
Eric Anholt02354392008-11-26 13:58:13 -08002815 /* Not valid to be called on unbound objects. */
2816 if (obj_priv->gtt_space == NULL)
2817 return -EINVAL;
2818
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002819 ret = i915_gem_object_flush_gpu_write_domain(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08002820 if (ret != 0)
2821 return ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002822
Chris Wilson72133422010-09-13 23:56:38 +01002823 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002824
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002825 if (write) {
Chris Wilson2cf34d72010-09-14 13:03:28 +01002826 ret = i915_gem_object_wait_rendering(obj, true);
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002827 if (ret)
2828 return ret;
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002829 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002830
Chris Wilson72133422010-09-13 23:56:38 +01002831 old_write_domain = obj->write_domain;
2832 old_read_domains = obj->read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002833
2834 /* It should now be out of any other write domains, and we can update
2835 * the domain values for our changes.
2836 */
2837 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2838 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002839 if (write) {
Chris Wilson72133422010-09-13 23:56:38 +01002840 obj->read_domains = I915_GEM_DOMAIN_GTT;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002841 obj->write_domain = I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002842 obj_priv->dirty = 1;
2843 }
2844
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002845 trace_i915_gem_object_change_domain(obj,
2846 old_read_domains,
2847 old_write_domain);
2848
Eric Anholte47c68e2008-11-14 13:35:19 -08002849 return 0;
2850}
2851
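/*
 * Illustrative sketch (not part of the driver): a caller intending to write
 * to the object through the GTT aperture would typically pin it and then ask
 * for a writable GTT domain, unwinding the pin on failure:
 *
 *	ret = i915_gem_object_pin(obj, 0);
 *	if (ret == 0) {
 *		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 *		if (ret)
 *			i915_gem_object_unpin(obj);
 *	}
 */
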
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002852/*
2853 * Prepare buffer for display plane. Use uninterruptible for possible flush
                                             2854 * wait, as during the modesetting process we're not supposed to be interrupted.
2855 */
2856int
Chris Wilson48b956c2010-09-14 12:50:34 +01002857i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
2858 bool pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002859{
Daniel Vetter23010e42010-03-08 13:35:02 +01002860 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002861 uint32_t old_read_domains;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002862 int ret;
2863
2864 /* Not valid to be called on unbound objects. */
2865 if (obj_priv->gtt_space == NULL)
2866 return -EINVAL;
2867
Chris Wilsonced270f2010-09-26 22:47:46 +01002868 ret = i915_gem_object_flush_gpu_write_domain(obj, true);
Chris Wilson48b956c2010-09-14 12:50:34 +01002869 if (ret)
Daniel Vettere35a41d2010-02-11 22:13:59 +01002870 return ret;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002871
Chris Wilsonced270f2010-09-26 22:47:46 +01002872	/* Currently, we are always called from a non-interruptible context. */
2873 if (!pipelined) {
2874 ret = i915_gem_object_wait_rendering(obj, false);
2875 if (ret)
2876 return ret;
2877 }
2878
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002879 i915_gem_object_flush_cpu_write_domain(obj);
2880
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002881 old_read_domains = obj->read_domains;
Chris Wilsonc78ec302010-09-20 12:50:23 +01002882 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002883
2884 trace_i915_gem_object_change_domain(obj,
2885 old_read_domains,
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002886 obj->write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002887
2888 return 0;
2889}
2890
Eric Anholte47c68e2008-11-14 13:35:19 -08002891/**
2892 * Moves a single object to the CPU read, and possibly write domain.
2893 *
2894 * This function returns when the move is complete, including waiting on
2895 * flushes to occur.
2896 */
2897static int
2898i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2899{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002900 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002901 int ret;
2902
Daniel Vetterba3d8d72010-02-11 22:37:04 +01002903 ret = i915_gem_object_flush_gpu_write_domain(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08002904 if (ret != 0)
2905 return ret;
2906
2907 i915_gem_object_flush_gtt_write_domain(obj);
2908
2909 /* If we have a partially-valid cache of the object in the CPU,
2910 * finish invalidating it and free the per-page flags.
2911 */
2912 i915_gem_object_set_to_full_cpu_read_domain(obj);
2913
Chris Wilson72133422010-09-13 23:56:38 +01002914 if (write) {
Chris Wilson2cf34d72010-09-14 13:03:28 +01002915 ret = i915_gem_object_wait_rendering(obj, true);
Chris Wilson72133422010-09-13 23:56:38 +01002916 if (ret)
2917 return ret;
2918 }
2919
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002920 old_write_domain = obj->write_domain;
2921 old_read_domains = obj->read_domains;
2922
Eric Anholte47c68e2008-11-14 13:35:19 -08002923 /* Flush the CPU cache if it's still invalid. */
2924 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2925 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002926
2927 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2928 }
2929
2930 /* It should now be out of any other write domains, and we can update
2931 * the domain values for our changes.
2932 */
2933 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2934
2935 /* If we're writing through the CPU, then the GPU read domains will
2936 * need to be invalidated at next use.
2937 */
2938 if (write) {
Chris Wilsonc78ec302010-09-20 12:50:23 +01002939 obj->read_domains = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002940 obj->write_domain = I915_GEM_DOMAIN_CPU;
2941 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002942
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002943 trace_i915_gem_object_change_domain(obj,
2944 old_read_domains,
2945 old_write_domain);
2946
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002947 return 0;
2948}
2949
Eric Anholt673a3942008-07-30 12:06:12 -07002950/*
2951 * Set the next domain for the specified object. This
                                             2952 * may not actually perform the necessary flushing/invalidating though,
2953 * as that may want to be batched with other set_domain operations
2954 *
2955 * This is (we hope) the only really tricky part of gem. The goal
2956 * is fairly simple -- track which caches hold bits of the object
2957 * and make sure they remain coherent. A few concrete examples may
2958 * help to explain how it works. For shorthand, we use the notation
                                             2959 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2960 * a pair of read and write domain masks.
2961 *
2962 * Case 1: the batch buffer
2963 *
2964 * 1. Allocated
2965 * 2. Written by CPU
2966 * 3. Mapped to GTT
2967 * 4. Read by GPU
2968 * 5. Unmapped from GTT
2969 * 6. Freed
2970 *
2971 * Let's take these a step at a time
2972 *
2973 * 1. Allocated
2974 * Pages allocated from the kernel may still have
2975 * cache contents, so we set them to (CPU, CPU) always.
2976 * 2. Written by CPU (using pwrite)
2977 * The pwrite function calls set_domain (CPU, CPU) and
2978 * this function does nothing (as nothing changes)
                                             2979 * 3. Mapped to GTT
2980 * This function asserts that the object is not
2981 * currently in any GPU-based read or write domains
2982 * 4. Read by GPU
2983 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2984 * As write_domain is zero, this function adds in the
2985 * current read domains (CPU+COMMAND, 0).
2986 * flush_domains is set to CPU.
2987 * invalidate_domains is set to COMMAND
2988 * clflush is run to get data out of the CPU caches
2989 * then i915_dev_set_domain calls i915_gem_flush to
2990 * emit an MI_FLUSH and drm_agp_chipset_flush
2991 * 5. Unmapped from GTT
2992 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2993 * flush_domains and invalidate_domains end up both zero
2994 * so no flushing/invalidating happens
2995 * 6. Freed
2996 * yay, done
2997 *
2998 * Case 2: The shared render buffer
2999 *
3000 * 1. Allocated
3001 * 2. Mapped to GTT
3002 * 3. Read/written by GPU
3003 * 4. set_domain to (CPU,CPU)
3004 * 5. Read/written by CPU
3005 * 6. Read/written by GPU
3006 *
3007 * 1. Allocated
3008 * Same as last example, (CPU, CPU)
3009 * 2. Mapped to GTT
3010 * Nothing changes (assertions find that it is not in the GPU)
3011 * 3. Read/written by GPU
3012 * execbuffer calls set_domain (RENDER, RENDER)
3013 * flush_domains gets CPU
3014 * invalidate_domains gets GPU
3015 * clflush (obj)
3016 * MI_FLUSH and drm_agp_chipset_flush
3017 * 4. set_domain (CPU, CPU)
3018 * flush_domains gets GPU
3019 * invalidate_domains gets CPU
3020 * wait_rendering (obj) to make sure all drawing is complete.
3021 * This will include an MI_FLUSH to get the data from GPU
3022 * to memory
3023 * clflush (obj) to invalidate the CPU cache
3024 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3025 * 5. Read/written by CPU
3026 * cache lines are loaded and dirtied
                                             3027 * 6. Read/written by GPU
3028 * Same as last GPU access
3029 *
3030 * Case 3: The constant buffer
3031 *
3032 * 1. Allocated
3033 * 2. Written by CPU
3034 * 3. Read by GPU
3035 * 4. Updated (written) by CPU again
3036 * 5. Read by GPU
3037 *
3038 * 1. Allocated
3039 * (CPU, CPU)
3040 * 2. Written by CPU
3041 * (CPU, CPU)
3042 * 3. Read by GPU
3043 * (CPU+RENDER, 0)
3044 * flush_domains = CPU
3045 * invalidate_domains = RENDER
3046 * clflush (obj)
3047 * MI_FLUSH
3048 * drm_agp_chipset_flush
3049 * 4. Updated (written) by CPU again
3050 * (CPU, CPU)
3051 * flush_domains = 0 (no previous write domain)
3052 * invalidate_domains = 0 (no new read domains)
3053 * 5. Read by GPU
3054 * (CPU+RENDER, 0)
3055 * flush_domains = CPU
3056 * invalidate_domains = RENDER
3057 * clflush (obj)
3058 * MI_FLUSH
3059 * drm_agp_chipset_flush
3060 */
Keith Packardc0d90822008-11-20 23:11:08 -08003061static void
Eric Anholt8b0e3782009-02-19 14:40:50 -08003062i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003063{
3064 struct drm_device *dev = obj->dev;
Chris Wilson92204342010-09-18 11:02:01 +01003065 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01003066 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003067 uint32_t invalidate_domains = 0;
3068 uint32_t flush_domains = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003069 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003070
Eric Anholt8b0e3782009-02-19 14:40:50 -08003071 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
3072 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
Eric Anholt673a3942008-07-30 12:06:12 -07003073
Jesse Barnes652c3932009-08-17 13:31:43 -07003074 intel_mark_busy(dev, obj);
3075
Eric Anholt673a3942008-07-30 12:06:12 -07003076#if WATCH_BUF
3077 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
3078 __func__, obj,
Eric Anholt8b0e3782009-02-19 14:40:50 -08003079 obj->read_domains, obj->pending_read_domains,
3080 obj->write_domain, obj->pending_write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003081#endif
3082 /*
3083 * If the object isn't moving to a new write domain,
3084 * let the object stay in multiple read domains
3085 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003086 if (obj->pending_write_domain == 0)
3087 obj->pending_read_domains |= obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003088 else
3089 obj_priv->dirty = 1;
3090
3091 /*
3092 * Flush the current write domain if
3093 * the new read domains don't match. Invalidate
3094 * any read domains which differ from the old
3095 * write domain
3096 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003097 if (obj->write_domain &&
3098 obj->write_domain != obj->pending_read_domains) {
Eric Anholt673a3942008-07-30 12:06:12 -07003099 flush_domains |= obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003100 invalidate_domains |=
3101 obj->pending_read_domains & ~obj->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07003102 }
3103 /*
3104 * Invalidate any read caches which may have
3105 * stale data. That is, any new read domains.
3106 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003107 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003108 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3109#if WATCH_BUF
3110 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3111 __func__, flush_domains, invalidate_domains);
3112#endif
Eric Anholt673a3942008-07-30 12:06:12 -07003113 i915_gem_clflush_object(obj);
3114 }
3115
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003116 old_read_domains = obj->read_domains;
3117
Eric Anholtefbeed92009-02-19 14:54:51 -08003118 /* The actual obj->write_domain will be updated with
3119 * pending_write_domain after we emit the accumulated flush for all
3120 * of our domain changes in execbuffers (which clears objects'
3121 * write_domains). So if we have a current write domain that we
3122 * aren't changing, set pending_write_domain to that.
3123 */
3124 if (flush_domains == 0 && obj->pending_write_domain == 0)
3125 obj->pending_write_domain = obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003126 obj->read_domains = obj->pending_read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003127
3128 dev->invalidate_domains |= invalidate_domains;
3129 dev->flush_domains |= flush_domains;
Chris Wilson92204342010-09-18 11:02:01 +01003130 if (obj_priv->ring)
3131 dev_priv->mm.flush_rings |= obj_priv->ring->id;
Eric Anholt673a3942008-07-30 12:06:12 -07003132#if WATCH_BUF
3133 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3134 __func__,
3135 obj->read_domains, obj->write_domain,
3136 dev->invalidate_domains, dev->flush_domains);
3137#endif
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003138
3139 trace_i915_gem_object_change_domain(obj,
3140 old_read_domains,
3141 obj->write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003142}
3143
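/*
 * Worked example of the bookkeeping above, following Case 1 step 4 from the
 * big comment (batch buffer read by the GPU after a CPU pwrite). The values
 * are inferred from that comment and are shown only to make the flow
 * concrete:
 *
 *	on entry:  read_domains = CPU, write_domain = CPU,
 *	           pending_read_domains = COMMAND, pending_write_domain = 0
 *
 *	pending_write_domain == 0            => pending_read_domains |= CPU
 *	write_domain != pending_read_domains => flush_domains |= CPU,
 *	                                        invalidate_domains |= COMMAND
 *	CPU in (flush | invalidate)          => i915_gem_clflush_object(obj)
 *
 *	on exit:   read_domains = CPU | COMMAND, and execbuffer later emits
 *	           the accumulated flush via i915_gem_flush()
 */
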
3144/**
Eric Anholte47c68e2008-11-14 13:35:19 -08003145 * Moves the object from a partial CPU read domain to a full one.
Eric Anholt673a3942008-07-30 12:06:12 -07003146 *
Eric Anholte47c68e2008-11-14 13:35:19 -08003147 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3148 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3149 */
3150static void
3151i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3152{
Daniel Vetter23010e42010-03-08 13:35:02 +01003153 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003154
3155 if (!obj_priv->page_cpu_valid)
3156 return;
3157
3158 /* If we're partially in the CPU read domain, finish moving it in.
3159 */
3160 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3161 int i;
3162
3163 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3164 if (obj_priv->page_cpu_valid[i])
3165 continue;
Eric Anholt856fa192009-03-19 14:10:50 -07003166 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08003167 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003168 }
3169
3170 /* Free the page_cpu_valid mappings which are now stale, whether
3171 * or not we've got I915_GEM_DOMAIN_CPU.
3172 */
Eric Anholt9a298b22009-03-24 12:23:04 -07003173 kfree(obj_priv->page_cpu_valid);
Eric Anholte47c68e2008-11-14 13:35:19 -08003174 obj_priv->page_cpu_valid = NULL;
3175}
3176
3177/**
3178 * Set the CPU read domain on a range of the object.
3179 *
3180 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
                                             3181 * not entirely valid. The object's page_cpu_valid member tracks which
3182 * pages have been flushed, and will be respected by
                                             3183 * i915_gem_object_set_to_cpu_domain() if it is later called to get a valid mapping
3184 * of the whole object.
3185 *
3186 * This function returns when the move is complete, including waiting on
3187 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07003188 */
3189static int
Eric Anholte47c68e2008-11-14 13:35:19 -08003190i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3191 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07003192{
Daniel Vetter23010e42010-03-08 13:35:02 +01003193 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003194 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003195 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003196
Eric Anholte47c68e2008-11-14 13:35:19 -08003197 if (offset == 0 && size == obj->size)
3198 return i915_gem_object_set_to_cpu_domain(obj, 0);
3199
Daniel Vetterba3d8d72010-02-11 22:37:04 +01003200 ret = i915_gem_object_flush_gpu_write_domain(obj, false);
Eric Anholte47c68e2008-11-14 13:35:19 -08003201 if (ret != 0)
3202 return ret;
3203 i915_gem_object_flush_gtt_write_domain(obj);
3204
3205 /* If we're already fully in the CPU read domain, we're done. */
3206 if (obj_priv->page_cpu_valid == NULL &&
3207 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003208 return 0;
3209
Eric Anholte47c68e2008-11-14 13:35:19 -08003210 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3211 * newly adding I915_GEM_DOMAIN_CPU
3212 */
Eric Anholt673a3942008-07-30 12:06:12 -07003213 if (obj_priv->page_cpu_valid == NULL) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003214 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3215 GFP_KERNEL);
Eric Anholte47c68e2008-11-14 13:35:19 -08003216 if (obj_priv->page_cpu_valid == NULL)
3217 return -ENOMEM;
3218 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3219 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003220
3221 /* Flush the cache on any pages that are still invalid from the CPU's
3222 * perspective.
3223 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003224 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3225 i++) {
Eric Anholt673a3942008-07-30 12:06:12 -07003226 if (obj_priv->page_cpu_valid[i])
3227 continue;
3228
Eric Anholt856fa192009-03-19 14:10:50 -07003229 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003230
3231 obj_priv->page_cpu_valid[i] = 1;
3232 }
3233
Eric Anholte47c68e2008-11-14 13:35:19 -08003234 /* It should now be out of any other write domains, and we can update
3235 * the domain values for our changes.
3236 */
3237 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3238
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003239 old_read_domains = obj->read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003240 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3241
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003242 trace_i915_gem_object_change_domain(obj,
3243 old_read_domains,
3244 obj->write_domain);
3245
Eric Anholt673a3942008-07-30 12:06:12 -07003246 return 0;
3247}
3248
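/*
 * Usage sketch (an assumption based on the pread path, shown for
 * illustration only): a read that touches just part of the object can use
 * the range variant to limit the clflushes to the pages actually accessed:
 *
 *	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
 *							args->size);
 *	if (ret)
 *		return ret;
 *
 * after which the pages covering [offset, offset + size) are coherent from
 * the CPU's point of view.
 */
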
3249/**
Eric Anholt673a3942008-07-30 12:06:12 -07003250 * Pin an object to the GTT and evaluate the relocations landing in it.
3251 */
3252static int
3253i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3254 struct drm_file *file_priv,
Jesse Barnes76446ca2009-12-17 22:05:42 -05003255 struct drm_i915_gem_exec_object2 *entry,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003256 struct drm_i915_gem_relocation_entry *relocs)
Eric Anholt673a3942008-07-30 12:06:12 -07003257{
3258 struct drm_device *dev = obj->dev;
Keith Packard0839ccb2008-10-30 19:38:48 -07003259 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01003260 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003261 int i, ret;
Keith Packard0839ccb2008-10-30 19:38:48 -07003262 void __iomem *reloc_page;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003263 bool need_fence;
3264
3265 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3266 obj_priv->tiling_mode != I915_TILING_NONE;
3267
3268 /* Check fence reg constraints and rebind if necessary */
Chris Wilson808b24d62010-05-27 13:18:15 +01003269 if (need_fence &&
3270 !i915_gem_object_fence_offset_ok(obj,
3271 obj_priv->tiling_mode)) {
3272 ret = i915_gem_object_unbind(obj);
3273 if (ret)
3274 return ret;
3275 }
Eric Anholt673a3942008-07-30 12:06:12 -07003276
3277 /* Choose the GTT offset for our buffer and put it there. */
3278 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3279 if (ret)
3280 return ret;
3281
Jesse Barnes76446ca2009-12-17 22:05:42 -05003282 /*
3283 * Pre-965 chips need a fence register set up in order to
3284 * properly handle blits to/from tiled surfaces.
3285 */
3286 if (need_fence) {
Chris Wilson53640e12010-09-20 11:40:50 +01003287 ret = i915_gem_object_get_fence_reg(obj, true);
Jesse Barnes76446ca2009-12-17 22:05:42 -05003288 if (ret != 0) {
Jesse Barnes76446ca2009-12-17 22:05:42 -05003289 i915_gem_object_unpin(obj);
3290 return ret;
3291 }
Chris Wilson53640e12010-09-20 11:40:50 +01003292
3293 dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003294 }
3295
Eric Anholt673a3942008-07-30 12:06:12 -07003296 entry->offset = obj_priv->gtt_offset;
3297
Eric Anholt673a3942008-07-30 12:06:12 -07003298 /* Apply the relocations, using the GTT aperture to avoid cache
3299 * flushing requirements.
3300 */
3301 for (i = 0; i < entry->relocation_count; i++) {
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003302		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003303 struct drm_gem_object *target_obj;
3304 struct drm_i915_gem_object *target_obj_priv;
Eric Anholt3043c602008-10-02 12:24:47 -07003305 uint32_t reloc_val, reloc_offset;
3306 uint32_t __iomem *reloc_entry;
Eric Anholt673a3942008-07-30 12:06:12 -07003307
Eric Anholt673a3942008-07-30 12:06:12 -07003308 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003309 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003310 if (target_obj == NULL) {
3311 i915_gem_object_unpin(obj);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01003312 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07003313 }
Daniel Vetter23010e42010-03-08 13:35:02 +01003314 target_obj_priv = to_intel_bo(target_obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003315
Chris Wilson8542a0b2009-09-09 21:15:15 +01003316#if WATCH_RELOC
3317 DRM_INFO("%s: obj %p offset %08x target %d "
3318 "read %08x write %08x gtt %08x "
3319 "presumed %08x delta %08x\n",
3320 __func__,
3321 obj,
3322 (int) reloc->offset,
3323 (int) reloc->target_handle,
3324 (int) reloc->read_domains,
3325 (int) reloc->write_domain,
3326 (int) target_obj_priv->gtt_offset,
3327 (int) reloc->presumed_offset,
3328 reloc->delta);
3329#endif
3330
Eric Anholt673a3942008-07-30 12:06:12 -07003331 /* The target buffer should have appeared before us in the
3332 * exec_object list, so it should have a GTT space bound by now.
3333 */
3334 if (target_obj_priv->gtt_space == NULL) {
3335 DRM_ERROR("No GTT space found for object %d\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003336 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003337 drm_gem_object_unreference(target_obj);
3338 i915_gem_object_unpin(obj);
3339 return -EINVAL;
3340 }
3341
Chris Wilson8542a0b2009-09-09 21:15:15 +01003342 /* Validate that the target is in a valid r/w GPU domain */
Daniel Vetter16edd552010-02-19 11:52:02 +01003343 if (reloc->write_domain & (reloc->write_domain - 1)) {
3344 DRM_ERROR("reloc with multiple write domains: "
3345 "obj %p target %d offset %d "
3346 "read %08x write %08x",
3347 obj, reloc->target_handle,
3348 (int) reloc->offset,
3349 reloc->read_domains,
3350 reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
                                             3351			return -EINVAL;
3352 }
Chris Wilson8542a0b2009-09-09 21:15:15 +01003353 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3354 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3355 DRM_ERROR("reloc with read/write CPU domains: "
3356 "obj %p target %d offset %d "
3357 "read %08x write %08x",
3358 obj, reloc->target_handle,
3359 (int) reloc->offset,
3360 reloc->read_domains,
3361 reloc->write_domain);
3362 drm_gem_object_unreference(target_obj);
3363 i915_gem_object_unpin(obj);
3364 return -EINVAL;
3365 }
3366 if (reloc->write_domain && target_obj->pending_write_domain &&
3367 reloc->write_domain != target_obj->pending_write_domain) {
3368 DRM_ERROR("Write domain conflict: "
3369 "obj %p target %d offset %d "
3370 "new %08x old %08x\n",
3371 obj, reloc->target_handle,
3372 (int) reloc->offset,
3373 reloc->write_domain,
3374 target_obj->pending_write_domain);
3375 drm_gem_object_unreference(target_obj);
3376 i915_gem_object_unpin(obj);
3377 return -EINVAL;
3378 }
3379
3380 target_obj->pending_read_domains |= reloc->read_domains;
3381 target_obj->pending_write_domain |= reloc->write_domain;
3382
3383 /* If the relocation already has the right value in it, no
3384 * more work needs to be done.
3385 */
3386 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3387 drm_gem_object_unreference(target_obj);
3388 continue;
3389 }
3390
3391 /* Check that the relocation address is valid... */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003392 if (reloc->offset > obj->size - 4) {
Eric Anholt673a3942008-07-30 12:06:12 -07003393 DRM_ERROR("Relocation beyond object bounds: "
3394 "obj %p target %d offset %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003395 obj, reloc->target_handle,
3396 (int) reloc->offset, (int) obj->size);
Eric Anholt673a3942008-07-30 12:06:12 -07003397 drm_gem_object_unreference(target_obj);
3398 i915_gem_object_unpin(obj);
3399 return -EINVAL;
3400 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003401 if (reloc->offset & 3) {
Eric Anholt673a3942008-07-30 12:06:12 -07003402 DRM_ERROR("Relocation not 4-byte aligned: "
3403 "obj %p target %d offset %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003404 obj, reloc->target_handle,
3405 (int) reloc->offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003406 drm_gem_object_unreference(target_obj);
3407 i915_gem_object_unpin(obj);
3408 return -EINVAL;
3409 }
3410
Chris Wilson8542a0b2009-09-09 21:15:15 +01003411 /* and points to somewhere within the target object. */
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003412 if (reloc->delta >= target_obj->size) {
3413 DRM_ERROR("Relocation beyond target object bounds: "
3414 "obj %p target %d delta %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003415 obj, reloc->target_handle,
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003416 (int) reloc->delta, (int) target_obj->size);
Chris Wilson491152b2009-02-11 14:26:32 +00003417 drm_gem_object_unreference(target_obj);
3418 i915_gem_object_unpin(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003419 return -EINVAL;
3420 }
3421
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003422 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3423 if (ret != 0) {
3424 drm_gem_object_unreference(target_obj);
3425 i915_gem_object_unpin(obj);
3426 return -EINVAL;
Eric Anholt673a3942008-07-30 12:06:12 -07003427 }
3428
3429 /* Map the page containing the relocation we're going to
3430 * perform.
3431 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003432 reloc_offset = obj_priv->gtt_offset + reloc->offset;
Keith Packard0839ccb2008-10-30 19:38:48 -07003433 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3434 (reloc_offset &
Chris Wilsonfca3ec02010-08-04 14:34:24 +01003435 ~(PAGE_SIZE - 1)),
3436 KM_USER0);
Eric Anholt3043c602008-10-02 12:24:47 -07003437 reloc_entry = (uint32_t __iomem *)(reloc_page +
Keith Packard0839ccb2008-10-30 19:38:48 -07003438 (reloc_offset & (PAGE_SIZE - 1)));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003439 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
Eric Anholt673a3942008-07-30 12:06:12 -07003440
3441#if WATCH_BUF
3442 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003443 obj, (unsigned int) reloc->offset,
Eric Anholt673a3942008-07-30 12:06:12 -07003444 readl(reloc_entry), reloc_val);
3445#endif
3446 writel(reloc_val, reloc_entry);
Chris Wilsonfca3ec02010-08-04 14:34:24 +01003447 io_mapping_unmap_atomic(reloc_page, KM_USER0);
Eric Anholt673a3942008-07-30 12:06:12 -07003448
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003449 /* The updated presumed offset for this entry will be
3450 * copied back out to the user.
Eric Anholt673a3942008-07-30 12:06:12 -07003451 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003452 reloc->presumed_offset = target_obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003453
3454 drm_gem_object_unreference(target_obj);
3455 }
3456
Eric Anholt673a3942008-07-30 12:06:12 -07003457#if WATCH_BUF
3458 if (0)
3459 i915_gem_dump_object(obj, 128, __func__, ~0);
3460#endif
3461 return 0;
3462}
3463
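/*
 * In effect, each relocation applied above boils down to the following store
 * (shown only as a summary of the loop; reloc_page maps the GTT aperture
 * page that contains the relocation):
 *
 *	writel(target_obj_priv->gtt_offset + reloc->delta,
 *	       reloc_page + (reloc_offset & (PAGE_SIZE - 1)));
 *
 * Writing through the write-combining GTT mapping is what lets us skip any
 * CPU cache flushing of the batch buffer pages here.
 */
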
Eric Anholt673a3942008-07-30 12:06:12 -07003464/* Throttle our rendering by waiting until the ring has completed our requests
3465 * emitted over 20 msec ago.
3466 *
Eric Anholtb9624422009-06-03 07:27:35 +00003467 * Note that if we were to use the current jiffies each time around the loop,
3468 * we wouldn't escape the function with any frames outstanding if the time to
3469 * render a frame was over 20ms.
3470 *
Eric Anholt673a3942008-07-30 12:06:12 -07003471 * This should get us reasonable parallelism between CPU and GPU but also
3472 * relatively low latency when blocking on a particular request to finish.
3473 */
3474static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003475i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003476{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003477 struct drm_i915_private *dev_priv = dev->dev_private;
3478 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003479 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003480 struct drm_i915_gem_request *request;
3481 struct intel_ring_buffer *ring = NULL;
3482 u32 seqno = 0;
3483 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003484
Chris Wilson1c255952010-09-26 11:03:27 +01003485 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003486 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003487 if (time_after_eq(request->emitted_jiffies, recent_enough))
3488 break;
3489
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003490 ring = request->ring;
3491 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003492 }
Chris Wilson1c255952010-09-26 11:03:27 +01003493 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003494
3495 if (seqno == 0)
3496 return 0;
3497
3498 ret = 0;
3499 if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
3500 /* And wait for the seqno passing without holding any locks and
3501 * causing extra latency for others. This is safe as the irq
3502 * generation is designed to be run atomically and so is
3503 * lockless.
3504 */
3505 ring->user_irq_get(dev, ring);
3506 ret = wait_event_interruptible(ring->irq_queue,
3507 i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
3508 || atomic_read(&dev_priv->mm.wedged));
3509 ring->user_irq_put(dev, ring);
3510
3511 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3512 ret = -EIO;
3513 }
3514
3515 if (ret == 0)
3516 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003517
Eric Anholt673a3942008-07-30 12:06:12 -07003518 return ret;
3519}
3520
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003521static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003522i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003523 uint32_t buffer_count,
3524 struct drm_i915_gem_relocation_entry **relocs)
3525{
3526 uint32_t reloc_count = 0, reloc_index = 0, i;
3527 int ret;
3528
3529 *relocs = NULL;
3530 for (i = 0; i < buffer_count; i++) {
3531 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3532 return -EINVAL;
3533 reloc_count += exec_list[i].relocation_count;
3534 }
3535
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003536 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
Jesse Barnes76446ca2009-12-17 22:05:42 -05003537 if (*relocs == NULL) {
3538 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003539 return -ENOMEM;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003540 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003541
3542 for (i = 0; i < buffer_count; i++) {
3543 struct drm_i915_gem_relocation_entry __user *user_relocs;
3544
3545 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3546
3547 ret = copy_from_user(&(*relocs)[reloc_index],
3548 user_relocs,
3549 exec_list[i].relocation_count *
3550 sizeof(**relocs));
3551 if (ret != 0) {
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003552 drm_free_large(*relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003553 *relocs = NULL;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003554 return -EFAULT;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003555 }
3556
3557 reloc_index += exec_list[i].relocation_count;
3558 }
3559
Florian Mickler2bc43b52009-04-06 22:55:41 +02003560 return 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003561}
3562
3563static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003564i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003565 uint32_t buffer_count,
3566 struct drm_i915_gem_relocation_entry *relocs)
3567{
3568 uint32_t reloc_count = 0, i;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003569 int ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003570
Chris Wilson93533c22010-01-31 10:40:48 +00003571 if (relocs == NULL)
3572 return 0;
3573
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003574 for (i = 0; i < buffer_count; i++) {
3575 struct drm_i915_gem_relocation_entry __user *user_relocs;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003576 int unwritten;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003577
3578 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3579
Florian Mickler2bc43b52009-04-06 22:55:41 +02003580 unwritten = copy_to_user(user_relocs,
3581 &relocs[reloc_count],
3582 exec_list[i].relocation_count *
3583 sizeof(*relocs));
3584
3585 if (unwritten) {
3586 ret = -EFAULT;
3587 goto err;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003588 }
3589
3590 reloc_count += exec_list[i].relocation_count;
3591 }
3592
Florian Mickler2bc43b52009-04-06 22:55:41 +02003593err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003594 drm_free_large(relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003595
3596 return ret;
3597}
3598
Chris Wilson83d60792009-06-06 09:45:57 +01003599static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003600i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
Chris Wilson83d60792009-06-06 09:45:57 +01003601 uint64_t exec_offset)
3602{
3603 uint32_t exec_start, exec_len;
3604
3605 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3606 exec_len = (uint32_t) exec->batch_len;
3607
3608 if ((exec_start | exec_len) & 0x7)
3609 return -EINVAL;
3610
3611 if (!exec_start)
3612 return -EINVAL;
3613
3614 return 0;
3615}
3616
Chris Wilsone6c3a2a2010-09-23 23:04:43 +01003617static int
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003618i915_gem_wait_for_pending_flip(struct drm_device *dev,
3619 struct drm_gem_object **object_list,
3620 int count)
3621{
3622 drm_i915_private_t *dev_priv = dev->dev_private;
3623 struct drm_i915_gem_object *obj_priv;
3624 DEFINE_WAIT(wait);
3625 int i, ret = 0;
3626
3627 for (;;) {
3628 prepare_to_wait(&dev_priv->pending_flip_queue,
3629 &wait, TASK_INTERRUPTIBLE);
3630 for (i = 0; i < count; i++) {
Daniel Vetter23010e42010-03-08 13:35:02 +01003631 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003632 if (atomic_read(&obj_priv->pending_flip) > 0)
3633 break;
3634 }
3635 if (i == count)
3636 break;
3637
3638 if (!signal_pending(current)) {
3639 mutex_unlock(&dev->struct_mutex);
3640 schedule();
3641 mutex_lock(&dev->struct_mutex);
3642 continue;
3643 }
3644 ret = -ERESTARTSYS;
3645 break;
3646 }
3647 finish_wait(&dev_priv->pending_flip_queue, &wait);
3648
3649 return ret;
3650}
3651
Chris Wilson8dc5d142010-08-12 12:36:12 +01003652static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003653i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3654 struct drm_file *file_priv,
3655 struct drm_i915_gem_execbuffer2 *args,
3656 struct drm_i915_gem_exec_object2 *exec_list)
Eric Anholt673a3942008-07-30 12:06:12 -07003657{
3658 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003659 struct drm_gem_object **object_list = NULL;
3660 struct drm_gem_object *batch_obj;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003661 struct drm_i915_gem_object *obj_priv;
Eric Anholt201361a2009-03-11 12:30:04 -07003662 struct drm_clip_rect *cliprects = NULL;
Chris Wilson93533c22010-01-31 10:40:48 +00003663 struct drm_i915_gem_relocation_entry *relocs = NULL;
Chris Wilson8dc5d142010-08-12 12:36:12 +01003664 struct drm_i915_gem_request *request = NULL;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003665 int ret, ret2, i, pinned = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003666 uint64_t exec_offset;
Chris Wilson5c12a07e2010-09-22 11:22:30 +01003667 uint32_t reloc_index;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003668 int pin_tries, flips;
Eric Anholt673a3942008-07-30 12:06:12 -07003669
Zou Nan hai852835f2010-05-21 09:08:56 +08003670 struct intel_ring_buffer *ring = NULL;
3671
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003672 ret = i915_gem_check_is_wedged(dev);
3673 if (ret)
3674 return ret;
3675
Eric Anholt673a3942008-07-30 12:06:12 -07003676#if WATCH_EXEC
3677 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3678 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3679#endif
Zou Nan haid1b851f2010-05-21 09:08:57 +08003680 if (args->flags & I915_EXEC_BSD) {
3681 if (!HAS_BSD(dev)) {
3682 DRM_ERROR("execbuf with wrong flag\n");
3683 return -EINVAL;
3684 }
3685 ring = &dev_priv->bsd_ring;
3686 } else {
3687 ring = &dev_priv->render_ring;
3688 }
3689
Eric Anholt4f481ed2008-09-10 14:22:49 -07003690 if (args->buffer_count < 1) {
3691 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3692 return -EINVAL;
3693 }
Eric Anholtc8e0f932009-11-22 03:49:37 +01003694 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
Jesse Barnes76446ca2009-12-17 22:05:42 -05003695 if (object_list == NULL) {
3696 DRM_ERROR("Failed to allocate object list for %d buffers\n",
Eric Anholt673a3942008-07-30 12:06:12 -07003697 args->buffer_count);
3698 ret = -ENOMEM;
3699 goto pre_mutex_err;
3700 }
Eric Anholt673a3942008-07-30 12:06:12 -07003701
Eric Anholt201361a2009-03-11 12:30:04 -07003702 if (args->num_cliprects != 0) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003703 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3704 GFP_KERNEL);
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003705 if (cliprects == NULL) {
3706 ret = -ENOMEM;
Eric Anholt201361a2009-03-11 12:30:04 -07003707 goto pre_mutex_err;
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003708 }
Eric Anholt201361a2009-03-11 12:30:04 -07003709
3710 ret = copy_from_user(cliprects,
3711 (struct drm_clip_rect __user *)
3712 (uintptr_t) args->cliprects_ptr,
3713 sizeof(*cliprects) * args->num_cliprects);
3714 if (ret != 0) {
3715 DRM_ERROR("copy %d cliprects failed: %d\n",
3716 args->num_cliprects, ret);
Dan Carpenterc877cdce2010-06-23 19:03:01 +02003717 ret = -EFAULT;
Eric Anholt201361a2009-03-11 12:30:04 -07003718 goto pre_mutex_err;
3719 }
3720 }
3721
Chris Wilson8dc5d142010-08-12 12:36:12 +01003722 request = kzalloc(sizeof(*request), GFP_KERNEL);
3723 if (request == NULL) {
3724 ret = -ENOMEM;
3725 goto pre_mutex_err;
3726 }
3727
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003728 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3729 &relocs);
3730 if (ret != 0)
3731 goto pre_mutex_err;
3732
Chris Wilson76c1dec2010-09-25 11:22:51 +01003733 ret = i915_mutex_lock_interruptible(dev);
3734 if (ret)
3735 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003736
3737 i915_verify_inactive(dev, __FILE__, __LINE__);
3738
Eric Anholt673a3942008-07-30 12:06:12 -07003739 if (dev_priv->mm.suspended) {
Eric Anholt673a3942008-07-30 12:06:12 -07003740 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003741 ret = -EBUSY;
3742 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003743 }
3744
Keith Packardac94a962008-11-20 23:30:27 -08003745 /* Look up object handles */
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003746 flips = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003747 for (i = 0; i < args->buffer_count; i++) {
3748 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3749 exec_list[i].handle);
3750 if (object_list[i] == NULL) {
3751 DRM_ERROR("Invalid object handle %d at index %d\n",
3752 exec_list[i].handle, i);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003753 /* prevent error path from reading uninitialized data */
3754 args->buffer_count = i + 1;
Chris Wilsonbf79cb92010-08-04 14:19:46 +01003755 ret = -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07003756 goto err;
3757 }
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003758
Daniel Vetter23010e42010-03-08 13:35:02 +01003759 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003760 if (obj_priv->in_execbuffer) {
3761 DRM_ERROR("Object %p appears more than once in object list\n",
3762 object_list[i]);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003763 /* prevent error path from reading uninitialized data */
3764 args->buffer_count = i + 1;
Chris Wilsonbf79cb92010-08-04 14:19:46 +01003765 ret = -EINVAL;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003766 goto err;
3767 }
3768 obj_priv->in_execbuffer = true;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003769 flips += atomic_read(&obj_priv->pending_flip);
3770 }
3771
3772 if (flips > 0) {
3773 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3774 args->buffer_count);
3775 if (ret)
3776 goto err;
Keith Packardac94a962008-11-20 23:30:27 -08003777 }
Eric Anholt673a3942008-07-30 12:06:12 -07003778
Keith Packardac94a962008-11-20 23:30:27 -08003779 /* Pin and relocate */
3780 for (pin_tries = 0; ; pin_tries++) {
3781 ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003782 reloc_index = 0;
3783
Keith Packardac94a962008-11-20 23:30:27 -08003784 for (i = 0; i < args->buffer_count; i++) {
3785 object_list[i]->pending_read_domains = 0;
3786 object_list[i]->pending_write_domain = 0;
3787 ret = i915_gem_object_pin_and_relocate(object_list[i],
3788 file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003789 &exec_list[i],
3790 &relocs[reloc_index]);
Keith Packardac94a962008-11-20 23:30:27 -08003791 if (ret)
3792 break;
3793 pinned = i + 1;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003794 reloc_index += exec_list[i].relocation_count;
Keith Packardac94a962008-11-20 23:30:27 -08003795 }
3796 /* success */
3797 if (ret == 0)
3798 break;
3799
3800 /* error other than GTT full, or we've already tried again */
Chris Wilson2939e1f2009-06-06 09:46:03 +01003801 if (ret != -ENOSPC || pin_tries >= 1) {
Chris Wilson07f73f62009-09-14 16:50:30 +01003802 if (ret != -ERESTARTSYS) {
3803 unsigned long long total_size = 0;
Chris Wilson3d1cc472010-05-27 13:18:19 +01003804 int num_fences = 0;
3805 for (i = 0; i < args->buffer_count; i++) {
Chris Wilson43b27f42010-07-02 08:57:15 +01003806 obj_priv = to_intel_bo(object_list[i]);
Chris Wilson3d1cc472010-05-27 13:18:19 +01003807
Chris Wilson07f73f62009-09-14 16:50:30 +01003808 total_size += object_list[i]->size;
Chris Wilson3d1cc472010-05-27 13:18:19 +01003809 num_fences +=
3810 exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
3811 obj_priv->tiling_mode != I915_TILING_NONE;
3812 }
3813 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
Chris Wilson07f73f62009-09-14 16:50:30 +01003814 pinned+1, args->buffer_count,
Chris Wilson3d1cc472010-05-27 13:18:19 +01003815 total_size, num_fences,
3816 ret);
Chris Wilson07f73f62009-09-14 16:50:30 +01003817 DRM_ERROR("%d objects [%d pinned], "
3818 "%d object bytes [%d pinned], "
3819 "%d/%d gtt bytes\n",
3820 atomic_read(&dev->object_count),
3821 atomic_read(&dev->pin_count),
3822 atomic_read(&dev->object_memory),
3823 atomic_read(&dev->pin_memory),
3824 atomic_read(&dev->gtt_memory),
3825 dev->gtt_total);
3826 }
Eric Anholt673a3942008-07-30 12:06:12 -07003827 goto err;
3828 }
Keith Packardac94a962008-11-20 23:30:27 -08003829
3830 /* unpin all of our buffers */
3831 for (i = 0; i < pinned; i++)
3832 i915_gem_object_unpin(object_list[i]);
Eric Anholtb1177632008-12-10 10:09:41 -08003833 pinned = 0;
Keith Packardac94a962008-11-20 23:30:27 -08003834
3835 /* evict everyone we can from the aperture */
3836 ret = i915_gem_evict_everything(dev);
Chris Wilson07f73f62009-09-14 16:50:30 +01003837 if (ret && ret != -ENOSPC)
Keith Packardac94a962008-11-20 23:30:27 -08003838 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003839 }
3840
3841 /* Set the pending read domains for the batch buffer to COMMAND */
3842 batch_obj = object_list[args->buffer_count-1];
Chris Wilson5f26a2c2009-06-06 09:45:58 +01003843 if (batch_obj->pending_write_domain) {
3844 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3845 ret = -EINVAL;
3846 goto err;
3847 }
3848 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
Eric Anholt673a3942008-07-30 12:06:12 -07003849
Chris Wilson83d60792009-06-06 09:45:57 +01003850 /* Sanity check the batch buffer, prior to moving objects */
3851 exec_offset = exec_list[args->buffer_count - 1].offset;
                                             3852	ret = i915_gem_check_execbuffer(args, exec_offset);
3853 if (ret != 0) {
3854 DRM_ERROR("execbuf with invalid offset/length\n");
3855 goto err;
3856 }
3857
Eric Anholt673a3942008-07-30 12:06:12 -07003858 i915_verify_inactive(dev, __FILE__, __LINE__);
3859
Keith Packard646f0f62008-11-20 23:23:03 -08003860 /* Zero the global flush/invalidate flags. These
3861 * will be modified as new domains are computed
3862 * for each object
3863 */
3864 dev->invalidate_domains = 0;
3865 dev->flush_domains = 0;
Chris Wilson92204342010-09-18 11:02:01 +01003866 dev_priv->mm.flush_rings = 0;
Keith Packard646f0f62008-11-20 23:23:03 -08003867
Eric Anholt673a3942008-07-30 12:06:12 -07003868 for (i = 0; i < args->buffer_count; i++) {
3869 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003870
Keith Packard646f0f62008-11-20 23:23:03 -08003871 /* Compute new gpu domains and update invalidate/flush */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003872 i915_gem_object_set_to_gpu_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003873 }
3874
3875 i915_verify_inactive(dev, __FILE__, __LINE__);
3876
Keith Packard646f0f62008-11-20 23:23:03 -08003877 if (dev->invalidate_domains | dev->flush_domains) {
3878#if WATCH_EXEC
3879 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3880 __func__,
3881 dev->invalidate_domains,
3882 dev->flush_domains);
3883#endif
Chris Wilsonc78ec302010-09-20 12:50:23 +01003884 i915_gem_flush(dev, file_priv,
Keith Packard646f0f62008-11-20 23:23:03 -08003885 dev->invalidate_domains,
Chris Wilson92204342010-09-18 11:02:01 +01003886 dev->flush_domains,
3887 dev_priv->mm.flush_rings);
Daniel Vettera6910432010-02-02 17:08:37 +01003888 }
3889
Eric Anholtefbeed92009-02-19 14:54:51 -08003890 for (i = 0; i < args->buffer_count; i++) {
3891 struct drm_gem_object *obj = object_list[i];
Daniel Vetter23010e42010-03-08 13:35:02 +01003892 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003893 uint32_t old_write_domain = obj->write_domain;
Eric Anholtefbeed92009-02-19 14:54:51 -08003894
3895 obj->write_domain = obj->pending_write_domain;
Daniel Vetter99fcb762010-02-07 16:20:18 +01003896 if (obj->write_domain)
3897 list_move_tail(&obj_priv->gpu_write_list,
3898 &dev_priv->mm.gpu_write_list);
3899 else
3900 list_del_init(&obj_priv->gpu_write_list);
3901
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003902 trace_i915_gem_object_change_domain(obj,
3903 obj->read_domains,
3904 old_write_domain);
Eric Anholtefbeed92009-02-19 14:54:51 -08003905 }
3906
Eric Anholt673a3942008-07-30 12:06:12 -07003907 i915_verify_inactive(dev, __FILE__, __LINE__);
3908
3909#if WATCH_COHERENCY
3910 for (i = 0; i < args->buffer_count; i++) {
3911 i915_gem_object_check_coherency(object_list[i],
3912 exec_list[i].handle);
3913 }
3914#endif
3915
Eric Anholt673a3942008-07-30 12:06:12 -07003916#if WATCH_EXEC
Ben Gamari6911a9b2009-04-02 11:24:54 -07003917 i915_gem_dump_object(batch_obj,
Eric Anholt673a3942008-07-30 12:06:12 -07003918 args->batch_len,
3919 __func__,
3920 ~0);
3921#endif
3922
Eric Anholt673a3942008-07-30 12:06:12 -07003923 /* Exec the batchbuffer */
Zou Nan hai852835f2010-05-21 09:08:56 +08003924 ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3925 cliprects, exec_offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003926 if (ret) {
3927 DRM_ERROR("dispatch failed %d\n", ret);
3928 goto err;
3929 }
3930
3931 /*
3932 * Ensure that the commands in the batch buffer are
3933 * finished before the interrupt fires
3934 */
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01003935 i915_retire_commands(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07003936
3937 i915_verify_inactive(dev, __FILE__, __LINE__);
3938
Daniel Vetter617dbe22010-02-11 22:16:02 +01003939 for (i = 0; i < args->buffer_count; i++) {
3940 struct drm_gem_object *obj = object_list[i];
3941 obj_priv = to_intel_bo(obj);
3942
3943 i915_gem_object_move_to_active(obj, ring);
Daniel Vetter617dbe22010-02-11 22:16:02 +01003944 }
Chris Wilsona56ba562010-09-28 10:07:56 +01003945
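 	/* Queue a request to track this batch on the ring, so that the
 	 * objects just marked active can later be retired or waited upon.
 	 */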
Chris Wilson5c12a07e2010-09-22 11:22:30 +01003946 i915_add_request(dev, file_priv, request, ring);
Chris Wilson8dc5d142010-08-12 12:36:12 +01003947 request = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07003948
Eric Anholt673a3942008-07-30 12:06:12 -07003949 i915_verify_inactive(dev, __FILE__, __LINE__);
3950
Eric Anholt673a3942008-07-30 12:06:12 -07003951err:
Julia Lawallaad87df2008-12-21 16:28:47 +01003952 for (i = 0; i < pinned; i++)
3953 i915_gem_object_unpin(object_list[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07003954
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003955 for (i = 0; i < args->buffer_count; i++) {
3956 if (object_list[i]) {
Daniel Vetter23010e42010-03-08 13:35:02 +01003957 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003958 obj_priv->in_execbuffer = false;
3959 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003960 drm_gem_object_unreference(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003961 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003962
Eric Anholt673a3942008-07-30 12:06:12 -07003963 mutex_unlock(&dev->struct_mutex);
3964
Chris Wilson93533c22010-01-31 10:40:48 +00003965pre_mutex_err:
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003966 /* Copy the updated relocations out regardless of current error
3967 * state. Failure to update the relocs would mean that the next
3968 * time userland calls execbuf, it would do so with presumed offset
3969 * state that didn't match the actual object state.
3970 */
3971 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3972 relocs);
3973 if (ret2 != 0) {
3974 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3975
3976 if (ret == 0)
3977 ret = ret2;
3978 }
3979
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003980 drm_free_large(object_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07003981 kfree(cliprects);
Chris Wilson8dc5d142010-08-12 12:36:12 +01003982 kfree(request);
Eric Anholt673a3942008-07-30 12:06:12 -07003983
3984 return ret;
3985}
3986
Jesse Barnes76446ca2009-12-17 22:05:42 -05003987/*
3988 * Legacy execbuffer just creates an exec2 list from the original exec object
3989 * list array and passes it to the real function.
3990 */
3991int
3992i915_gem_execbuffer(struct drm_device *dev, void *data,
3993 struct drm_file *file_priv)
3994{
3995 struct drm_i915_gem_execbuffer *args = data;
3996 struct drm_i915_gem_execbuffer2 exec2;
3997 struct drm_i915_gem_exec_object *exec_list = NULL;
3998 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3999 int ret, i;
4000
4001#if WATCH_EXEC
4002 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4003 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4004#endif
4005
4006 if (args->buffer_count < 1) {
4007 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
4008 return -EINVAL;
4009 }
4010
4011 /* Copy in the exec list from userland */
4012 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
4013 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4014 if (exec_list == NULL || exec2_list == NULL) {
4015 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4016 args->buffer_count);
4017 drm_free_large(exec_list);
4018 drm_free_large(exec2_list);
4019 return -ENOMEM;
4020 }
4021 ret = copy_from_user(exec_list,
4022 (struct drm_i915_relocation_entry __user *)
4023 (uintptr_t) args->buffers_ptr,
4024 sizeof(*exec_list) * args->buffer_count);
4025 if (ret != 0) {
4026 DRM_ERROR("copy %d exec entries failed %d\n",
4027 args->buffer_count, ret);
4028 drm_free_large(exec_list);
4029 drm_free_large(exec2_list);
4030 return -EFAULT;
4031 }
4032
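 	/* Translate each legacy exec entry into an exec2 entry.  Pre-gen4
 	 * hardware relies on fence registers for tiled access, so every
 	 * buffer is conservatively marked as needing a fence.
 	 */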
4033 for (i = 0; i < args->buffer_count; i++) {
4034 exec2_list[i].handle = exec_list[i].handle;
4035 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4036 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4037 exec2_list[i].alignment = exec_list[i].alignment;
4038 exec2_list[i].offset = exec_list[i].offset;
Chris Wilsona6c45cf2010-09-17 00:32:17 +01004039 if (INTEL_INFO(dev)->gen < 4)
Jesse Barnes76446ca2009-12-17 22:05:42 -05004040 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4041 else
4042 exec2_list[i].flags = 0;
4043 }
4044
4045 exec2.buffers_ptr = args->buffers_ptr;
4046 exec2.buffer_count = args->buffer_count;
4047 exec2.batch_start_offset = args->batch_start_offset;
4048 exec2.batch_len = args->batch_len;
4049 exec2.DR1 = args->DR1;
4050 exec2.DR4 = args->DR4;
4051 exec2.num_cliprects = args->num_cliprects;
4052 exec2.cliprects_ptr = args->cliprects_ptr;
Zou Nan hai852835f2010-05-21 09:08:56 +08004053 exec2.flags = I915_EXEC_RENDER;
Jesse Barnes76446ca2009-12-17 22:05:42 -05004054
4055 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
4056 if (!ret) {
4057 /* Copy the new buffer offsets back to the user's exec list. */
4058 for (i = 0; i < args->buffer_count; i++)
4059 exec_list[i].offset = exec2_list[i].offset;
4060 /* ... and back out to userspace */
4061 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4062 (uintptr_t) args->buffers_ptr,
4063 exec_list,
4064 sizeof(*exec_list) * args->buffer_count);
4065 if (ret) {
4066 ret = -EFAULT;
4067 DRM_ERROR("failed to copy %d exec entries "
4068 "back to user (%d)\n",
4069 args->buffer_count, ret);
4070 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05004071 }
4072
4073 drm_free_large(exec_list);
4074 drm_free_large(exec2_list);
4075 return ret;
4076}
4077
4078int
4079i915_gem_execbuffer2(struct drm_device *dev, void *data,
4080 struct drm_file *file_priv)
4081{
4082 struct drm_i915_gem_execbuffer2 *args = data;
4083 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4084 int ret;
4085
4086#if WATCH_EXEC
4087 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4088 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4089#endif
4090
4091 if (args->buffer_count < 1) {
4092 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4093 return -EINVAL;
4094 }
4095
4096 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4097 if (exec2_list == NULL) {
4098 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4099 args->buffer_count);
4100 return -ENOMEM;
4101 }
4102 ret = copy_from_user(exec2_list,
4103 (struct drm_i915_relocation_entry __user *)
4104 (uintptr_t) args->buffers_ptr,
4105 sizeof(*exec2_list) * args->buffer_count);
4106 if (ret != 0) {
4107 DRM_ERROR("copy %d exec entries failed %d\n",
4108 args->buffer_count, ret);
4109 drm_free_large(exec2_list);
4110 return -EFAULT;
4111 }
4112
4113 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4114 if (!ret) {
4115 /* Copy the new buffer offsets back to the user's exec list. */
4116 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4117 (uintptr_t) args->buffers_ptr,
4118 exec2_list,
4119 sizeof(*exec2_list) * args->buffer_count);
4120 if (ret) {
4121 ret = -EFAULT;
4122 DRM_ERROR("failed to copy %d exec entries "
4123 "back to user (%d)\n",
4124 args->buffer_count, ret);
4125 }
4126 }
4127
4128 drm_free_large(exec2_list);
4129 return ret;
4130}
4131
Eric Anholt673a3942008-07-30 12:06:12 -07004132int
4133i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4134{
4135 struct drm_device *dev = obj->dev;
Chris Wilsonf13d3f72010-09-20 17:36:15 +01004136 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01004137 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004138 int ret;
4139
Daniel Vetter778c3542010-05-13 11:49:44 +02004140 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4141
Eric Anholt673a3942008-07-30 12:06:12 -07004142 i915_verify_inactive(dev, __FILE__, __LINE__);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01004143
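 	/* If the object is already bound but at an unsuitable alignment,
 	 * unbind it here so that it can be rebound correctly below.
 	 */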
4144 if (obj_priv->gtt_space != NULL) {
4145 if (alignment == 0)
4146 alignment = i915_gem_get_gtt_alignment(obj);
4147 if (obj_priv->gtt_offset & (alignment - 1)) {
Chris Wilsonae7d49d2010-08-04 12:37:41 +01004148 WARN(obj_priv->pin_count,
4149 "bo is already pinned with incorrect alignment:"
4150 " offset=%x, req.alignment=%x\n",
4151 obj_priv->gtt_offset, alignment);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01004152 ret = i915_gem_object_unbind(obj);
4153 if (ret)
4154 return ret;
4155 }
4156 }
4157
Eric Anholt673a3942008-07-30 12:06:12 -07004158 if (obj_priv->gtt_space == NULL) {
4159 ret = i915_gem_object_bind_to_gtt(obj, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01004160 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07004161 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00004162 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05004163
Eric Anholt673a3942008-07-30 12:06:12 -07004164 obj_priv->pin_count++;
4165
4166 	/* On the first pin, account for the pinned memory and, if the
4167 	 * object is not active, move it onto the pinned list.
4168 	 */
4169 if (obj_priv->pin_count == 1) {
4170 atomic_inc(&dev->pin_count);
4171 atomic_add(obj->size, &dev->pin_memory);
Chris Wilsonf13d3f72010-09-20 17:36:15 +01004172 if (!obj_priv->active)
4173 list_move_tail(&obj_priv->list,
4174 &dev_priv->mm.pinned_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004175 }
4176 i915_verify_inactive(dev, __FILE__, __LINE__);
4177
4178 return 0;
4179}
4180
4181void
4182i915_gem_object_unpin(struct drm_gem_object *obj)
4183{
4184 struct drm_device *dev = obj->dev;
4185 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01004186 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004187
4188 i915_verify_inactive(dev, __FILE__, __LINE__);
4189 obj_priv->pin_count--;
4190 BUG_ON(obj_priv->pin_count < 0);
4191 BUG_ON(obj_priv->gtt_space == NULL);
4192
4193 	/* If the object is no longer pinned and is not active,
4194 	 * put it back on the inactive list.
4195 	 */
4197 if (obj_priv->pin_count == 0) {
Chris Wilsonf13d3f72010-09-20 17:36:15 +01004198 if (!obj_priv->active)
Eric Anholt673a3942008-07-30 12:06:12 -07004199 list_move_tail(&obj_priv->list,
4200 &dev_priv->mm.inactive_list);
4201 atomic_dec(&dev->pin_count);
4202 atomic_sub(obj->size, &dev->pin_memory);
4203 }
4204 i915_verify_inactive(dev, __FILE__, __LINE__);
4205}
4206
4207int
4208i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4209 struct drm_file *file_priv)
4210{
4211 struct drm_i915_gem_pin *args = data;
4212 struct drm_gem_object *obj;
4213 struct drm_i915_gem_object *obj_priv;
4214 int ret;
4215
Eric Anholt673a3942008-07-30 12:06:12 -07004216 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4217 if (obj == NULL) {
4218 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
4219 args->handle);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01004220 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07004221 }
Daniel Vetter23010e42010-03-08 13:35:02 +01004222 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004223
Chris Wilson76c1dec2010-09-25 11:22:51 +01004224 ret = i915_mutex_lock_interruptible(dev);
4225 if (ret) {
4226 drm_gem_object_unreference_unlocked(obj);
4227 return ret;
4228 }
4229
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004230 if (obj_priv->madv != I915_MADV_WILLNEED) {
4231 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson3ef94da2009-09-14 16:50:29 +01004232 drm_gem_object_unreference(obj);
4233 mutex_unlock(&dev->struct_mutex);
4234 return -EINVAL;
4235 }
4236
Jesse Barnes79e53942008-11-07 14:24:08 -08004237 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4238 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4239 args->handle);
Chris Wilson96dec612009-02-08 19:08:04 +00004240 drm_gem_object_unreference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004241 mutex_unlock(&dev->struct_mutex);
Jesse Barnes79e53942008-11-07 14:24:08 -08004242 return -EINVAL;
4243 }
4244
4245 obj_priv->user_pin_count++;
4246 obj_priv->pin_filp = file_priv;
4247 if (obj_priv->user_pin_count == 1) {
4248 ret = i915_gem_object_pin(obj, args->alignment);
4249 if (ret != 0) {
4250 drm_gem_object_unreference(obj);
4251 mutex_unlock(&dev->struct_mutex);
4252 return ret;
4253 }
Eric Anholt673a3942008-07-30 12:06:12 -07004254 }
4255
4256 /* XXX - flush the CPU caches for pinned objects
4257 * as the X server doesn't manage domains yet
4258 */
Eric Anholte47c68e2008-11-14 13:35:19 -08004259 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004260 args->offset = obj_priv->gtt_offset;
4261 drm_gem_object_unreference(obj);
4262 mutex_unlock(&dev->struct_mutex);
4263
4264 return 0;
4265}
4266
4267int
4268i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4269 struct drm_file *file_priv)
4270{
4271 struct drm_i915_gem_pin *args = data;
4272 struct drm_gem_object *obj;
Jesse Barnes79e53942008-11-07 14:24:08 -08004273 struct drm_i915_gem_object *obj_priv;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004274 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004275
4276 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4277 if (obj == NULL) {
4278 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
4279 args->handle);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01004280 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07004281 }
4282
Daniel Vetter23010e42010-03-08 13:35:02 +01004283 obj_priv = to_intel_bo(obj);
Chris Wilson76c1dec2010-09-25 11:22:51 +01004284
4285 ret = i915_mutex_lock_interruptible(dev);
4286 if (ret) {
4287 drm_gem_object_unreference_unlocked(obj);
4288 return ret;
4289 }
4290
Jesse Barnes79e53942008-11-07 14:24:08 -08004291 if (obj_priv->pin_filp != file_priv) {
4292 		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
4293 args->handle);
4294 drm_gem_object_unreference(obj);
4295 mutex_unlock(&dev->struct_mutex);
4296 return -EINVAL;
4297 }
4298 obj_priv->user_pin_count--;
4299 if (obj_priv->user_pin_count == 0) {
4300 obj_priv->pin_filp = NULL;
4301 i915_gem_object_unpin(obj);
4302 }
Eric Anholt673a3942008-07-30 12:06:12 -07004303
4304 drm_gem_object_unreference(obj);
4305 mutex_unlock(&dev->struct_mutex);
4306 return 0;
4307}
4308
4309int
4310i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4311 struct drm_file *file_priv)
4312{
4313 struct drm_i915_gem_busy *args = data;
4314 struct drm_gem_object *obj;
4315 struct drm_i915_gem_object *obj_priv;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01004316 int ret;
4317
Eric Anholt673a3942008-07-30 12:06:12 -07004318 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4319 if (obj == NULL) {
4320 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
4321 args->handle);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01004322 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07004323 }
4324
Chris Wilson76c1dec2010-09-25 11:22:51 +01004325 ret = i915_mutex_lock_interruptible(dev);
4326 if (ret) {
4327 drm_gem_object_unreference_unlocked(obj);
4328 return ret;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01004329 }
4330
Chris Wilson0be555b2010-08-04 15:36:30 +01004331 /* Count all active objects as busy, even if they are currently not used
4332 * by the gpu. Users of this interface expect objects to eventually
4333 * become non-busy without any further actions, therefore emit any
4334 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08004335 */
Chris Wilson0be555b2010-08-04 15:36:30 +01004336 obj_priv = to_intel_bo(obj);
4337 args->busy = obj_priv->active;
4338 if (args->busy) {
4339 /* Unconditionally flush objects, even when the gpu still uses this
4340 * object. Userspace calling this function indicates that it wants to
4341 		 * use this buffer sooner rather than later, so issuing the required
4342 * flush earlier is beneficial.
4343 */
Chris Wilsonc78ec302010-09-20 12:50:23 +01004344 if (obj->write_domain & I915_GEM_GPU_DOMAINS)
4345 i915_gem_flush_ring(dev, file_priv,
Chris Wilson92204342010-09-18 11:02:01 +01004346 obj_priv->ring,
4347 0, obj->write_domain);
Chris Wilson0be555b2010-08-04 15:36:30 +01004348
4349 /* Update the active list for the hardware's current position.
4350 * Otherwise this only updates on a delayed timer or when irqs
4351 * are actually unmasked, and our working set ends up being
4352 * larger than required.
4353 */
4354 i915_gem_retire_requests_ring(dev, obj_priv->ring);
4355
4356 args->busy = obj_priv->active;
4357 }
Eric Anholt673a3942008-07-30 12:06:12 -07004358
4359 drm_gem_object_unreference(obj);
4360 mutex_unlock(&dev->struct_mutex);
Chris Wilson76c1dec2010-09-25 11:22:51 +01004361 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004362}
4363
4364int
4365i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4366 struct drm_file *file_priv)
4367{
4368 return i915_gem_ring_throttle(dev, file_priv);
4369}
4370
Chris Wilson3ef94da2009-09-14 16:50:29 +01004371int
4372i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4373 struct drm_file *file_priv)
4374{
4375 struct drm_i915_gem_madvise *args = data;
4376 struct drm_gem_object *obj;
4377 struct drm_i915_gem_object *obj_priv;
Chris Wilson76c1dec2010-09-25 11:22:51 +01004378 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004379
4380 switch (args->madv) {
4381 case I915_MADV_DONTNEED:
4382 case I915_MADV_WILLNEED:
4383 break;
4384 default:
4385 return -EINVAL;
4386 }
4387
4388 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4389 if (obj == NULL) {
4390 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4391 args->handle);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01004392 return -ENOENT;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004393 }
Daniel Vetter23010e42010-03-08 13:35:02 +01004394 obj_priv = to_intel_bo(obj);
Chris Wilson3ef94da2009-09-14 16:50:29 +01004395
Chris Wilson76c1dec2010-09-25 11:22:51 +01004396 ret = i915_mutex_lock_interruptible(dev);
4397 if (ret) {
4398 drm_gem_object_unreference_unlocked(obj);
4399 return ret;
4400 }
4401
Chris Wilson3ef94da2009-09-14 16:50:29 +01004402 if (obj_priv->pin_count) {
4403 drm_gem_object_unreference(obj);
4404 mutex_unlock(&dev->struct_mutex);
4405
4406 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4407 return -EINVAL;
4408 }
4409
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004410 if (obj_priv->madv != __I915_MADV_PURGED)
4411 obj_priv->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004412
Chris Wilson2d7ef392009-09-20 23:13:10 +01004413 /* if the object is no longer bound, discard its backing storage */
4414 if (i915_gem_object_is_purgeable(obj_priv) &&
4415 obj_priv->gtt_space == NULL)
4416 i915_gem_object_truncate(obj);
4417
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004418 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4419
Chris Wilson3ef94da2009-09-14 16:50:29 +01004420 drm_gem_object_unreference(obj);
4421 mutex_unlock(&dev->struct_mutex);
4422
4423 return 0;
4424}
4425
Daniel Vetterac52bc52010-04-09 19:05:06 +00004426struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4427 size_t size)
4428{
Daniel Vetterc397b902010-04-09 19:05:07 +00004429 struct drm_i915_gem_object *obj;
4430
4431 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4432 if (obj == NULL)
4433 return NULL;
4434
4435 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4436 kfree(obj);
4437 return NULL;
4438 }
4439
4440 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4441 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4442
4443 obj->agp_type = AGP_USER_MEMORY;
Daniel Vetter62b8b212010-04-09 19:05:08 +00004444 obj->base.driver_private = NULL;
Daniel Vetterc397b902010-04-09 19:05:07 +00004445 obj->fence_reg = I915_FENCE_REG_NONE;
4446 INIT_LIST_HEAD(&obj->list);
4447 INIT_LIST_HEAD(&obj->gpu_write_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00004448 obj->madv = I915_MADV_WILLNEED;
4449
4450 trace_i915_gem_object_create(&obj->base);
4451
4452 return &obj->base;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004453}
4454
Eric Anholt673a3942008-07-30 12:06:12 -07004455int i915_gem_init_object(struct drm_gem_object *obj)
4456{
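 	/* i915 allocates objects through i915_gem_alloc_object(); reaching
 	 * this core callback is a driver bug.
 	 */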
Daniel Vetterc397b902010-04-09 19:05:07 +00004457 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08004458
Eric Anholt673a3942008-07-30 12:06:12 -07004459 return 0;
4460}
4461
Chris Wilsonbe726152010-07-23 23:18:50 +01004462static void i915_gem_free_object_tail(struct drm_gem_object *obj)
4463{
4464 struct drm_device *dev = obj->dev;
4465 drm_i915_private_t *dev_priv = dev->dev_private;
4466 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4467 int ret;
4468
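 	/* If unbinding was interrupted by a signal, defer the free so it
 	 * can be retried later from the deferred_free_list.
 	 */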
4469 ret = i915_gem_object_unbind(obj);
4470 if (ret == -ERESTARTSYS) {
4471 list_move(&obj_priv->list,
4472 &dev_priv->mm.deferred_free_list);
4473 return;
4474 }
4475
4476 if (obj_priv->mmap_offset)
4477 i915_gem_free_mmap_offset(obj);
4478
4479 drm_gem_object_release(obj);
4480
4481 kfree(obj_priv->page_cpu_valid);
4482 kfree(obj_priv->bit_17);
4483 kfree(obj_priv);
4484}
4485
Eric Anholt673a3942008-07-30 12:06:12 -07004486void i915_gem_free_object(struct drm_gem_object *obj)
4487{
Jesse Barnesde151cf2008-11-12 10:03:55 -08004488 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01004489 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004490
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004491 trace_i915_gem_object_destroy(obj);
4492
Eric Anholt673a3942008-07-30 12:06:12 -07004493 while (obj_priv->pin_count > 0)
4494 i915_gem_object_unpin(obj);
4495
Dave Airlie71acb5e2008-12-30 20:31:46 +10004496 if (obj_priv->phys_obj)
4497 i915_gem_detach_phys_object(dev, obj);
4498
Chris Wilsonbe726152010-07-23 23:18:50 +01004499 i915_gem_free_object_tail(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004500}
4501
Jesse Barnes5669fca2009-02-17 15:13:31 -08004502int
Eric Anholt673a3942008-07-30 12:06:12 -07004503i915_gem_idle(struct drm_device *dev)
4504{
4505 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00004506 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004507
Keith Packard6dbe2772008-10-14 21:41:13 -07004508 mutex_lock(&dev->struct_mutex);
4509
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004510 if (dev_priv->mm.suspended ||
Zou Nan haid1b851f2010-05-21 09:08:57 +08004511 (dev_priv->render_ring.gem_object == NULL) ||
4512 (HAS_BSD(dev) &&
4513 dev_priv->bsd_ring.gem_object == NULL)) {
Keith Packard6dbe2772008-10-14 21:41:13 -07004514 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004515 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07004516 }
Eric Anholt673a3942008-07-30 12:06:12 -07004517
Chris Wilson29105cc2010-01-07 10:39:13 +00004518 ret = i915_gpu_idle(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004519 if (ret) {
4520 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004521 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07004522 }
Eric Anholt673a3942008-07-30 12:06:12 -07004523
Chris Wilson29105cc2010-01-07 10:39:13 +00004524 /* Under UMS, be paranoid and evict. */
4525 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01004526 ret = i915_gem_evict_inactive(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004527 if (ret) {
4528 mutex_unlock(&dev->struct_mutex);
4529 return ret;
4530 }
4531 }
4532
4533 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4534 * We need to replace this with a semaphore, or something.
4535 	 * And stop overloading mm.suspended for it!
4536 */
4537 dev_priv->mm.suspended = 1;
Daniel Vetterbc0c7f12010-08-20 18:18:48 +02004538 del_timer_sync(&dev_priv->hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00004539
4540 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004541 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004542
Keith Packard6dbe2772008-10-14 21:41:13 -07004543 mutex_unlock(&dev->struct_mutex);
4544
Chris Wilson29105cc2010-01-07 10:39:13 +00004545 /* Cancel the retire work handler, which should be idle now. */
4546 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4547
Eric Anholt673a3942008-07-30 12:06:12 -07004548 return 0;
4549}
4550
Jesse Barnese552eb72010-04-21 11:39:23 -07004551/*
4552 * 965+ support PIPE_CONTROL commands, which provide finer grained control
4553 * over cache flushing.
4554 */
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004555static int
Jesse Barnese552eb72010-04-21 11:39:23 -07004556i915_gem_init_pipe_control(struct drm_device *dev)
4557{
4558 drm_i915_private_t *dev_priv = dev->dev_private;
4559 struct drm_gem_object *obj;
4560 struct drm_i915_gem_object *obj_priv;
4561 int ret;
4562
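 	/* Allocate a single page that the GPU writes seqnos into via
 	 * PIPE_CONTROL; it is kept pinned and kmapped until cleanup.
 	 */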
Eric Anholt34dc4d42010-05-07 14:30:03 -07004563 obj = i915_gem_alloc_object(dev, 4096);
Jesse Barnese552eb72010-04-21 11:39:23 -07004564 if (obj == NULL) {
4565 DRM_ERROR("Failed to allocate seqno page\n");
4566 ret = -ENOMEM;
4567 goto err;
4568 }
4569 obj_priv = to_intel_bo(obj);
4570 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4571
4572 ret = i915_gem_object_pin(obj, 4096);
4573 if (ret)
4574 goto err_unref;
4575
4576 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4577 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4578 	if (dev_priv->seqno_page == NULL) {
4579 		ret = -ENOMEM;
 		goto err_unpin;
 	}
4580
4581 dev_priv->seqno_obj = obj;
4582 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4583
4584 return 0;
4585
4586err_unpin:
4587 i915_gem_object_unpin(obj);
4588err_unref:
4589 drm_gem_object_unreference(obj);
4590err:
4591 return ret;
4592}
4593
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004594
4595static void
Jesse Barnese552eb72010-04-21 11:39:23 -07004596i915_gem_cleanup_pipe_control(struct drm_device *dev)
4597{
4598 drm_i915_private_t *dev_priv = dev->dev_private;
4599 struct drm_gem_object *obj;
4600 struct drm_i915_gem_object *obj_priv;
4601
4602 obj = dev_priv->seqno_obj;
4603 obj_priv = to_intel_bo(obj);
4604 kunmap(obj_priv->pages[0]);
4605 i915_gem_object_unpin(obj);
4606 drm_gem_object_unreference(obj);
4607 dev_priv->seqno_obj = NULL;
4608
4609 dev_priv->seqno_page = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07004610}
4611
Eric Anholt673a3942008-07-30 12:06:12 -07004612int
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004613i915_gem_init_ringbuffer(struct drm_device *dev)
4614{
4615 drm_i915_private_t *dev_priv = dev->dev_private;
4616 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004617
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004618 if (HAS_PIPE_CONTROL(dev)) {
4619 ret = i915_gem_init_pipe_control(dev);
4620 if (ret)
4621 return ret;
4622 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004623
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004624 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004625 if (ret)
4626 goto cleanup_pipe_control;
4627
4628 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08004629 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004630 if (ret)
4631 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004632 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004633
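 	/* Seqnos are handed out starting from 1, leaving 0 free to mean
 	 * "no request".
 	 */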
Chris Wilson6f392d52010-08-07 11:01:22 +01004634 dev_priv->next_seqno = 1;
4635
Chris Wilson68f95ba2010-05-27 13:18:22 +01004636 return 0;
4637
4638cleanup_render_ring:
4639 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4640cleanup_pipe_control:
4641 if (HAS_PIPE_CONTROL(dev))
4642 i915_gem_cleanup_pipe_control(dev);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004643 return ret;
4644}
4645
4646void
4647i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4648{
4649 drm_i915_private_t *dev_priv = dev->dev_private;
4650
4651 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
Zou Nan haid1b851f2010-05-21 09:08:57 +08004652 if (HAS_BSD(dev))
4653 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004654 if (HAS_PIPE_CONTROL(dev))
4655 i915_gem_cleanup_pipe_control(dev);
4656}
4657
4658int
Eric Anholt673a3942008-07-30 12:06:12 -07004659i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4660 struct drm_file *file_priv)
4661{
4662 drm_i915_private_t *dev_priv = dev->dev_private;
4663 int ret;
4664
Jesse Barnes79e53942008-11-07 14:24:08 -08004665 if (drm_core_check_feature(dev, DRIVER_MODESET))
4666 return 0;
4667
Ben Gamariba1234d2009-09-14 17:48:47 -04004668 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004669 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04004670 atomic_set(&dev_priv->mm.wedged, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004671 }
4672
Eric Anholt673a3942008-07-30 12:06:12 -07004673 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004674 dev_priv->mm.suspended = 0;
4675
4676 ret = i915_gem_init_ringbuffer(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004677 if (ret != 0) {
4678 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004679 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004680 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004681
Zou Nan hai852835f2010-05-21 09:08:56 +08004682 BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
Zou Nan haid1b851f2010-05-21 09:08:57 +08004683 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004684 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4685 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
Zou Nan hai852835f2010-05-21 09:08:56 +08004686 BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
Zou Nan haid1b851f2010-05-21 09:08:57 +08004687 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004688 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004689
Chris Wilson5f353082010-06-07 14:03:03 +01004690 ret = drm_irq_install(dev);
4691 if (ret)
4692 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004693
Eric Anholt673a3942008-07-30 12:06:12 -07004694 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01004695
4696cleanup_ringbuffer:
4697 mutex_lock(&dev->struct_mutex);
4698 i915_gem_cleanup_ringbuffer(dev);
4699 dev_priv->mm.suspended = 1;
4700 mutex_unlock(&dev->struct_mutex);
4701
4702 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004703}
4704
4705int
4706i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4707 struct drm_file *file_priv)
4708{
Jesse Barnes79e53942008-11-07 14:24:08 -08004709 if (drm_core_check_feature(dev, DRIVER_MODESET))
4710 return 0;
4711
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004712 drm_irq_uninstall(dev);
Linus Torvaldse6890f62009-09-08 17:09:24 -07004713 return i915_gem_idle(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004714}
4715
4716void
4717i915_gem_lastclose(struct drm_device *dev)
4718{
4719 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004720
Eric Anholte806b492009-01-22 09:56:58 -08004721 if (drm_core_check_feature(dev, DRIVER_MODESET))
4722 return;
4723
Keith Packard6dbe2772008-10-14 21:41:13 -07004724 ret = i915_gem_idle(dev);
4725 if (ret)
4726 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004727}
4728
4729void
4730i915_gem_load(struct drm_device *dev)
4731{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004732 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07004733 drm_i915_private_t *dev_priv = dev->dev_private;
4734
Eric Anholt673a3942008-07-30 12:06:12 -07004735 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
Daniel Vetter99fcb762010-02-07 16:20:18 +01004736 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004737 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
Chris Wilsonf13d3f72010-09-20 17:36:15 +01004738 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004739 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilsonbe726152010-07-23 23:18:50 +01004740 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
Zou Nan hai852835f2010-05-21 09:08:56 +08004741 INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
4742 INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
Zou Nan haid1b851f2010-05-21 09:08:57 +08004743 if (HAS_BSD(dev)) {
4744 INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
4745 INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
4746 }
Daniel Vetter007cc8a2010-04-28 11:02:31 +02004747 for (i = 0; i < 16; i++)
4748 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004749 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4750 i915_gem_retire_work_handler);
Chris Wilson30dbf0c2010-09-25 10:19:17 +01004751 init_completion(&dev_priv->error_completion);
Chris Wilson31169712009-09-14 16:50:28 +01004752 spin_lock(&shrink_list_lock);
4753 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4754 spin_unlock(&shrink_list_lock);
4755
Dave Airlie94400122010-07-20 13:15:31 +10004756 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4757 if (IS_GEN3(dev)) {
4758 u32 tmp = I915_READ(MI_ARB_STATE);
4759 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
4760 /* arb state is a masked write, so set bit + bit in mask */
4761 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
4762 I915_WRITE(MI_ARB_STATE, tmp);
4763 }
4764 }
4765
Jesse Barnesde151cf2008-11-12 10:03:55 -08004766 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08004767 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4768 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004769
Chris Wilsona6c45cf2010-09-17 00:32:17 +01004770 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004771 dev_priv->num_fence_regs = 16;
4772 else
4773 dev_priv->num_fence_regs = 8;
4774
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004775 /* Initialize fence registers to zero */
Chris Wilsona6c45cf2010-09-17 00:32:17 +01004776 switch (INTEL_INFO(dev)->gen) {
4777 case 6:
4778 for (i = 0; i < 16; i++)
4779 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
4780 break;
4781 case 5:
4782 case 4:
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004783 for (i = 0; i < 16; i++)
4784 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
Chris Wilsona6c45cf2010-09-17 00:32:17 +01004785 break;
4786 case 3:
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004787 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4788 for (i = 0; i < 8; i++)
4789 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
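 		/* fall through: clear the eight 830-style fence registers too */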
Chris Wilsona6c45cf2010-09-17 00:32:17 +01004790 case 2:
4791 for (i = 0; i < 8; i++)
4792 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4793 break;
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004794 }
Eric Anholt673a3942008-07-30 12:06:12 -07004795 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004796 init_waitqueue_head(&dev_priv->pending_flip_queue);
Eric Anholt673a3942008-07-30 12:06:12 -07004797}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004798
4799/*
4800 * Create a physically contiguous memory object for this object
4801 * e.g. for cursor + overlay regs
4802 */
Chris Wilson995b6762010-08-20 13:23:26 +01004803static int i915_gem_init_phys_object(struct drm_device *dev,
4804 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004805{
4806 drm_i915_private_t *dev_priv = dev->dev_private;
4807 struct drm_i915_gem_phys_object *phys_obj;
4808 int ret;
4809
4810 if (dev_priv->mm.phys_objs[id - 1] || !size)
4811 return 0;
4812
Eric Anholt9a298b22009-03-24 12:23:04 -07004813 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004814 if (!phys_obj)
4815 return -ENOMEM;
4816
4817 phys_obj->id = id;
4818
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004819 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004820 if (!phys_obj->handle) {
4821 ret = -ENOMEM;
4822 goto kfree_obj;
4823 }
4824#ifdef CONFIG_X86
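 	/* Map the backing memory write-combined so CPU writes to the phys
 	 * object are not cached.
 	 */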
4825 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4826#endif
4827
4828 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4829
4830 return 0;
4831kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004832 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004833 return ret;
4834}
4835
Chris Wilson995b6762010-08-20 13:23:26 +01004836static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004837{
4838 drm_i915_private_t *dev_priv = dev->dev_private;
4839 struct drm_i915_gem_phys_object *phys_obj;
4840
4841 if (!dev_priv->mm.phys_objs[id - 1])
4842 return;
4843
4844 phys_obj = dev_priv->mm.phys_objs[id - 1];
4845 if (phys_obj->cur_obj) {
4846 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4847 }
4848
4849#ifdef CONFIG_X86
4850 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4851#endif
4852 drm_pci_free(dev, phys_obj->handle);
4853 kfree(phys_obj);
4854 dev_priv->mm.phys_objs[id - 1] = NULL;
4855}
4856
4857void i915_gem_free_all_phys_object(struct drm_device *dev)
4858{
4859 int i;
4860
Dave Airlie260883c2009-01-22 17:58:49 +10004861 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004862 i915_gem_free_phys_object(dev, i);
4863}
4864
4865void i915_gem_detach_phys_object(struct drm_device *dev,
4866 struct drm_gem_object *obj)
4867{
4868 struct drm_i915_gem_object *obj_priv;
4869 int i;
4870 int ret;
4871 int page_count;
4872
Daniel Vetter23010e42010-03-08 13:35:02 +01004873 obj_priv = to_intel_bo(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004874 if (!obj_priv->phys_obj)
4875 return;
4876
Chris Wilson4bdadb92010-01-27 13:36:32 +00004877 ret = i915_gem_object_get_pages(obj, 0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004878 if (ret)
4879 goto out;
4880
4881 page_count = obj->size / PAGE_SIZE;
4882
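 	/* Copy the contents back from the contiguous phys object into the
 	 * object's shmem pages before detaching.
 	 */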
4883 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07004884 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004885 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4886
4887 memcpy(dst, src, PAGE_SIZE);
4888 kunmap_atomic(dst, KM_USER0);
4889 }
Eric Anholt856fa192009-03-19 14:10:50 -07004890 drm_clflush_pages(obj_priv->pages, page_count);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004891 drm_agp_chipset_flush(dev);
Chris Wilsond78b47b2009-06-17 21:52:49 +01004892
4893 i915_gem_object_put_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004894out:
4895 obj_priv->phys_obj->cur_obj = NULL;
4896 obj_priv->phys_obj = NULL;
4897}
4898
4899int
4900i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004901 struct drm_gem_object *obj,
4902 int id,
4903 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004904{
4905 drm_i915_private_t *dev_priv = dev->dev_private;
4906 struct drm_i915_gem_object *obj_priv;
4907 int ret = 0;
4908 int page_count;
4909 int i;
4910
4911 if (id > I915_MAX_PHYS_OBJECT)
4912 return -EINVAL;
4913
Daniel Vetter23010e42010-03-08 13:35:02 +01004914 obj_priv = to_intel_bo(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004915
4916 if (obj_priv->phys_obj) {
4917 if (obj_priv->phys_obj->id == id)
4918 return 0;
4919 i915_gem_detach_phys_object(dev, obj);
4920 }
4921
Dave Airlie71acb5e2008-12-30 20:31:46 +10004922 /* create a new object */
4923 if (!dev_priv->mm.phys_objs[id - 1]) {
4924 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004925 obj->size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004926 if (ret) {
Linus Torvaldsaeb565d2009-01-26 10:01:53 -08004927 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004928 goto out;
4929 }
4930 }
4931
4932 /* bind to the object */
4933 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4934 obj_priv->phys_obj->cur_obj = obj;
4935
Chris Wilson4bdadb92010-01-27 13:36:32 +00004936 ret = i915_gem_object_get_pages(obj, 0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004937 if (ret) {
4938 DRM_ERROR("failed to get page list\n");
4939 goto out;
4940 }
4941
4942 page_count = obj->size / PAGE_SIZE;
4943
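 	/* Copy the object's current contents from its shmem pages into the
 	 * newly attached contiguous phys object.
 	 */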
4944 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07004945 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004946 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4947
4948 memcpy(dst, src, PAGE_SIZE);
4949 kunmap_atomic(src, KM_USER0);
4950 }
4951
Chris Wilsond78b47b2009-06-17 21:52:49 +01004952 i915_gem_object_put_pages(obj);
4953
Dave Airlie71acb5e2008-12-30 20:31:46 +10004954 return 0;
4955out:
4956 return ret;
4957}
4958
4959static int
4960i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4961 struct drm_i915_gem_pwrite *args,
4962 struct drm_file *file_priv)
4963{
Daniel Vetter23010e42010-03-08 13:35:02 +01004964 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004965 void *obj_addr;
4966 int ret;
4967 char __user *user_data;
4968
4969 user_data = (char __user *) (uintptr_t) args->data_ptr;
4970 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4971
Zhao Yakui44d98a62009-10-09 11:39:40 +08004972 DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004973 ret = copy_from_user(obj_addr, user_data, args->size);
4974 if (ret)
4975 return -EFAULT;
4976
4977 drm_agp_chipset_flush(dev);
4978 return 0;
4979}
Eric Anholtb9624422009-06-03 07:27:35 +00004980
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004981void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004982{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004983 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00004984
4985 /* Clean up our request list when the client is going away, so that
4986 * later retire_requests won't dereference our soon-to-be-gone
4987 * file_priv.
4988 */
Chris Wilson1c255952010-09-26 11:03:27 +01004989 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004990 while (!list_empty(&file_priv->mm.request_list)) {
4991 struct drm_i915_gem_request *request;
4992
4993 request = list_first_entry(&file_priv->mm.request_list,
4994 struct drm_i915_gem_request,
4995 client_list);
4996 list_del(&request->client_list);
4997 request->file_priv = NULL;
4998 }
Chris Wilson1c255952010-09-26 11:03:27 +01004999 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00005000}
Chris Wilson31169712009-09-14 16:50:28 +01005001
Chris Wilson31169712009-09-14 16:50:28 +01005002static int
Chris Wilson1637ef42010-04-20 17:10:35 +01005003i915_gpu_is_active(struct drm_device *dev)
5004{
5005 drm_i915_private_t *dev_priv = dev->dev_private;
5006 int lists_empty;
5007
Chris Wilson1637ef42010-04-20 17:10:35 +01005008 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
Zou Nan hai852835f2010-05-21 09:08:56 +08005009 list_empty(&dev_priv->render_ring.active_list);
Zou Nan haid1b851f2010-05-21 09:08:57 +08005010 if (HAS_BSD(dev))
5011 lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
Chris Wilson1637ef42010-04-20 17:10:35 +01005012
5013 return !lists_empty;
5014}
5015
5016static int
Dave Chinner7f8275d2010-07-19 14:56:17 +10005017i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
Chris Wilson31169712009-09-14 16:50:28 +01005018{
5019 drm_i915_private_t *dev_priv, *next_dev;
5020 struct drm_i915_gem_object *obj_priv, *next_obj;
5021 int cnt = 0;
5022 int would_deadlock = 1;
5023
5024 /* "fast-path" to count number of available objects */
5025 if (nr_to_scan == 0) {
5026 spin_lock(&shrink_list_lock);
5027 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5028 struct drm_device *dev = dev_priv->dev;
5029
5030 if (mutex_trylock(&dev->struct_mutex)) {
5031 list_for_each_entry(obj_priv,
5032 &dev_priv->mm.inactive_list,
5033 list)
5034 cnt++;
5035 mutex_unlock(&dev->struct_mutex);
5036 }
5037 }
5038 spin_unlock(&shrink_list_lock);
5039
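 		/* Report the number of inactive objects, scaled by the VFS
 		 * cache-pressure knob.
 		 */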
5040 return (cnt / 100) * sysctl_vfs_cache_pressure;
5041 }
5042
5043 spin_lock(&shrink_list_lock);
5044
Chris Wilson1637ef42010-04-20 17:10:35 +01005045rescan:
Chris Wilson31169712009-09-14 16:50:28 +01005046 /* first scan for clean buffers */
5047 list_for_each_entry_safe(dev_priv, next_dev,
5048 &shrink_list, mm.shrink_list) {
5049 struct drm_device *dev = dev_priv->dev;
5050
5051 		if (!mutex_trylock(&dev->struct_mutex))
5052 continue;
5053
5054 spin_unlock(&shrink_list_lock);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01005055 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08005056
Chris Wilson31169712009-09-14 16:50:28 +01005057 list_for_each_entry_safe(obj_priv, next_obj,
5058 &dev_priv->mm.inactive_list,
5059 list) {
5060 if (i915_gem_object_is_purgeable(obj_priv)) {
Daniel Vettera8089e82010-04-09 19:05:09 +00005061 i915_gem_object_unbind(&obj_priv->base);
Chris Wilson31169712009-09-14 16:50:28 +01005062 if (--nr_to_scan <= 0)
5063 break;
5064 }
5065 }
5066
5067 spin_lock(&shrink_list_lock);
5068 mutex_unlock(&dev->struct_mutex);
5069
Chris Wilson963b4832009-09-20 23:03:54 +01005070 would_deadlock = 0;
5071
Chris Wilson31169712009-09-14 16:50:28 +01005072 if (nr_to_scan <= 0)
5073 break;
5074 }
5075
5076 /* second pass, evict/count anything still on the inactive list */
5077 list_for_each_entry_safe(dev_priv, next_dev,
5078 &shrink_list, mm.shrink_list) {
5079 struct drm_device *dev = dev_priv->dev;
5080
5081 		if (!mutex_trylock(&dev->struct_mutex))
5082 continue;
5083
5084 spin_unlock(&shrink_list_lock);
5085
5086 list_for_each_entry_safe(obj_priv, next_obj,
5087 &dev_priv->mm.inactive_list,
5088 list) {
5089 if (nr_to_scan > 0) {
Daniel Vettera8089e82010-04-09 19:05:09 +00005090 i915_gem_object_unbind(&obj_priv->base);
Chris Wilson31169712009-09-14 16:50:28 +01005091 nr_to_scan--;
5092 } else
5093 cnt++;
5094 }
5095
5096 spin_lock(&shrink_list_lock);
5097 mutex_unlock(&dev->struct_mutex);
5098
5099 would_deadlock = 0;
5100 }
5101
Chris Wilson1637ef42010-04-20 17:10:35 +01005102 if (nr_to_scan) {
5103 int active = 0;
5104
5105 /*
5106 * We are desperate for pages, so as a last resort, wait
5107 * for the GPU to finish and discard whatever we can.
5108 		 * This dramatically reduces the number of OOM-killer
5109 		 * events whilst running the GPU aggressively.
5110 */
5111 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5112 struct drm_device *dev = dev_priv->dev;
5113
5114 if (!mutex_trylock(&dev->struct_mutex))
5115 continue;
5116
5117 spin_unlock(&shrink_list_lock);
5118
5119 if (i915_gpu_is_active(dev)) {
5120 i915_gpu_idle(dev);
5121 active++;
5122 }
5123
5124 spin_lock(&shrink_list_lock);
5125 mutex_unlock(&dev->struct_mutex);
5126 }
5127
5128 if (active)
5129 goto rescan;
5130 }
5131
Chris Wilson31169712009-09-14 16:50:28 +01005132 spin_unlock(&shrink_list_lock);
5133
5134 if (would_deadlock)
5135 return -1;
5136 else if (cnt > 0)
5137 return (cnt / 100) * sysctl_vfs_cache_pressure;
5138 else
5139 return 0;
5140}
5141
5142static struct shrinker shrinker = {
5143 .shrink = i915_gem_shrink,
5144 .seeks = DEFAULT_SEEKS,
5145};
5146
5147__init void
5148i915_gem_shrinker_init(void)
5149{
5150 register_shrinker(&shrinker);
5151}
5152
5153__exit void
5154i915_gem_shrinker_exit(void)
5155{
5156 unregister_shrinker(&shrinker);
5157}