/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						     uint64_t offset,
						     uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev, int min_size);
static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
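
/*
 * For illustration, a minimal userspace sketch of this ioctl (hypothetical
 * fd; error handling omitted).  The returned handle is the name that all
 * later GEM ioctls use to refer to the object:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	// create.handle now names a 4 KiB object
 */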

static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}
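
/*
 * Objects need the extra copy help when the chipset swizzles bit 6 of tiled
 * addresses with bit 17 of the physical address
 * (I915_BIT_6_SWIZZLE_9_10_17).  Userspace can account for the A9/A10
 * component of the swizzle from its own buffer offsets, but A17 depends on
 * which physical page happens to back the object, which only the kernel can
 * see; slow_shmem_bit17_copy() below undoes that part during pread and
 * pwrite.
 */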

static inline int
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_vaddr == NULL)
		return -ENOMEM;

	src_vaddr = kmap_atomic(src_page, KM_USER1);
	if (src_vaddr == NULL) {
		kunmap_atomic(dst_vaddr, KM_USER0);
		return -ENOMEM;
	}

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);
	kunmap_atomic(dst_vaddr, KM_USER0);

	return 0;
}

static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
	if (gpu_vaddr == NULL)
		return -ENOMEM;

	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
	if (cpu_vaddr == NULL) {
		kunmap_atomic(gpu_vaddr, KM_USER0);
		return -ENOMEM;
	}

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap_atomic(cpu_vaddr, KM_USER1);
	kunmap_atomic(gpu_vaddr, KM_USER0);

	return 0;
}
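
/*
 * A worked example of the loop above (a sketch, not hardware documentation):
 * reading 100 bytes at gpu_offset 0x30 from an affected page proceeds in
 * cacheline-bounded chunks, with bit 6 flipped on each source offset:
 *
 *	16 bytes from 0x70 (0x30 ^ 0x40),
 *	64 bytes from 0x00 (0x40 ^ 0x40),
 *	20 bytes from 0xc0 (0x80 ^ 0x40).
 *
 * Bounding each chunk at a 64-byte cacheline boundary keeps a single XORed
 * offset valid for the whole memcpy.
 */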

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static inline gfp_t
i915_gem_object_get_page_gfp_mask(struct drm_gem_object *obj)
{
	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
}

static inline void
i915_gem_object_set_page_gfp_mask(struct drm_gem_object *obj, gfp_t gfp)
{
	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
}

static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;
		gfp_t gfp;

		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		gfp = i915_gem_object_get_page_gfp_mask(obj);
		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
		ret = i915_gem_object_get_pages(obj);
		i915_gem_object_set_page_gfp_mask(obj, gfp);
	}

	return ret;
}
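
/*
 * Note the gfp dance above: the retry after eviction clears __GFP_NORETRY
 * from the shmem mapping's allocation mask, so the second attempt may block
 * and reclaim memory instead of failing quickly; the original mask is
 * restored once the pages are (or aren't) in hand.
 */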

/**
 * This is the fallback shmem pread path, which uses get_user_pages to pin
 * the destination pages first, so we can copy out of the object's backing
 * pages while holding the struct_mutex and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    1);
		} else {
			ret = slow_shmem_copy(user_pages[data_page_index],
					      data_page_offset,
					      obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference(obj);

	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}

static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
					gtt_page_base, gtt_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Since the user pages were
		 * pinned up front, this shouldn't happen; bail out with the
		 * error.
		 */
		if (ret)
			goto out_unpin_object;

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    0);
		} else {
			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	mutex_lock(&dev->struct_mutex);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			list_move_tail(&obj_priv->fence_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
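
/*
 * For example (a userspace sketch with a hypothetical fd and handle, not
 * part of this file), preparing an object for CPU writes through the mmap
 * ioctl's mapping looks like:
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */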

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto unlock;

		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret)
			goto unlock;
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		ret = -ENOMEM;
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure.  Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked.  Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}

static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
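
/*
 * For example: on a pre-965 9xx chip, a 3 MiB tiled object starts the search
 * at 1 MiB and doubles until the fence can cover the object, so the loop
 * visits 1 MiB and 2 MiB and returns 4 MiB; a tiled object of 1 MiB or less
 * is aligned to the 1 MiB minimum fence size.
 */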

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = obj->driver_private;

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
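
/*
 * Userspace pairs this ioctl with a subsequent mmap(2) on the DRM fd
 * (a sketch with hypothetical fd and handle; error handling omitted):
 *
 *	struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mg.offset);
 */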
1467
Ben Gamari6911a9b2009-04-02 11:24:54 -07001468void
Eric Anholt856fa192009-03-19 14:10:50 -07001469i915_gem_object_put_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001470{
1471 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1472 int page_count = obj->size / PAGE_SIZE;
1473 int i;
1474
Eric Anholt856fa192009-03-19 14:10:50 -07001475 BUG_ON(obj_priv->pages_refcount == 0);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001476 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001477
1478 if (--obj_priv->pages_refcount != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07001479 return;
1480
Eric Anholt280b7132009-03-12 16:56:27 -07001481 if (obj_priv->tiling_mode != I915_TILING_NONE)
1482 i915_gem_object_save_bit_17_swizzle(obj);
1483
Chris Wilson3ef94da2009-09-14 16:50:29 +01001484 if (obj_priv->madv == I915_MADV_DONTNEED)
Chris Wilson13a05fd2009-09-20 23:03:19 +01001485 obj_priv->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001486
1487 for (i = 0; i < page_count; i++) {
1488 if (obj_priv->pages[i] == NULL)
1489 break;
1490
1491 if (obj_priv->dirty)
1492 set_page_dirty(obj_priv->pages[i]);
1493
1494 if (obj_priv->madv == I915_MADV_WILLNEED)
Eric Anholt856fa192009-03-19 14:10:50 -07001495 mark_page_accessed(obj_priv->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001496
1497 page_cache_release(obj_priv->pages[i]);
1498 }
Eric Anholt673a3942008-07-30 12:06:12 -07001499 obj_priv->dirty = 0;
1500
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07001501 drm_free_large(obj_priv->pages);
Eric Anholt856fa192009-03-19 14:10:50 -07001502 obj_priv->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001503}
1504
1505static void
Eric Anholtce44b0e2008-11-06 16:00:31 -08001506i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001507{
1508 struct drm_device *dev = obj->dev;
1509 drm_i915_private_t *dev_priv = dev->dev_private;
1510 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1511
1512 /* Add a reference if we're newly entering the active list. */
1513 if (!obj_priv->active) {
1514 drm_gem_object_reference(obj);
1515 obj_priv->active = 1;
1516 }
1517 /* Move from whatever list we were on to the tail of execution. */
Carl Worth5e118f42009-03-20 11:54:25 -07001518 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001519 list_move_tail(&obj_priv->list,
1520 &dev_priv->mm.active_list);
Carl Worth5e118f42009-03-20 11:54:25 -07001521 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001522 obj_priv->last_rendering_seqno = seqno;
Eric Anholt673a3942008-07-30 12:06:12 -07001523}
1524
Eric Anholtce44b0e2008-11-06 16:00:31 -08001525static void
1526i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1527{
1528 struct drm_device *dev = obj->dev;
1529 drm_i915_private_t *dev_priv = dev->dev_private;
1530 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1531
1532 BUG_ON(!obj_priv->active);
1533 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1534 obj_priv->last_rendering_seqno = 0;
1535}
Eric Anholt673a3942008-07-30 12:06:12 -07001536
Chris Wilson963b4832009-09-20 23:03:54 +01001537/* Immediately discard the backing storage */
1538static void
1539i915_gem_object_truncate(struct drm_gem_object *obj)
1540{
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001541 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1542 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001543
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001544 inode = obj->filp->f_path.dentry->d_inode;
1545 if (inode->i_op->truncate)
1546		inode->i_op->truncate(inode);
1547
1548 obj_priv->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001549}
1550
1551static inline int
1552i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1553{
1554 return obj_priv->madv == I915_MADV_DONTNEED;
1555}
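/*
 * Illustrative flow (not driver code): userspace marks a buffer
 * I915_MADV_DONTNEED; when the object is later unbound,
 * i915_gem_object_unbind() sees i915_gem_object_is_purgeable() and calls
 * i915_gem_object_truncate(), so the shmem backing pages are dropped and
 * madv becomes __I915_MADV_PURGED.
 */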
1556
Eric Anholt673a3942008-07-30 12:06:12 -07001557static void
1558i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1559{
1560 struct drm_device *dev = obj->dev;
1561 drm_i915_private_t *dev_priv = dev->dev_private;
1562 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1563
1564 i915_verify_inactive(dev, __FILE__, __LINE__);
1565 if (obj_priv->pin_count != 0)
1566 list_del_init(&obj_priv->list);
1567 else
1568 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1569
Eric Anholtce44b0e2008-11-06 16:00:31 -08001570 obj_priv->last_rendering_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001571 if (obj_priv->active) {
1572 obj_priv->active = 0;
1573 drm_gem_object_unreference(obj);
1574 }
1575 i915_verify_inactive(dev, __FILE__, __LINE__);
1576}
1577
1578/**
1579 * Creates a new sequence number, emitting a write of it to the status page
1580 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1581 *
1582 * Must be called with dev->struct_mutex held.
1583 *
1584 * Returned sequence numbers are nonzero on success.
1585 */
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001586uint32_t
Eric Anholtb9624422009-06-03 07:27:35 +00001587i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1588 uint32_t flush_domains)
Eric Anholt673a3942008-07-30 12:06:12 -07001589{
1590 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtb9624422009-06-03 07:27:35 +00001591 struct drm_i915_file_private *i915_file_priv = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001592 struct drm_i915_gem_request *request;
1593 uint32_t seqno;
1594 int was_empty;
1595 RING_LOCALS;
1596
Eric Anholtb9624422009-06-03 07:27:35 +00001597 if (file_priv != NULL)
1598 i915_file_priv = file_priv->driver_priv;
1599
Eric Anholt9a298b22009-03-24 12:23:04 -07001600 request = kzalloc(sizeof(*request), GFP_KERNEL);
Eric Anholt673a3942008-07-30 12:06:12 -07001601 if (request == NULL)
1602 return 0;
1603
1604 /* Grab the seqno we're going to make this request be, and bump the
1605 * next (skipping 0 so it can be the reserved no-seqno value).
1606 */
1607 seqno = dev_priv->mm.next_gem_seqno;
1608 dev_priv->mm.next_gem_seqno++;
1609 if (dev_priv->mm.next_gem_seqno == 0)
1610 dev_priv->mm.next_gem_seqno++;
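	/*
	 * Worked example (illustrative): if next_gem_seqno is 0xffffffff,
	 * this request is assigned seqno 0xffffffff, the increment wraps
	 * the counter to 0, and the test above bumps it to 1 -- so seqno 0
	 * is never handed out.
	 */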
1611
1612 BEGIN_LP_RING(4);
1613 OUT_RING(MI_STORE_DWORD_INDEX);
1614 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1615 OUT_RING(seqno);
1616
1617 OUT_RING(MI_USER_INTERRUPT);
1618 ADVANCE_LP_RING();
1619
1620 DRM_DEBUG("%d\n", seqno);
1621
1622 request->seqno = seqno;
1623 request->emitted_jiffies = jiffies;
Eric Anholt673a3942008-07-30 12:06:12 -07001624 was_empty = list_empty(&dev_priv->mm.request_list);
1625 list_add_tail(&request->list, &dev_priv->mm.request_list);
Eric Anholtb9624422009-06-03 07:27:35 +00001626 if (i915_file_priv) {
1627 list_add_tail(&request->client_list,
1628 &i915_file_priv->mm.request_list);
1629 } else {
1630 INIT_LIST_HEAD(&request->client_list);
1631 }
Eric Anholt673a3942008-07-30 12:06:12 -07001632
Eric Anholtce44b0e2008-11-06 16:00:31 -08001633 /* Associate any objects on the flushing list matching the write
1634 * domain we're flushing with our flush.
1635 */
1636 if (flush_domains != 0) {
1637 struct drm_i915_gem_object *obj_priv, *next;
1638
1639 list_for_each_entry_safe(obj_priv, next,
1640 &dev_priv->mm.flushing_list, list) {
1641 struct drm_gem_object *obj = obj_priv->obj;
1642
1643 if ((obj->write_domain & flush_domains) ==
1644 obj->write_domain) {
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001645 uint32_t old_write_domain = obj->write_domain;
1646
Eric Anholtce44b0e2008-11-06 16:00:31 -08001647 obj->write_domain = 0;
1648 i915_gem_object_move_to_active(obj, seqno);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001649
1650 trace_i915_gem_object_change_domain(obj,
1651 obj->read_domains,
1652 old_write_domain);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001653 }
1654 }
1655
1656 }
1657
Ben Gamarif65d9422009-09-14 17:48:44 -04001658 if (!dev_priv->mm.suspended) {
1659 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1660 if (was_empty)
1661 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1662 }
Eric Anholt673a3942008-07-30 12:06:12 -07001663 return seqno;
1664}
1665
1666/**
1667 * Command execution barrier
1668 *
1669 * Ensures that all commands in the ring are finished
1670 * before signalling the CPU
1671 */
Eric Anholt3043c602008-10-02 12:24:47 -07001672static uint32_t
Eric Anholt673a3942008-07-30 12:06:12 -07001673i915_retire_commands(struct drm_device *dev)
1674{
1675 drm_i915_private_t *dev_priv = dev->dev_private;
1676 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1677 uint32_t flush_domains = 0;
1678 RING_LOCALS;
1679
1680 /* The sampler always gets flushed on i965 (sigh) */
1681 if (IS_I965G(dev))
1682 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1683 BEGIN_LP_RING(2);
1684 OUT_RING(cmd);
1685 OUT_RING(0); /* noop */
1686 ADVANCE_LP_RING();
1687 return flush_domains;
1688}
1689
1690/**
1691 * Moves buffers associated only with the given request's seqno from the
1692 * active list to the flushing or inactive lists, potentially freeing them.
1693 */
1694static void
1695i915_gem_retire_request(struct drm_device *dev,
1696 struct drm_i915_gem_request *request)
1697{
1698 drm_i915_private_t *dev_priv = dev->dev_private;
1699
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001700 trace_i915_gem_request_retire(dev, request->seqno);
1701
Eric Anholt673a3942008-07-30 12:06:12 -07001702 /* Move any buffers on the active list that are no longer referenced
1703 * by the ringbuffer to the flushing/inactive lists as appropriate.
1704 */
Carl Worth5e118f42009-03-20 11:54:25 -07001705 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001706 while (!list_empty(&dev_priv->mm.active_list)) {
1707 struct drm_gem_object *obj;
1708 struct drm_i915_gem_object *obj_priv;
1709
1710 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1711 struct drm_i915_gem_object,
1712 list);
1713 obj = obj_priv->obj;
1714
1715 /* If the seqno being retired doesn't match the oldest in the
1716 * list, then the oldest in the list must still be newer than
1717 * this seqno.
1718 */
1719 if (obj_priv->last_rendering_seqno != request->seqno)
Carl Worth5e118f42009-03-20 11:54:25 -07001720 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001721
Eric Anholt673a3942008-07-30 12:06:12 -07001722#if WATCH_LRU
1723 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1724 __func__, request->seqno, obj);
1725#endif
1726
Eric Anholtce44b0e2008-11-06 16:00:31 -08001727 if (obj->write_domain != 0)
1728 i915_gem_object_move_to_flushing(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001729 else {
1730 /* Take a reference on the object so it won't be
1731 * freed while the spinlock is held. The list
1732 * protection for this spinlock is safe when breaking
1733 * the lock like this since the next thing we do
1734 * is just get the head of the list again.
1735 */
1736 drm_gem_object_reference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001737 i915_gem_object_move_to_inactive(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001738 spin_unlock(&dev_priv->mm.active_list_lock);
1739 drm_gem_object_unreference(obj);
1740 spin_lock(&dev_priv->mm.active_list_lock);
1741 }
Eric Anholt673a3942008-07-30 12:06:12 -07001742 }
Carl Worth5e118f42009-03-20 11:54:25 -07001743out:
1744 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001745}
1746
1747/**
1748 * Returns true if seq1 is later than or equal to seq2 (wraparound-safe).
1749 */
Ben Gamari22be1722009-09-14 17:48:43 -04001750bool
Eric Anholt673a3942008-07-30 12:06:12 -07001751i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1752{
1753 return (int32_t)(seq1 - seq2) >= 0;
1754}
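/*
 * Illustrative only (not driver code): the signed cast makes the
 * comparison wraparound-safe:
 *
 *	i915_seqno_passed(0x00000002, 0xfffffffe)  -> (int32_t)4 >= 0, true
 *	i915_seqno_passed(0xfffffffe, 0x00000002)  -> (int32_t)-4 >= 0, false
 *
 * so a seqno issued just after the 32-bit counter wraps still compares
 * as later than one issued just before.
 */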
1755
1756uint32_t
1757i915_get_gem_seqno(struct drm_device *dev)
1758{
1759 drm_i915_private_t *dev_priv = dev->dev_private;
1760
1761 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1762}
1763
1764/**
1765 * This function clears the request list as sequence numbers are passed.
1766 */
1767void
1768i915_gem_retire_requests(struct drm_device *dev)
1769{
1770 drm_i915_private_t *dev_priv = dev->dev_private;
1771 uint32_t seqno;
1772
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001773 if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001774 return;
1775
Eric Anholt673a3942008-07-30 12:06:12 -07001776 seqno = i915_get_gem_seqno(dev);
1777
1778 while (!list_empty(&dev_priv->mm.request_list)) {
1779 struct drm_i915_gem_request *request;
1780 uint32_t retiring_seqno;
1781
1782 request = list_first_entry(&dev_priv->mm.request_list,
1783 struct drm_i915_gem_request,
1784 list);
1785 retiring_seqno = request->seqno;
1786
1787 if (i915_seqno_passed(seqno, retiring_seqno) ||
Ben Gamariba1234d2009-09-14 17:48:47 -04001788 atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001789 i915_gem_retire_request(dev, request);
1790
1791 list_del(&request->list);
Eric Anholtb9624422009-06-03 07:27:35 +00001792 list_del(&request->client_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07001793 kfree(request);
Eric Anholt673a3942008-07-30 12:06:12 -07001794 } else
1795 break;
1796 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001797
1798	if (unlikely(dev_priv->trace_irq_seqno &&
1799 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1800 i915_user_irq_put(dev);
1801 dev_priv->trace_irq_seqno = 0;
1802 }
Eric Anholt673a3942008-07-30 12:06:12 -07001803}
1804
1805void
1806i915_gem_retire_work_handler(struct work_struct *work)
1807{
1808 drm_i915_private_t *dev_priv;
1809 struct drm_device *dev;
1810
1811 dev_priv = container_of(work, drm_i915_private_t,
1812 mm.retire_work.work);
1813 dev = dev_priv->dev;
1814
1815 mutex_lock(&dev->struct_mutex);
1816 i915_gem_retire_requests(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07001817 if (!dev_priv->mm.suspended &&
1818 !list_empty(&dev_priv->mm.request_list))
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001819 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Eric Anholt673a3942008-07-30 12:06:12 -07001820 mutex_unlock(&dev->struct_mutex);
1821}
1822
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001823int
Daniel Vetter48764bf2009-09-15 22:57:32 +02001824i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
Eric Anholt673a3942008-07-30 12:06:12 -07001825{
1826 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001827 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001828 int ret = 0;
1829
1830 BUG_ON(seqno == 0);
1831
Ben Gamariba1234d2009-09-14 17:48:47 -04001832 if (atomic_read(&dev_priv->mm.wedged))
Ben Gamariffed1d02009-09-14 17:48:41 -04001833 return -EIO;
1834
Eric Anholt673a3942008-07-30 12:06:12 -07001835 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001836 if (IS_IGDNG(dev))
1837 ier = I915_READ(DEIER) | I915_READ(GTIER);
1838 else
1839 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001840 if (!ier) {
1841 DRM_ERROR("something (likely vbetool) disabled "
1842 "interrupts, re-enabling\n");
1843 i915_driver_irq_preinstall(dev);
1844 i915_driver_irq_postinstall(dev);
1845 }
1846
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001847 trace_i915_gem_request_wait_begin(dev, seqno);
1848
Eric Anholt673a3942008-07-30 12:06:12 -07001849 dev_priv->mm.waiting_gem_seqno = seqno;
1850 i915_user_irq_get(dev);
Daniel Vetter48764bf2009-09-15 22:57:32 +02001851 if (interruptible)
1852 ret = wait_event_interruptible(dev_priv->irq_queue,
1853 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1854 atomic_read(&dev_priv->mm.wedged));
1855 else
1856 wait_event(dev_priv->irq_queue,
1857 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1858 atomic_read(&dev_priv->mm.wedged));
1859
Eric Anholt673a3942008-07-30 12:06:12 -07001860 i915_user_irq_put(dev);
1861 dev_priv->mm.waiting_gem_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001862
1863 trace_i915_gem_request_wait_end(dev, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001864 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001865 if (atomic_read(&dev_priv->mm.wedged))
Eric Anholt673a3942008-07-30 12:06:12 -07001866 ret = -EIO;
1867
1868 if (ret && ret != -ERESTARTSYS)
1869 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1870 __func__, ret, seqno, i915_get_gem_seqno(dev));
1871
1872 /* Directly dispatch request retiring. While we have the work queue
1873 * to handle this, the waiter on a request often wants an associated
1874 * buffer to have made it to the inactive list, and we would need
1875 * a separate wait queue to handle that.
1876 */
1877 if (ret == 0)
1878 i915_gem_retire_requests(dev);
1879
1880 return ret;
1881}
1882
Daniel Vetter48764bf2009-09-15 22:57:32 +02001883/**
1884 * Waits for a sequence number to be signaled, and cleans up the
1885 * request and object lists appropriately for that event.
1886 */
1887static int
1888i915_wait_request(struct drm_device *dev, uint32_t seqno)
1889{
1890 return i915_do_wait_request(dev, seqno, 1);
1891}
1892
1893/**
1894 * Waits for the ring to finish up to the latest request. Useful for waiting
1895 * for flip events, e.g. for the overlay support.
 */
1896int i915_lp_ring_sync(struct drm_device *dev)
1897{
1898 uint32_t seqno;
1899 int ret;
1900
1901 seqno = i915_add_request(dev, NULL, 0);
1902
1903 if (seqno == 0)
1904 return -ENOMEM;
1905
1906 ret = i915_do_wait_request(dev, seqno, 0);
1907 BUG_ON(ret == -ERESTARTSYS);
1908 return ret;
1909}
1910
Eric Anholt673a3942008-07-30 12:06:12 -07001911static void
1912i915_gem_flush(struct drm_device *dev,
1913 uint32_t invalidate_domains,
1914 uint32_t flush_domains)
1915{
1916 drm_i915_private_t *dev_priv = dev->dev_private;
1917 uint32_t cmd;
1918 RING_LOCALS;
1919
1920#if WATCH_EXEC
1921 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1922 invalidate_domains, flush_domains);
1923#endif
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001924 trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
1925 invalidate_domains, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07001926
1927 if (flush_domains & I915_GEM_DOMAIN_CPU)
1928 drm_agp_chipset_flush(dev);
1929
Chris Wilson21d509e2009-06-06 09:46:02 +01001930 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
Eric Anholt673a3942008-07-30 12:06:12 -07001931 /*
1932 * read/write caches:
1933 *
1934 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1935 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1936 * also flushed at 2d versus 3d pipeline switches.
1937 *
1938 * read-only caches:
1939 *
1940 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1941 * MI_READ_FLUSH is set, and is always flushed on 965.
1942 *
1943 * I915_GEM_DOMAIN_COMMAND may not exist?
1944 *
1945 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1946 * invalidated when MI_EXE_FLUSH is set.
1947 *
1948 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1949 * invalidated with every MI_FLUSH.
1950 *
1951 * TLBs:
1952 *
1953 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1954	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
1955 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1956 * are flushed at any MI_FLUSH.
1957 */
1958
1959 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1960 if ((invalidate_domains|flush_domains) &
1961 I915_GEM_DOMAIN_RENDER)
1962 cmd &= ~MI_NO_WRITE_FLUSH;
1963 if (!IS_I965G(dev)) {
1964 /*
1965 * On the 965, the sampler cache always gets flushed
1966 * and this bit is reserved.
1967 */
1968 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1969 cmd |= MI_READ_FLUSH;
1970 }
1971 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1972 cmd |= MI_EXE_FLUSH;
1973
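		/*
		 * Worked example (illustrative): on pre-965, invalidating
		 * RENDER | INSTRUCTION leaves cmd = MI_FLUSH | MI_EXE_FLUSH
		 * with MI_NO_WRITE_FLUSH cleared, i.e. one MI_FLUSH packet
		 * that both flushes render writes and invalidates the
		 * instruction cache.
		 */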
1974#if WATCH_EXEC
1975 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1976#endif
1977 BEGIN_LP_RING(2);
1978 OUT_RING(cmd);
Daniel Vetter48764bf2009-09-15 22:57:32 +02001979 OUT_RING(MI_NOOP);
Eric Anholt673a3942008-07-30 12:06:12 -07001980 ADVANCE_LP_RING();
1981 }
1982}
1983
1984/**
1985 * Ensures that all rendering to the object has completed and the object is
1986 * safe to unbind from the GTT or access from the CPU.
1987 */
1988static int
1989i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1990{
1991 struct drm_device *dev = obj->dev;
1992 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1993 int ret;
1994
Eric Anholte47c68e2008-11-14 13:35:19 -08001995 /* This function only exists to support waiting for existing rendering,
1996 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07001997 */
Eric Anholte47c68e2008-11-14 13:35:19 -08001998 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07001999
2000 /* If there is rendering queued on the buffer being evicted, wait for
2001 * it.
2002 */
2003 if (obj_priv->active) {
2004#if WATCH_BUF
2005 DRM_INFO("%s: object %p wait for seqno %08x\n",
2006 __func__, obj, obj_priv->last_rendering_seqno);
2007#endif
2008 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
2009 if (ret != 0)
2010 return ret;
2011 }
2012
2013 return 0;
2014}
2015
2016/**
2017 * Unbinds an object from the GTT aperture.
2018 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002019int
Eric Anholt673a3942008-07-30 12:06:12 -07002020i915_gem_object_unbind(struct drm_gem_object *obj)
2021{
2022 struct drm_device *dev = obj->dev;
2023 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2024 int ret = 0;
2025
2026#if WATCH_BUF
2027 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
2028 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
2029#endif
2030 if (obj_priv->gtt_space == NULL)
2031 return 0;
2032
2033 if (obj_priv->pin_count != 0) {
2034 DRM_ERROR("Attempting to unbind pinned buffer\n");
2035 return -EINVAL;
2036 }
2037
Eric Anholt5323fd02009-09-09 11:50:45 -07002038 /* blow away mappings if mapped through GTT */
2039 i915_gem_release_mmap(obj);
2040
2041 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2042 i915_gem_clear_fence_reg(obj);
2043
Eric Anholt673a3942008-07-30 12:06:12 -07002044 /* Move the object to the CPU domain to ensure that
2045 * any possible CPU writes while it's not in the GTT
2046 * are flushed when we go to remap it. This will
2047 * also ensure that all pending GPU writes are finished
2048 * before we unbind.
2049 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002050 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07002051 if (ret) {
Eric Anholte47c68e2008-11-14 13:35:19 -08002052 if (ret != -ERESTARTSYS)
2053 DRM_ERROR("set_domain failed: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07002054 return ret;
2055 }
2056
Eric Anholt5323fd02009-09-09 11:50:45 -07002057 BUG_ON(obj_priv->active);
2058
Eric Anholt673a3942008-07-30 12:06:12 -07002059 if (obj_priv->agp_mem != NULL) {
2060 drm_unbind_agp(obj_priv->agp_mem);
2061 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2062 obj_priv->agp_mem = NULL;
2063 }
2064
Eric Anholt856fa192009-03-19 14:10:50 -07002065 i915_gem_object_put_pages(obj);
Chris Wilsona32808c2009-09-20 21:29:47 +01002066 BUG_ON(obj_priv->pages_refcount);
Eric Anholt673a3942008-07-30 12:06:12 -07002067
2068 if (obj_priv->gtt_space) {
2069 atomic_dec(&dev->gtt_count);
2070 atomic_sub(obj->size, &dev->gtt_memory);
2071
2072 drm_mm_put_block(obj_priv->gtt_space);
2073 obj_priv->gtt_space = NULL;
2074 }
2075
2076 /* Remove ourselves from the LRU list if present. */
2077 if (!list_empty(&obj_priv->list))
2078 list_del_init(&obj_priv->list);
2079
Chris Wilson963b4832009-09-20 23:03:54 +01002080 if (i915_gem_object_is_purgeable(obj_priv))
2081 i915_gem_object_truncate(obj);
2082
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002083 trace_i915_gem_object_unbind(obj);
2084
Eric Anholt673a3942008-07-30 12:06:12 -07002085 return 0;
2086}
2087
Chris Wilson07f73f62009-09-14 16:50:30 +01002088static struct drm_gem_object *
2089i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2090{
2091 drm_i915_private_t *dev_priv = dev->dev_private;
2092 struct drm_i915_gem_object *obj_priv;
2093 struct drm_gem_object *best = NULL;
2094 struct drm_gem_object *first = NULL;
2095
2096 /* Try to find the smallest clean object */
2097 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
2098 struct drm_gem_object *obj = obj_priv->obj;
2099 if (obj->size >= min_size) {
Chris Wilson963b4832009-09-20 23:03:54 +01002100 if ((!obj_priv->dirty ||
2101 i915_gem_object_is_purgeable(obj_priv)) &&
Chris Wilson07f73f62009-09-14 16:50:30 +01002102 (!best || obj->size < best->size)) {
2103 best = obj;
2104 if (best->size == min_size)
2105 return best;
2106 }
2107 if (!first)
2108 first = obj;
2109 }
2110 }
2111
2112 return best ? best : first;
2113}
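/*
 * Illustrative example (not driver code): with inactive objects of sizes
 * {128K dirty, 192K clean, 256K clean} and min_size = 96K, the 192K
 * object is the smallest clean match and is returned as "best"; if no
 * large-enough object were clean or purgeable, the first large-enough
 * one (the dirty 128K object) would be returned as a fallback.
 */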
2114
Eric Anholt673a3942008-07-30 12:06:12 -07002115static int
Chris Wilson07f73f62009-09-14 16:50:30 +01002116i915_gem_evict_everything(struct drm_device *dev)
2117{
2118 drm_i915_private_t *dev_priv = dev->dev_private;
2119 uint32_t seqno;
2120 int ret;
2121 bool lists_empty;
2122
Chris Wilson07f73f62009-09-14 16:50:30 +01002123 spin_lock(&dev_priv->mm.active_list_lock);
2124 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2125 list_empty(&dev_priv->mm.flushing_list) &&
2126 list_empty(&dev_priv->mm.active_list));
2127 spin_unlock(&dev_priv->mm.active_list_lock);
2128
Chris Wilson97311292009-09-21 00:22:34 +01002129 if (lists_empty)
Chris Wilson07f73f62009-09-14 16:50:30 +01002130 return -ENOSPC;
Chris Wilson07f73f62009-09-14 16:50:30 +01002131
2132 /* Flush everything (on to the inactive lists) and evict */
2133 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2134 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2135 if (seqno == 0)
2136 return -ENOMEM;
2137
2138 ret = i915_wait_request(dev, seqno);
2139 if (ret)
2140 return ret;
2141
Chris Wilsonab5ee572009-09-20 19:25:47 +01002142 ret = i915_gem_evict_from_inactive_list(dev);
Chris Wilson07f73f62009-09-14 16:50:30 +01002143 if (ret)
2144 return ret;
2145
2146 spin_lock(&dev_priv->mm.active_list_lock);
2147 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2148 list_empty(&dev_priv->mm.flushing_list) &&
2149 list_empty(&dev_priv->mm.active_list));
2150 spin_unlock(&dev_priv->mm.active_list_lock);
2151 BUG_ON(!lists_empty);
2152
Eric Anholt673a3942008-07-30 12:06:12 -07002153 return 0;
2154}
2155
2156static int
Chris Wilson07f73f62009-09-14 16:50:30 +01002157i915_gem_evict_something(struct drm_device *dev, int min_size)
Eric Anholt673a3942008-07-30 12:06:12 -07002158{
2159 drm_i915_private_t *dev_priv = dev->dev_private;
2160 struct drm_gem_object *obj;
Chris Wilson07f73f62009-09-14 16:50:30 +01002161 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002162
2163 for (;;) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002164 i915_gem_retire_requests(dev);
2165
Eric Anholt673a3942008-07-30 12:06:12 -07002166 /* If there's an inactive buffer available now, grab it
2167 * and be done.
2168 */
Chris Wilson07f73f62009-09-14 16:50:30 +01002169 obj = i915_gem_find_inactive_object(dev, min_size);
2170 if (obj) {
2171 struct drm_i915_gem_object *obj_priv;
2172
Eric Anholt673a3942008-07-30 12:06:12 -07002173#if WATCH_LRU
2174 DRM_INFO("%s: evicting %p\n", __func__, obj);
2175#endif
Chris Wilson07f73f62009-09-14 16:50:30 +01002176 obj_priv = obj->driver_private;
2177 BUG_ON(obj_priv->pin_count != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07002178 BUG_ON(obj_priv->active);
2179
2180 /* Wait on the rendering and unbind the buffer. */
Chris Wilson07f73f62009-09-14 16:50:30 +01002181 return i915_gem_object_unbind(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002182 }
2183
2184 /* If we didn't get anything, but the ring is still processing
Chris Wilson07f73f62009-09-14 16:50:30 +01002185 * things, wait for the next to finish and hopefully leave us
2186 * a buffer to evict.
Eric Anholt673a3942008-07-30 12:06:12 -07002187 */
2188 if (!list_empty(&dev_priv->mm.request_list)) {
2189 struct drm_i915_gem_request *request;
2190
2191 request = list_first_entry(&dev_priv->mm.request_list,
2192 struct drm_i915_gem_request,
2193 list);
2194
2195 ret = i915_wait_request(dev, request->seqno);
2196 if (ret)
Chris Wilson07f73f62009-09-14 16:50:30 +01002197 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002198
Chris Wilson07f73f62009-09-14 16:50:30 +01002199 continue;
Eric Anholt673a3942008-07-30 12:06:12 -07002200 }
2201
2202 /* If we didn't have anything on the request list but there
2203 * are buffers awaiting a flush, emit one and try again.
2204 * When we wait on it, those buffers waiting for that flush
2205 * will get moved to inactive.
2206 */
2207 if (!list_empty(&dev_priv->mm.flushing_list)) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002208 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002209
Chris Wilson9a1e2582009-09-20 20:16:50 +01002210 /* Find an object that we can immediately reuse */
2211 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
2212 obj = obj_priv->obj;
2213 if (obj->size >= min_size)
2214 break;
Eric Anholt673a3942008-07-30 12:06:12 -07002215
Chris Wilson9a1e2582009-09-20 20:16:50 +01002216 obj = NULL;
2217 }
Eric Anholt673a3942008-07-30 12:06:12 -07002218
Chris Wilson9a1e2582009-09-20 20:16:50 +01002219 if (obj != NULL) {
2220 uint32_t seqno;
Chris Wilson07f73f62009-09-14 16:50:30 +01002221
Chris Wilson9a1e2582009-09-20 20:16:50 +01002222 i915_gem_flush(dev,
2223 obj->write_domain,
2224 obj->write_domain);
2225 seqno = i915_add_request(dev, NULL, obj->write_domain);
2226 if (seqno == 0)
2227 return -ENOMEM;
2228
2229 ret = i915_wait_request(dev, seqno);
2230 if (ret)
2231 return ret;
2232
2233 continue;
2234 }
Eric Anholt673a3942008-07-30 12:06:12 -07002235 }
2236
Chris Wilson07f73f62009-09-14 16:50:30 +01002237 /* If we didn't do any of the above, there's no single buffer
2238 * large enough to swap out for the new one, so just evict
2239 * everything and start again. (This should be rare.)
Eric Anholt673a3942008-07-30 12:06:12 -07002240 */
Chris Wilson97311292009-09-21 00:22:34 +01002241		if (!list_empty(&dev_priv->mm.inactive_list))
Chris Wilsonab5ee572009-09-20 19:25:47 +01002242 return i915_gem_evict_from_inactive_list(dev);
Chris Wilson97311292009-09-21 00:22:34 +01002243 else
Chris Wilson07f73f62009-09-14 16:50:30 +01002244 return i915_gem_evict_everything(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002245 }
Keith Packardac94a962008-11-20 23:30:27 -08002246}
2247
Ben Gamari6911a9b2009-04-02 11:24:54 -07002248int
Eric Anholt856fa192009-03-19 14:10:50 -07002249i915_gem_object_get_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002250{
2251 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2252 int page_count, i;
2253 struct address_space *mapping;
2254 struct inode *inode;
2255 struct page *page;
2256 int ret;
2257
Eric Anholt856fa192009-03-19 14:10:50 -07002258 if (obj_priv->pages_refcount++ != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002259 return 0;
2260
2261 /* Get the list of pages out of our struct file. They'll be pinned
2262 * at this point until we release them.
2263 */
2264 page_count = obj->size / PAGE_SIZE;
Eric Anholt856fa192009-03-19 14:10:50 -07002265 BUG_ON(obj_priv->pages != NULL);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07002266 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
Eric Anholt856fa192009-03-19 14:10:50 -07002267 if (obj_priv->pages == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002268 obj_priv->pages_refcount--;
Eric Anholt673a3942008-07-30 12:06:12 -07002269 return -ENOMEM;
2270 }
2271
2272 inode = obj->filp->f_path.dentry->d_inode;
2273 mapping = inode->i_mapping;
2274 for (i = 0; i < page_count; i++) {
2275 page = read_mapping_page(mapping, i, NULL);
2276 if (IS_ERR(page)) {
2277 ret = PTR_ERR(page);
Eric Anholt856fa192009-03-19 14:10:50 -07002278 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002279 return ret;
2280 }
Eric Anholt856fa192009-03-19 14:10:50 -07002281 obj_priv->pages[i] = page;
Eric Anholt673a3942008-07-30 12:06:12 -07002282 }
Eric Anholt280b7132009-03-12 16:56:27 -07002283
2284 if (obj_priv->tiling_mode != I915_TILING_NONE)
2285 i915_gem_object_do_bit_17_swizzle(obj);
2286
Eric Anholt673a3942008-07-30 12:06:12 -07002287 return 0;
2288}
2289
Jesse Barnesde151cf2008-11-12 10:03:55 -08002290static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2291{
2292 struct drm_gem_object *obj = reg->obj;
2293 struct drm_device *dev = obj->dev;
2294 drm_i915_private_t *dev_priv = dev->dev_private;
2295 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2296 int regnum = obj_priv->fence_reg;
2297 uint64_t val;
2298
2299 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2300 0xfffff000) << 32;
2301 val |= obj_priv->gtt_offset & 0xfffff000;
2302 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2303 if (obj_priv->tiling_mode == I915_TILING_Y)
2304 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2305 val |= I965_FENCE_REG_VALID;
2306
2307 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2308}
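/*
 * Worked example (illustrative, not driver code): a 1MB tiled object at
 * GTT offset 0x00100000 with a 4096-byte stride encodes as
 *
 *	val = (uint64_t)((0x100000 + 0x100000 - 4096) & 0xfffff000) << 32
 *	    | (0x100000 & 0xfffff000)
 *	    | ((4096 / 128) - 1) << I965_FENCE_PITCH_SHIFT
 *	    | I965_FENCE_REG_VALID;
 *
 * i.e. the fence covers [0x100000, 0x1fffff] with a 31-unit pitch field.
 */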
2309
2310static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2311{
2312 struct drm_gem_object *obj = reg->obj;
2313 struct drm_device *dev = obj->dev;
2314 drm_i915_private_t *dev_priv = dev->dev_private;
2315 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2316 int regnum = obj_priv->fence_reg;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002317 int tile_width;
Eric Anholtdc529a42009-03-10 22:34:49 -07002318 uint32_t fence_reg, val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002319 uint32_t pitch_val;
2320
2321 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2322 (obj_priv->gtt_offset & (obj->size - 1))) {
Linus Torvaldsf06da262009-02-09 08:57:29 -08002323 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002324 __func__, obj_priv->gtt_offset, obj->size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002325 return;
2326 }
2327
Jesse Barnes0f973f22009-01-26 17:10:45 -08002328 if (obj_priv->tiling_mode == I915_TILING_Y &&
2329 HAS_128_BYTE_Y_TILING(dev))
2330 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002331 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002332 tile_width = 512;
2333
2334 /* Note: pitch better be a power of two tile widths */
2335 pitch_val = obj_priv->stride / tile_width;
2336 pitch_val = ffs(pitch_val) - 1;
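	/*
	 * Illustrative: a 2048-byte stride of 512-byte tiles gives
	 * pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2.
	 */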
Jesse Barnesde151cf2008-11-12 10:03:55 -08002337
2338 val = obj_priv->gtt_offset;
2339 if (obj_priv->tiling_mode == I915_TILING_Y)
2340 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2341 val |= I915_FENCE_SIZE_BITS(obj->size);
2342 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2343 val |= I830_FENCE_REG_VALID;
2344
Eric Anholtdc529a42009-03-10 22:34:49 -07002345 if (regnum < 8)
2346 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2347 else
2348 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2349 I915_WRITE(fence_reg, val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002350}
2351
2352static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2353{
2354 struct drm_gem_object *obj = reg->obj;
2355 struct drm_device *dev = obj->dev;
2356 drm_i915_private_t *dev_priv = dev->dev_private;
2357 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2358 int regnum = obj_priv->fence_reg;
2359 uint32_t val;
2360 uint32_t pitch_val;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002361 uint32_t fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002362
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002363 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
Jesse Barnesde151cf2008-11-12 10:03:55 -08002364 (obj_priv->gtt_offset & (obj->size - 1))) {
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002365 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002366 __func__, obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002367 return;
2368 }
2369
Eric Anholte76a16d2009-05-26 17:44:56 -07002370 pitch_val = obj_priv->stride / 128;
2371 pitch_val = ffs(pitch_val) - 1;
2372 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2373
Jesse Barnesde151cf2008-11-12 10:03:55 -08002374 val = obj_priv->gtt_offset;
2375 if (obj_priv->tiling_mode == I915_TILING_Y)
2376 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002377 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2378 WARN_ON(fence_size_bits & ~0x00000f00);
2379 val |= fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002380 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2381 val |= I830_FENCE_REG_VALID;
2382
2383 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002384}
2385
2386/**
2387 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2388 * @obj: object to map through a fence reg
2389 *
2390 * When mapping objects through the GTT, userspace wants to be able to write
2391 * to them without having to worry about swizzling if the object is tiled.
2392 *
2393 * This function walks the fence regs looking for a free one for @obj,
2394 * stealing one if it can't find any.
2395 *
2396 * It then sets up the reg based on the object's properties: address, pitch
2397 * and tiling format.
2398 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002399int
2400i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002401{
2402 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002403 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002404 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2405 struct drm_i915_fence_reg *reg = NULL;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002406 struct drm_i915_gem_object *old_obj_priv = NULL;
2407 int i, ret, avail;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002408
Eric Anholta09ba7f2009-08-29 12:49:51 -07002409 /* Just update our place in the LRU if our fence is getting used. */
2410 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2411 list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2412 return 0;
2413 }
2414
Jesse Barnesde151cf2008-11-12 10:03:55 -08002415 switch (obj_priv->tiling_mode) {
2416 case I915_TILING_NONE:
2417 WARN(1, "allocating a fence for non-tiled object?\n");
2418 break;
2419 case I915_TILING_X:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002420 if (!obj_priv->stride)
2421 return -EINVAL;
2422 WARN((obj_priv->stride & (512 - 1)),
2423 "object 0x%08x is X tiled but has non-512B pitch\n",
2424 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002425 break;
2426 case I915_TILING_Y:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002427 if (!obj_priv->stride)
2428 return -EINVAL;
2429 WARN((obj_priv->stride & (128 - 1)),
2430 "object 0x%08x is Y tiled but has non-128B pitch\n",
2431 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002432 break;
2433 }
2434
2435 /* First try to find a free reg */
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002436 avail = 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002437 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2438 reg = &dev_priv->fence_regs[i];
2439 if (!reg->obj)
2440 break;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002441
2442 old_obj_priv = reg->obj->driver_private;
2443 if (!old_obj_priv->pin_count)
2444 avail++;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002445 }
2446
2447 /* None available, try to steal one or wait for a user to finish */
2448 if (i == dev_priv->num_fence_regs) {
Eric Anholta09ba7f2009-08-29 12:49:51 -07002449 struct drm_gem_object *old_obj = NULL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002450
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002451 if (avail == 0)
Chris Wilson2939e1f2009-06-06 09:46:03 +01002452 return -ENOSPC;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002453
Eric Anholta09ba7f2009-08-29 12:49:51 -07002454 list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
2455 fence_list) {
2456 old_obj = old_obj_priv->obj;
Chris Wilsond7619c42009-02-11 14:26:47 +00002457
Chris Wilsond7619c42009-02-11 14:26:47 +00002458 if (old_obj_priv->pin_count)
2459 continue;
2460
Eric Anholta09ba7f2009-08-29 12:49:51 -07002461 /* Take a reference, as otherwise the wait_rendering
2462 * below may cause the object to get freed out from
2463 * under us.
2464 */
2465 drm_gem_object_reference(old_obj);
2466
Chris Wilsond7619c42009-02-11 14:26:47 +00002467 /* i915 uses fences for GPU access to tiled buffers */
2468 if (IS_I965G(dev) || !old_obj_priv->active)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002469 break;
Chris Wilsond7619c42009-02-11 14:26:47 +00002470
Eric Anholta09ba7f2009-08-29 12:49:51 -07002471 /* This brings the object to the head of the LRU if it
2472 * had been written to. The only way this should
2473 * result in us waiting longer than the expected
2474 * optimal amount of time is if there was a
2475 * fence-using buffer later that was read-only.
2476 */
2477 i915_gem_object_flush_gpu_write_domain(old_obj);
2478 ret = i915_gem_object_wait_rendering(old_obj);
Chris Wilson58c2fb62009-09-01 12:02:39 +01002479 if (ret != 0) {
2480 drm_gem_object_unreference(old_obj);
Chris Wilsond7619c42009-02-11 14:26:47 +00002481 return ret;
Chris Wilson58c2fb62009-09-01 12:02:39 +01002482 }
2483
Eric Anholta09ba7f2009-08-29 12:49:51 -07002484 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002485 }
2486
2487 /*
2488 * Zap this virtual mapping so we can set up a fence again
2489 * for this object next time we need it.
2490 */
Chris Wilson58c2fb62009-09-01 12:02:39 +01002491 i915_gem_release_mmap(old_obj);
2492
Eric Anholta09ba7f2009-08-29 12:49:51 -07002493 i = old_obj_priv->fence_reg;
Chris Wilson58c2fb62009-09-01 12:02:39 +01002494 reg = &dev_priv->fence_regs[i];
2495
Jesse Barnesde151cf2008-11-12 10:03:55 -08002496 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002497 list_del_init(&old_obj_priv->fence_list);
Chris Wilson58c2fb62009-09-01 12:02:39 +01002498
Eric Anholta09ba7f2009-08-29 12:49:51 -07002499 drm_gem_object_unreference(old_obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002500 }
2501
2502 obj_priv->fence_reg = i;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002503 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2504
Jesse Barnesde151cf2008-11-12 10:03:55 -08002505 reg->obj = obj;
2506
2507 if (IS_I965G(dev))
2508 i965_write_fence_reg(reg);
2509 else if (IS_I9XX(dev))
2510 i915_write_fence_reg(reg);
2511 else
2512 i830_write_fence_reg(reg);
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002513
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002514 trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
2515
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002516 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002517}
2518
2519/**
2520 * i915_gem_clear_fence_reg - clear out fence register info
2521 * @obj: object to clear
2522 *
2523 * Zeroes out the fence register itself and clears out the associated
2524 * data structures in dev_priv and obj_priv.
2525 */
2526static void
2527i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2528{
2529 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002530 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002531 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2532
2533 if (IS_I965G(dev))
2534 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
Eric Anholtdc529a42009-03-10 22:34:49 -07002535 else {
2536 uint32_t fence_reg;
2537
2538 if (obj_priv->fence_reg < 8)
2539 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2540 else
2541 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2542 8) * 4;
2543
2544 I915_WRITE(fence_reg, 0);
2545 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002546
2547 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2548 obj_priv->fence_reg = I915_FENCE_REG_NONE;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002549 list_del_init(&obj_priv->fence_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002550}
2551
Eric Anholt673a3942008-07-30 12:06:12 -07002552/**
Chris Wilson52dc7d32009-06-06 09:46:01 +01002553 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2554 * to the buffer to finish, and then resets the fence register.
2555 * @obj: tiled object holding a fence register.
2556 *
2557 * Zeroes out the fence register itself and clears out the associated
2558 * data structures in dev_priv and obj_priv.
2559 */
2560int
2561i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2562{
2563 struct drm_device *dev = obj->dev;
2564 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2565
2566 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2567 return 0;
2568
2569 /* On the i915, GPU access to tiled buffers is via a fence,
2570 * therefore we must wait for any outstanding access to complete
2571 * before clearing the fence.
2572 */
2573 if (!IS_I965G(dev)) {
2574 int ret;
2575
2576 i915_gem_object_flush_gpu_write_domain(obj);
2577 i915_gem_object_flush_gtt_write_domain(obj);
2578 ret = i915_gem_object_wait_rendering(obj);
2579 if (ret != 0)
2580 return ret;
2581 }
2582
2583	i915_gem_clear_fence_reg(obj);
2584
2585 return 0;
2586}
2587
2588/**
Eric Anholt673a3942008-07-30 12:06:12 -07002589 * Finds free space in the GTT aperture and binds the object there.
2590 */
2591static int
2592i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2593{
2594 struct drm_device *dev = obj->dev;
2595 drm_i915_private_t *dev_priv = dev->dev_private;
2596 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2597 struct drm_mm_node *free_space;
Chris Wilson07f73f62009-09-14 16:50:30 +01002598 bool retry_alloc = false;
2599 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002600
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08002601 if (dev_priv->mm.suspended)
2602 return -EBUSY;
Chris Wilson3ef94da2009-09-14 16:50:29 +01002603
Chris Wilsonbb6baf72009-09-22 14:24:13 +01002604 if (obj_priv->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002605 DRM_ERROR("Attempting to bind a purgeable object\n");
2606 return -EINVAL;
2607 }
2608
Eric Anholt673a3942008-07-30 12:06:12 -07002609 if (alignment == 0)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002610 alignment = i915_gem_get_gtt_alignment(obj);
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002611 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002612 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2613 return -EINVAL;
2614 }
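	/*
	 * Illustrative: with a base GTT alignment of 4096, a requested
	 * alignment of 8192 passes (8192 & 4095 == 0) while 6144 is
	 * rejected (6144 & 4095 == 2048).
	 */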
2615
2616 search_free:
2617 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2618 obj->size, alignment, 0);
2619 if (free_space != NULL) {
2620 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2621 alignment);
2622 if (obj_priv->gtt_space != NULL) {
2623 obj_priv->gtt_space->private = obj;
2624 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2625 }
2626 }
2627 if (obj_priv->gtt_space == NULL) {
2628		/* No free space left in the GTT for this object; evict
2629		 * something to make room and retry the search.
2630		 */
2631#if WATCH_LRU
2632 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2633#endif
Chris Wilson07f73f62009-09-14 16:50:30 +01002634 ret = i915_gem_evict_something(dev, obj->size);
Chris Wilson97311292009-09-21 00:22:34 +01002635 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002636 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002637
Eric Anholt673a3942008-07-30 12:06:12 -07002638 goto search_free;
2639 }
2640
2641#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02002642 DRM_INFO("Binding object of size %zd at 0x%08x\n",
Eric Anholt673a3942008-07-30 12:06:12 -07002643 obj->size, obj_priv->gtt_offset);
2644#endif
Chris Wilson07f73f62009-09-14 16:50:30 +01002645 if (retry_alloc) {
2646		i915_gem_object_set_page_gfp_mask(obj,
2647						  i915_gem_object_get_page_gfp_mask(obj) & ~__GFP_NORETRY);
2648 }
Eric Anholt856fa192009-03-19 14:10:50 -07002649 ret = i915_gem_object_get_pages(obj);
Chris Wilson07f73f62009-09-14 16:50:30 +01002650 if (retry_alloc) {
2651		i915_gem_object_set_page_gfp_mask(obj,
2652						  i915_gem_object_get_page_gfp_mask(obj) | __GFP_NORETRY);
2653 }
Eric Anholt673a3942008-07-30 12:06:12 -07002654 if (ret) {
2655 drm_mm_put_block(obj_priv->gtt_space);
2656 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002657
2658 if (ret == -ENOMEM) {
2659 /* first try to clear up some space from the GTT */
2660 ret = i915_gem_evict_something(dev, obj->size);
2661 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002662 /* now try to shrink everyone else */
2663			if (!retry_alloc) {
2664 retry_alloc = true;
2665 goto search_free;
2666 }
2667
2668 return ret;
2669 }
2670
2671 goto search_free;
2672 }
2673
Eric Anholt673a3942008-07-30 12:06:12 -07002674 return ret;
2675 }
2676
Eric Anholt673a3942008-07-30 12:06:12 -07002677 /* Create an AGP memory structure pointing at our pages, and bind it
2678 * into the GTT.
2679 */
2680 obj_priv->agp_mem = drm_agp_bind_pages(dev,
Eric Anholt856fa192009-03-19 14:10:50 -07002681 obj_priv->pages,
Chris Wilson07f73f62009-09-14 16:50:30 +01002682 obj->size >> PAGE_SHIFT,
Keith Packardba1eb1d2008-10-14 19:55:10 -07002683 obj_priv->gtt_offset,
2684 obj_priv->agp_type);
Eric Anholt673a3942008-07-30 12:06:12 -07002685 if (obj_priv->agp_mem == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002686 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002687 drm_mm_put_block(obj_priv->gtt_space);
2688 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002689
2690 ret = i915_gem_evict_something(dev, obj->size);
Chris Wilson97311292009-09-21 00:22:34 +01002691 if (ret)
Chris Wilson07f73f62009-09-14 16:50:30 +01002692 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002693
2694 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002695 }
2696 atomic_inc(&dev->gtt_count);
2697 atomic_add(obj->size, &dev->gtt_memory);
2698
2699 /* Assert that the object is not currently in any GPU domain. As it
2700 * wasn't in the GTT, there shouldn't be any way it could have been in
2701 * a GPU cache
2702 */
Chris Wilson21d509e2009-06-06 09:46:02 +01002703 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2704 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002705
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002706 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2707
Eric Anholt673a3942008-07-30 12:06:12 -07002708 return 0;
2709}
2710
2711void
2712i915_gem_clflush_object(struct drm_gem_object *obj)
2713{
2714 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2715
2716 /* If we don't have a page list set up, then we're not pinned
2717 * to GPU, and we can ignore the cache flush because it'll happen
2718 * again at bind time.
2719 */
Eric Anholt856fa192009-03-19 14:10:50 -07002720 if (obj_priv->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002721 return;
2722
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002723 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002724
Eric Anholt856fa192009-03-19 14:10:50 -07002725 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002726}
2727
Eric Anholte47c68e2008-11-14 13:35:19 -08002728/** Flushes any GPU write domain for the object if it's dirty. */
2729static void
2730i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2731{
2732 struct drm_device *dev = obj->dev;
2733 uint32_t seqno;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002734 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002735
2736 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2737 return;
2738
2739 /* Queue the GPU write cache flushing we need. */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002740 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002741 i915_gem_flush(dev, 0, obj->write_domain);
Eric Anholtb9624422009-06-03 07:27:35 +00002742 seqno = i915_add_request(dev, NULL, obj->write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002743 obj->write_domain = 0;
2744 i915_gem_object_move_to_active(obj, seqno);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002745
2746 trace_i915_gem_object_change_domain(obj,
2747 obj->read_domains,
2748 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002749}
2750
2751/** Flushes the GTT write domain for the object if it's dirty. */
2752static void
2753i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2754{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002755 uint32_t old_write_domain;
2756
Eric Anholte47c68e2008-11-14 13:35:19 -08002757 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2758 return;
2759
2760 /* No actual flushing is required for the GTT write domain. Writes
2761 * to it immediately go to main memory as far as we know, so there's
2762 * no chipset flush. It also doesn't land in render cache.
2763 */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002764 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002765 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002766
2767 trace_i915_gem_object_change_domain(obj,
2768 obj->read_domains,
2769 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002770}
2771
2772/** Flushes the CPU write domain for the object if it's dirty. */
2773static void
2774i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2775{
2776 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002777 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002778
2779 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2780 return;
2781
2782 i915_gem_clflush_object(obj);
2783 drm_agp_chipset_flush(dev);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002784 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002785 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002786
2787 trace_i915_gem_object_change_domain(obj,
2788 obj->read_domains,
2789 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002790}
2791
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002792/**
2793 * Moves a single object to the GTT read, and possibly write domain.
2794 *
2795 * This function returns when the move is complete, including waiting on
2796 * flushes to occur.
2797 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002798int
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002799i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2800{
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002801 struct drm_i915_gem_object *obj_priv = obj->driver_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002802 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002803 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002804
Eric Anholt02354392008-11-26 13:58:13 -08002805 /* Not valid to be called on unbound objects. */
2806 if (obj_priv->gtt_space == NULL)
2807 return -EINVAL;
2808
Eric Anholte47c68e2008-11-14 13:35:19 -08002809 i915_gem_object_flush_gpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002810 /* Wait on any GPU rendering and flushing to occur. */
Eric Anholte47c68e2008-11-14 13:35:19 -08002811 ret = i915_gem_object_wait_rendering(obj);
2812 if (ret != 0)
2813 return ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002814
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002815 old_write_domain = obj->write_domain;
2816 old_read_domains = obj->read_domains;
2817
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002818 /* If we're writing through the GTT domain, then CPU and GPU caches
2819 * will need to be invalidated at next use.
2820 */
2821 if (write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002822 obj->read_domains &= I915_GEM_DOMAIN_GTT;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002823
Eric Anholte47c68e2008-11-14 13:35:19 -08002824 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002825
2826 /* It should now be out of any other write domains, and we can update
2827 * the domain values for our changes.
2828 */
2829 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2830 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002831 if (write) {
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002832 obj->write_domain = I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002833 obj_priv->dirty = 1;
2834 }
2835
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002836 trace_i915_gem_object_change_domain(obj,
2837 old_read_domains,
2838 old_write_domain);
2839
Eric Anholte47c68e2008-11-14 13:35:19 -08002840 return 0;
2841}
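/*
 * Typical usage (illustrative sketch, not a fixed API contract): callers
 * that want to write through a GTT mapping first do
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 *	if (ret == 0) {
 *		... write through the GTT mapping ...
 *	}
 *
 * so any dirty CPU cachelines are flushed first and the object is marked
 * dirty for eventual writeback when it is unbound.
 */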
2842
2843/**
2844 * Moves a single object to the CPU read, and possibly write domain.
2845 *
2846 * This function returns when the move is complete, including waiting on
2847 * flushes to occur.
2848 */
2849static int
2850i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2851{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002852 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002853 int ret;
2854
2855 i915_gem_object_flush_gpu_write_domain(obj);
2856 /* Wait on any GPU rendering and flushing to occur. */
2857 ret = i915_gem_object_wait_rendering(obj);
2858 if (ret != 0)
2859 return ret;
2860
2861 i915_gem_object_flush_gtt_write_domain(obj);
2862
2863 /* If we have a partially-valid cache of the object in the CPU,
2864 * finish invalidating it and free the per-page flags.
2865 */
2866 i915_gem_object_set_to_full_cpu_read_domain(obj);
2867
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002868 old_write_domain = obj->write_domain;
2869 old_read_domains = obj->read_domains;
2870
Eric Anholte47c68e2008-11-14 13:35:19 -08002871 /* Flush the CPU cache if it's still invalid. */
2872 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2873 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002874
2875 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2876 }
2877
2878 /* It should now be out of any other write domains, and we can update
2879 * the domain values for our changes.
2880 */
2881 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2882
2883 /* If we're writing through the CPU, then the GPU read domains will
2884 * need to be invalidated at next use.
2885 */
2886 if (write) {
2887 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2888 obj->write_domain = I915_GEM_DOMAIN_CPU;
2889 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002890
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002891 trace_i915_gem_object_change_domain(obj,
2892 old_read_domains,
2893 old_write_domain);
2894
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002895 return 0;
2896}
2897
Eric Anholt673a3942008-07-30 12:06:12 -07002898/*
2899 * Set the next domain for the specified object. This
2900 * may not actually perform the necessary flushing/invalidating though,
2901 * as that may want to be batched with other set_domain operations
2902 *
2903 * This is (we hope) the only really tricky part of gem. The goal
2904 * is fairly simple -- track which caches hold bits of the object
2905 * and make sure they remain coherent. A few concrete examples may
2906 * help to explain how it works. For shorthand, we use the notation
2907 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2908 * a pair of read and write domain masks.
2909 *
2910 * Case 1: the batch buffer
2911 *
2912 * 1. Allocated
2913 * 2. Written by CPU
2914 * 3. Mapped to GTT
2915 * 4. Read by GPU
2916 * 5. Unmapped from GTT
2917 * 6. Freed
2918 *
2919 * Let's take these a step at a time
2920 *
2921 * 1. Allocated
2922 * Pages allocated from the kernel may still have
2923 * cache contents, so we set them to (CPU, CPU) always.
2924 * 2. Written by CPU (using pwrite)
2925 * The pwrite function calls set_domain (CPU, CPU) and
2926 * this function does nothing (as nothing changes)
 2927 * 3. Mapped to GTT
2928 * This function asserts that the object is not
2929 * currently in any GPU-based read or write domains
2930 * 4. Read by GPU
2931 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2932 * As write_domain is zero, this function adds in the
2933 * current read domains (CPU+COMMAND, 0).
2934 * flush_domains is set to CPU.
2935 * invalidate_domains is set to COMMAND
2936 * clflush is run to get data out of the CPU caches
2937 * then i915_dev_set_domain calls i915_gem_flush to
2938 * emit an MI_FLUSH and drm_agp_chipset_flush
2939 * 5. Unmapped from GTT
2940 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2941 * flush_domains and invalidate_domains end up both zero
2942 * so no flushing/invalidating happens
2943 * 6. Freed
2944 * yay, done
2945 *
2946 * Case 2: The shared render buffer
2947 *
2948 * 1. Allocated
2949 * 2. Mapped to GTT
2950 * 3. Read/written by GPU
2951 * 4. set_domain to (CPU,CPU)
2952 * 5. Read/written by CPU
2953 * 6. Read/written by GPU
2954 *
2955 * 1. Allocated
2956 * Same as last example, (CPU, CPU)
2957 * 2. Mapped to GTT
2958 * Nothing changes (assertions find that it is not in the GPU)
2959 * 3. Read/written by GPU
2960 * execbuffer calls set_domain (RENDER, RENDER)
2961 * flush_domains gets CPU
2962 * invalidate_domains gets GPU
2963 * clflush (obj)
2964 * MI_FLUSH and drm_agp_chipset_flush
2965 * 4. set_domain (CPU, CPU)
2966 * flush_domains gets GPU
2967 * invalidate_domains gets CPU
2968 * wait_rendering (obj) to make sure all drawing is complete.
2969 * This will include an MI_FLUSH to get the data from GPU
2970 * to memory
2971 * clflush (obj) to invalidate the CPU cache
2972 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2973 * 5. Read/written by CPU
2974 * cache lines are loaded and dirtied
 2975 * 6. Read/written by GPU
2976 * Same as last GPU access
2977 *
2978 * Case 3: The constant buffer
2979 *
2980 * 1. Allocated
2981 * 2. Written by CPU
2982 * 3. Read by GPU
2983 * 4. Updated (written) by CPU again
2984 * 5. Read by GPU
2985 *
2986 * 1. Allocated
2987 * (CPU, CPU)
2988 * 2. Written by CPU
2989 * (CPU, CPU)
2990 * 3. Read by GPU
2991 * (CPU+RENDER, 0)
2992 * flush_domains = CPU
2993 * invalidate_domains = RENDER
2994 * clflush (obj)
2995 * MI_FLUSH
2996 * drm_agp_chipset_flush
2997 * 4. Updated (written) by CPU again
2998 * (CPU, CPU)
2999 * flush_domains = 0 (no previous write domain)
3000 * invalidate_domains = 0 (no new read domains)
3001 * 5. Read by GPU
3002 * (CPU+RENDER, 0)
3003 * flush_domains = CPU
3004 * invalidate_domains = RENDER
3005 * clflush (obj)
3006 * MI_FLUSH
3007 * drm_agp_chipset_flush
3008 */
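/*
 * A worked example of the mask arithmetic performed below (illustrative
 * only): in Case 1 step 4 the object has write_domain == CPU and the
 * execbuffer requests pending_read_domains == COMMAND, so
 *
 *	flush_domains      |= write_domain;			   (== CPU)
 *	invalidate_domains |= pending_read_domains & ~read_domains; (== COMMAND)
 *
 * which yields exactly the clflush plus MI_FLUSH/chipset-flush sequence
 * described above.
 */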
Keith Packardc0d90822008-11-20 23:11:08 -08003009static void
Eric Anholt8b0e3782009-02-19 14:40:50 -08003010i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003011{
3012 struct drm_device *dev = obj->dev;
3013 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3014 uint32_t invalidate_domains = 0;
3015 uint32_t flush_domains = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003016 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003017
Eric Anholt8b0e3782009-02-19 14:40:50 -08003018 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
3019 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
Eric Anholt673a3942008-07-30 12:06:12 -07003020
Jesse Barnes652c3932009-08-17 13:31:43 -07003021 intel_mark_busy(dev, obj);
3022
Eric Anholt673a3942008-07-30 12:06:12 -07003023#if WATCH_BUF
3024 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
3025 __func__, obj,
Eric Anholt8b0e3782009-02-19 14:40:50 -08003026 obj->read_domains, obj->pending_read_domains,
3027 obj->write_domain, obj->pending_write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003028#endif
3029 /*
3030 * If the object isn't moving to a new write domain,
3031 * let the object stay in multiple read domains
3032 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003033 if (obj->pending_write_domain == 0)
3034 obj->pending_read_domains |= obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003035 else
3036 obj_priv->dirty = 1;
3037
3038 /*
3039 * Flush the current write domain if
3040 * the new read domains don't match. Invalidate
3041 * any read domains which differ from the old
3042 * write domain
3043 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003044 if (obj->write_domain &&
3045 obj->write_domain != obj->pending_read_domains) {
Eric Anholt673a3942008-07-30 12:06:12 -07003046 flush_domains |= obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003047 invalidate_domains |=
3048 obj->pending_read_domains & ~obj->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07003049 }
3050 /*
3051 * Invalidate any read caches which may have
3052 * stale data. That is, any new read domains.
3053 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003054 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003055 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3056#if WATCH_BUF
3057 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3058 __func__, flush_domains, invalidate_domains);
3059#endif
Eric Anholt673a3942008-07-30 12:06:12 -07003060 i915_gem_clflush_object(obj);
3061 }
3062
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003063 old_read_domains = obj->read_domains;
3064
Eric Anholtefbeed92009-02-19 14:54:51 -08003065 /* The actual obj->write_domain will be updated with
3066 * pending_write_domain after we emit the accumulated flush for all
3067 * of our domain changes in execbuffers (which clears objects'
3068 * write_domains). So if we have a current write domain that we
3069 * aren't changing, set pending_write_domain to that.
3070 */
3071 if (flush_domains == 0 && obj->pending_write_domain == 0)
3072 obj->pending_write_domain = obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003073 obj->read_domains = obj->pending_read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003074
3075 dev->invalidate_domains |= invalidate_domains;
3076 dev->flush_domains |= flush_domains;
3077#if WATCH_BUF
3078 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3079 __func__,
3080 obj->read_domains, obj->write_domain,
3081 dev->invalidate_domains, dev->flush_domains);
3082#endif
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003083
3084 trace_i915_gem_object_change_domain(obj,
3085 old_read_domains,
3086 obj->write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003087}
3088
3089/**
Eric Anholte47c68e2008-11-14 13:35:19 -08003090 * Moves the object from a partially valid CPU read domain to a fully valid one.
Eric Anholt673a3942008-07-30 12:06:12 -07003091 *
Eric Anholte47c68e2008-11-14 13:35:19 -08003092 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3093 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3094 */
3095static void
3096i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3097{
Eric Anholte47c68e2008-11-14 13:35:19 -08003098 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3099
3100 if (!obj_priv->page_cpu_valid)
3101 return;
3102
3103 /* If we're partially in the CPU read domain, finish moving it in.
3104 */
3105 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3106 int i;
3107
3108 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3109 if (obj_priv->page_cpu_valid[i])
3110 continue;
Eric Anholt856fa192009-03-19 14:10:50 -07003111 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08003112 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003113 }
3114
3115 /* Free the page_cpu_valid mappings which are now stale, whether
3116 * or not we've got I915_GEM_DOMAIN_CPU.
3117 */
Eric Anholt9a298b22009-03-24 12:23:04 -07003118 kfree(obj_priv->page_cpu_valid);
Eric Anholte47c68e2008-11-14 13:35:19 -08003119 obj_priv->page_cpu_valid = NULL;
3120}
3121
3122/**
3123 * Set the CPU read domain on a range of the object.
3124 *
3125 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3126 * not entirely valid. The page_cpu_valid member of the object flags which
3127 * pages have been flushed, and will be respected by
3128 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3129 * of the whole object.
3130 *
3131 * This function returns when the move is complete, including waiting on
3132 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07003133 */
3134static int
Eric Anholte47c68e2008-11-14 13:35:19 -08003135i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3136 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07003137{
3138 struct drm_i915_gem_object *obj_priv = obj->driver_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003139 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003140 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003141
Eric Anholte47c68e2008-11-14 13:35:19 -08003142 if (offset == 0 && size == obj->size)
3143 return i915_gem_object_set_to_cpu_domain(obj, 0);
3144
3145 i915_gem_object_flush_gpu_write_domain(obj);
3146 /* Wait on any GPU rendering and flushing to occur. */
3147 ret = i915_gem_object_wait_rendering(obj);
3148 if (ret != 0)
3149 return ret;
3150 i915_gem_object_flush_gtt_write_domain(obj);
3151
3152 /* If we're already fully in the CPU read domain, we're done. */
3153 if (obj_priv->page_cpu_valid == NULL &&
3154 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003155 return 0;
3156
Eric Anholte47c68e2008-11-14 13:35:19 -08003157 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3158 * newly adding I915_GEM_DOMAIN_CPU
3159 */
Eric Anholt673a3942008-07-30 12:06:12 -07003160 if (obj_priv->page_cpu_valid == NULL) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003161 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3162 GFP_KERNEL);
Eric Anholte47c68e2008-11-14 13:35:19 -08003163 if (obj_priv->page_cpu_valid == NULL)
3164 return -ENOMEM;
3165 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3166 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003167
3168 /* Flush the cache on any pages that are still invalid from the CPU's
3169 * perspective.
3170 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003171 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3172 i++) {
Eric Anholt673a3942008-07-30 12:06:12 -07003173 if (obj_priv->page_cpu_valid[i])
3174 continue;
3175
Eric Anholt856fa192009-03-19 14:10:50 -07003176 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003177
3178 obj_priv->page_cpu_valid[i] = 1;
3179 }
3180
Eric Anholte47c68e2008-11-14 13:35:19 -08003181 /* It should now be out of any other write domains, and we can update
3182 * the domain values for our changes.
3183 */
3184 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3185
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003186 old_read_domains = obj->read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003187 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3188
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003189 trace_i915_gem_object_change_domain(obj,
3190 old_read_domains,
3191 obj->write_domain);
3192
Eric Anholt673a3942008-07-30 12:06:12 -07003193 return 0;
3194}
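/*
 * Worked example (illustrative only): with 4 KB pages, a partial read of
 * 100 bytes at offset 8100 covers
 *
 *	first page = 8100 / 4096		= 1
 *	last page  = (8100 + 100 - 1) / 4096	= 2
 *
 * so only pages 1 and 2 are clflushed and marked in page_cpu_valid; the
 * rest of the object stays out of the CPU read domain.
 */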
3195
3196/**
Eric Anholt673a3942008-07-30 12:06:12 -07003197 * Pin an object to the GTT and evaluate the relocations landing in it.
3198 */
3199static int
3200i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3201 struct drm_file *file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003202 struct drm_i915_gem_exec_object *entry,
3203 struct drm_i915_gem_relocation_entry *relocs)
Eric Anholt673a3942008-07-30 12:06:12 -07003204{
3205 struct drm_device *dev = obj->dev;
Keith Packard0839ccb2008-10-30 19:38:48 -07003206 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003207 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3208 int i, ret;
Keith Packard0839ccb2008-10-30 19:38:48 -07003209 void __iomem *reloc_page;
Eric Anholt673a3942008-07-30 12:06:12 -07003210
3211 /* Choose the GTT offset for our buffer and put it there. */
3212 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3213 if (ret)
3214 return ret;
3215
3216 entry->offset = obj_priv->gtt_offset;
3217
Eric Anholt673a3942008-07-30 12:06:12 -07003218 /* Apply the relocations, using the GTT aperture to avoid cache
3219 * flushing requirements.
3220 */
3221 for (i = 0; i < entry->relocation_count; i++) {
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003222		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003223 struct drm_gem_object *target_obj;
3224 struct drm_i915_gem_object *target_obj_priv;
Eric Anholt3043c602008-10-02 12:24:47 -07003225 uint32_t reloc_val, reloc_offset;
3226 uint32_t __iomem *reloc_entry;
Eric Anholt673a3942008-07-30 12:06:12 -07003227
Eric Anholt673a3942008-07-30 12:06:12 -07003228 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003229 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003230 if (target_obj == NULL) {
3231 i915_gem_object_unpin(obj);
3232 return -EBADF;
3233 }
3234 target_obj_priv = target_obj->driver_private;
3235
Chris Wilson8542a0b2009-09-09 21:15:15 +01003236#if WATCH_RELOC
3237 DRM_INFO("%s: obj %p offset %08x target %d "
3238 "read %08x write %08x gtt %08x "
3239 "presumed %08x delta %08x\n",
3240 __func__,
3241 obj,
3242 (int) reloc->offset,
3243 (int) reloc->target_handle,
3244 (int) reloc->read_domains,
3245 (int) reloc->write_domain,
3246 (int) target_obj_priv->gtt_offset,
3247 (int) reloc->presumed_offset,
3248 reloc->delta);
3249#endif
3250
Eric Anholt673a3942008-07-30 12:06:12 -07003251 /* The target buffer should have appeared before us in the
3252 * exec_object list, so it should have a GTT space bound by now.
3253 */
3254 if (target_obj_priv->gtt_space == NULL) {
3255 DRM_ERROR("No GTT space found for object %d\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003256 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003257 drm_gem_object_unreference(target_obj);
3258 i915_gem_object_unpin(obj);
3259 return -EINVAL;
3260 }
3261
Chris Wilson8542a0b2009-09-09 21:15:15 +01003262 /* Validate that the target is in a valid r/w GPU domain */
3263 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3264 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3265 DRM_ERROR("reloc with read/write CPU domains: "
3266 "obj %p target %d offset %d "
3267 "read %08x write %08x",
3268 obj, reloc->target_handle,
3269 (int) reloc->offset,
3270 reloc->read_domains,
3271 reloc->write_domain);
3272 drm_gem_object_unreference(target_obj);
3273 i915_gem_object_unpin(obj);
3274 return -EINVAL;
3275 }
3276 if (reloc->write_domain && target_obj->pending_write_domain &&
3277 reloc->write_domain != target_obj->pending_write_domain) {
3278 DRM_ERROR("Write domain conflict: "
3279 "obj %p target %d offset %d "
3280 "new %08x old %08x\n",
3281 obj, reloc->target_handle,
3282 (int) reloc->offset,
3283 reloc->write_domain,
3284 target_obj->pending_write_domain);
3285 drm_gem_object_unreference(target_obj);
3286 i915_gem_object_unpin(obj);
3287 return -EINVAL;
3288 }
3289
3290 target_obj->pending_read_domains |= reloc->read_domains;
3291 target_obj->pending_write_domain |= reloc->write_domain;
3292
3293 /* If the relocation already has the right value in it, no
3294 * more work needs to be done.
3295 */
3296 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3297 drm_gem_object_unreference(target_obj);
3298 continue;
3299 }
3300
3301 /* Check that the relocation address is valid... */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003302 if (reloc->offset > obj->size - 4) {
Eric Anholt673a3942008-07-30 12:06:12 -07003303 DRM_ERROR("Relocation beyond object bounds: "
3304 "obj %p target %d offset %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003305 obj, reloc->target_handle,
3306 (int) reloc->offset, (int) obj->size);
Eric Anholt673a3942008-07-30 12:06:12 -07003307 drm_gem_object_unreference(target_obj);
3308 i915_gem_object_unpin(obj);
3309 return -EINVAL;
3310 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003311 if (reloc->offset & 3) {
Eric Anholt673a3942008-07-30 12:06:12 -07003312 DRM_ERROR("Relocation not 4-byte aligned: "
3313 "obj %p target %d offset %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003314 obj, reloc->target_handle,
3315 (int) reloc->offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003316 drm_gem_object_unreference(target_obj);
3317 i915_gem_object_unpin(obj);
3318 return -EINVAL;
3319 }
3320
Chris Wilson8542a0b2009-09-09 21:15:15 +01003321 /* and points to somewhere within the target object. */
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003322 if (reloc->delta >= target_obj->size) {
3323 DRM_ERROR("Relocation beyond target object bounds: "
3324 "obj %p target %d delta %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003325 obj, reloc->target_handle,
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003326 (int) reloc->delta, (int) target_obj->size);
Chris Wilson491152b2009-02-11 14:26:32 +00003327 drm_gem_object_unreference(target_obj);
3328 i915_gem_object_unpin(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003329 return -EINVAL;
3330 }
3331
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003332 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3333 if (ret != 0) {
3334 drm_gem_object_unreference(target_obj);
3335 i915_gem_object_unpin(obj);
3336 return -EINVAL;
Eric Anholt673a3942008-07-30 12:06:12 -07003337 }
3338
3339 /* Map the page containing the relocation we're going to
3340 * perform.
3341 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003342 reloc_offset = obj_priv->gtt_offset + reloc->offset;
Keith Packard0839ccb2008-10-30 19:38:48 -07003343 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3344 (reloc_offset &
3345 ~(PAGE_SIZE - 1)));
Eric Anholt3043c602008-10-02 12:24:47 -07003346 reloc_entry = (uint32_t __iomem *)(reloc_page +
Keith Packard0839ccb2008-10-30 19:38:48 -07003347 (reloc_offset & (PAGE_SIZE - 1)));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003348 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
Eric Anholt673a3942008-07-30 12:06:12 -07003349
3350#if WATCH_BUF
3351 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003352 obj, (unsigned int) reloc->offset,
Eric Anholt673a3942008-07-30 12:06:12 -07003353 readl(reloc_entry), reloc_val);
3354#endif
3355 writel(reloc_val, reloc_entry);
Keith Packard0839ccb2008-10-30 19:38:48 -07003356 io_mapping_unmap_atomic(reloc_page);
Eric Anholt673a3942008-07-30 12:06:12 -07003357
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003358 /* The updated presumed offset for this entry will be
3359 * copied back out to the user.
Eric Anholt673a3942008-07-30 12:06:12 -07003360 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003361 reloc->presumed_offset = target_obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003362
3363 drm_gem_object_unreference(target_obj);
3364 }
3365
Eric Anholt673a3942008-07-30 12:06:12 -07003366#if WATCH_BUF
3367 if (0)
3368 i915_gem_dump_object(obj, 128, __func__, ~0);
3369#endif
3370 return 0;
3371}
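/*
 * Illustrative sketch only, not driver code: the values below are
 * hypothetical and just show how the write performed above is derived
 * for a single relocation entry.
 */
#if 0
	struct drm_i915_gem_relocation_entry example = {
		.target_handle	 = 5,		/* handle of the target BO */
		.offset		 = 0x40,	/* byte patched in this BO */
		.delta		 = 0x100,	/* offset inside the target */
		.presumed_offset = 0,		/* userspace's stale guess */
	};

	/* If the target ended up bound at gtt_offset 0x10000, the loop
	 * above writes 0x10000 + 0x100 = 0x10100 into this object at
	 * byte 0x40, and reports presumed_offset = 0x10000 back to
	 * userspace so the next execbuf can skip the rewrite.
	 */
#endif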
3372
3373/** Dispatch a batchbuffer to the ring
3374 */
3375static int
3376i915_dispatch_gem_execbuffer(struct drm_device *dev,
3377 struct drm_i915_gem_execbuffer *exec,
Eric Anholt201361a2009-03-11 12:30:04 -07003378 struct drm_clip_rect *cliprects,
Eric Anholt673a3942008-07-30 12:06:12 -07003379 uint64_t exec_offset)
3380{
3381 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003382 int nbox = exec->num_cliprects;
3383 int i = 0, count;
Chris Wilson83d60792009-06-06 09:45:57 +01003384 uint32_t exec_start, exec_len;
Eric Anholt673a3942008-07-30 12:06:12 -07003385 RING_LOCALS;
3386
3387 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3388 exec_len = (uint32_t) exec->batch_len;
3389
Chris Wilson8f0dc5b2009-09-24 00:43:17 +01003390 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003391
Eric Anholt673a3942008-07-30 12:06:12 -07003392 count = nbox ? nbox : 1;
3393
3394 for (i = 0; i < count; i++) {
3395 if (i < nbox) {
Eric Anholt201361a2009-03-11 12:30:04 -07003396 int ret = i915_emit_box(dev, cliprects, i,
Eric Anholt673a3942008-07-30 12:06:12 -07003397 exec->DR1, exec->DR4);
3398 if (ret)
3399 return ret;
3400 }
3401
3402 if (IS_I830(dev) || IS_845G(dev)) {
3403 BEGIN_LP_RING(4);
3404 OUT_RING(MI_BATCH_BUFFER);
3405 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3406 OUT_RING(exec_start + exec_len - 4);
3407 OUT_RING(0);
3408 ADVANCE_LP_RING();
3409 } else {
3410 BEGIN_LP_RING(2);
3411 if (IS_I965G(dev)) {
3412 OUT_RING(MI_BATCH_BUFFER_START |
3413 (2 << 6) |
3414 MI_BATCH_NON_SECURE_I965);
3415 OUT_RING(exec_start);
3416 } else {
3417 OUT_RING(MI_BATCH_BUFFER_START |
3418 (2 << 6));
3419 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3420 }
3421 ADVANCE_LP_RING();
3422 }
3423 }
3424
3425 /* XXX breadcrumb */
3426 return 0;
3427}
3428
 3429/* Throttle our rendering by waiting until the ring has completed the
 3430 * requests we emitted more than 20 msec ago.
3431 *
Eric Anholtb9624422009-06-03 07:27:35 +00003432 * Note that if we were to use the current jiffies each time around the loop,
3433 * we wouldn't escape the function with any frames outstanding if the time to
3434 * render a frame was over 20ms.
3435 *
Eric Anholt673a3942008-07-30 12:06:12 -07003436 * This should get us reasonable parallelism between CPU and GPU but also
3437 * relatively low latency when blocking on a particular request to finish.
3438 */
3439static int
3440i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3441{
3442 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3443 int ret = 0;
Eric Anholtb9624422009-06-03 07:27:35 +00003444 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Eric Anholt673a3942008-07-30 12:06:12 -07003445
3446 mutex_lock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003447 while (!list_empty(&i915_file_priv->mm.request_list)) {
3448 struct drm_i915_gem_request *request;
3449
3450 request = list_first_entry(&i915_file_priv->mm.request_list,
3451 struct drm_i915_gem_request,
3452 client_list);
3453
3454 if (time_after_eq(request->emitted_jiffies, recent_enough))
3455 break;
3456
3457 ret = i915_wait_request(dev, request->seqno);
3458 if (ret != 0)
3459 break;
3460 }
Eric Anholt673a3942008-07-30 12:06:12 -07003461 mutex_unlock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003462
Eric Anholt673a3942008-07-30 12:06:12 -07003463 return ret;
3464}
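/*
 * Illustrative sketch only: how a client would typically reach the
 * throttle path from userspace, once per frame. The fd is hypothetical
 * and drmIoctl() is assumed to come from libdrm.
 */
#if 0
	/* Blocks until all of this client's requests older than ~20 msec
	 * have retired, then returns 0 (or a wait error).
	 */
	drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
#endif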
3465
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003466static int
3467i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3468 uint32_t buffer_count,
3469 struct drm_i915_gem_relocation_entry **relocs)
3470{
3471 uint32_t reloc_count = 0, reloc_index = 0, i;
3472 int ret;
3473
3474 *relocs = NULL;
3475 for (i = 0; i < buffer_count; i++) {
3476 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3477 return -EINVAL;
3478 reloc_count += exec_list[i].relocation_count;
3479 }
3480
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003481 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003482 if (*relocs == NULL)
3483 return -ENOMEM;
3484
3485 for (i = 0; i < buffer_count; i++) {
3486 struct drm_i915_gem_relocation_entry __user *user_relocs;
3487
3488 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3489
3490 ret = copy_from_user(&(*relocs)[reloc_index],
3491 user_relocs,
3492 exec_list[i].relocation_count *
3493 sizeof(**relocs));
3494 if (ret != 0) {
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003495 drm_free_large(*relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003496 *relocs = NULL;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003497 return -EFAULT;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003498 }
3499
3500 reloc_index += exec_list[i].relocation_count;
3501 }
3502
Florian Mickler2bc43b52009-04-06 22:55:41 +02003503 return 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003504}
3505
3506static int
3507i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3508 uint32_t buffer_count,
3509 struct drm_i915_gem_relocation_entry *relocs)
3510{
3511 uint32_t reloc_count = 0, i;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003512 int ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003513
3514 for (i = 0; i < buffer_count; i++) {
3515 struct drm_i915_gem_relocation_entry __user *user_relocs;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003516 int unwritten;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003517
3518 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3519
Florian Mickler2bc43b52009-04-06 22:55:41 +02003520 unwritten = copy_to_user(user_relocs,
3521 &relocs[reloc_count],
3522 exec_list[i].relocation_count *
3523 sizeof(*relocs));
3524
3525 if (unwritten) {
3526 ret = -EFAULT;
3527 goto err;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003528 }
3529
3530 reloc_count += exec_list[i].relocation_count;
3531 }
3532
Florian Mickler2bc43b52009-04-06 22:55:41 +02003533err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003534 drm_free_large(relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003535
3536 return ret;
3537}
3538
Chris Wilson83d60792009-06-06 09:45:57 +01003539static int
 3540i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
3541 uint64_t exec_offset)
3542{
3543 uint32_t exec_start, exec_len;
3544
3545 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3546 exec_len = (uint32_t) exec->batch_len;
3547
3548 if ((exec_start | exec_len) & 0x7)
3549 return -EINVAL;
3550
3551 if (!exec_start)
3552 return -EINVAL;
3553
3554 return 0;
3555}
3556
Eric Anholt673a3942008-07-30 12:06:12 -07003557int
3558i915_gem_execbuffer(struct drm_device *dev, void *data,
3559 struct drm_file *file_priv)
3560{
3561 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003562 struct drm_i915_gem_execbuffer *args = data;
3563 struct drm_i915_gem_exec_object *exec_list = NULL;
3564 struct drm_gem_object **object_list = NULL;
3565 struct drm_gem_object *batch_obj;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003566 struct drm_i915_gem_object *obj_priv;
Eric Anholt201361a2009-03-11 12:30:04 -07003567 struct drm_clip_rect *cliprects = NULL;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003568 struct drm_i915_gem_relocation_entry *relocs;
3569 int ret, ret2, i, pinned = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003570 uint64_t exec_offset;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003571 uint32_t seqno, flush_domains, reloc_index;
Keith Packardac94a962008-11-20 23:30:27 -08003572 int pin_tries;
Eric Anholt673a3942008-07-30 12:06:12 -07003573
3574#if WATCH_EXEC
3575 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3576 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3577#endif
3578
Eric Anholt4f481ed2008-09-10 14:22:49 -07003579 if (args->buffer_count < 1) {
3580 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3581 return -EINVAL;
3582 }
Eric Anholt673a3942008-07-30 12:06:12 -07003583 /* Copy in the exec list from userland */
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003584 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3585 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
Eric Anholt673a3942008-07-30 12:06:12 -07003586 if (exec_list == NULL || object_list == NULL) {
3587 DRM_ERROR("Failed to allocate exec or object list "
3588 "for %d buffers\n",
3589 args->buffer_count);
3590 ret = -ENOMEM;
3591 goto pre_mutex_err;
3592 }
3593 ret = copy_from_user(exec_list,
3594 (struct drm_i915_relocation_entry __user *)
3595 (uintptr_t) args->buffers_ptr,
3596 sizeof(*exec_list) * args->buffer_count);
3597 if (ret != 0) {
3598 DRM_ERROR("copy %d exec entries failed %d\n",
3599 args->buffer_count, ret);
3600 goto pre_mutex_err;
3601 }
3602
Eric Anholt201361a2009-03-11 12:30:04 -07003603 if (args->num_cliprects != 0) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003604 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3605 GFP_KERNEL);
Eric Anholt201361a2009-03-11 12:30:04 -07003606		if (cliprects == NULL) {
	3607			ret = -ENOMEM;
	3608			goto pre_mutex_err;
	3609		}
3608
3609 ret = copy_from_user(cliprects,
3610 (struct drm_clip_rect __user *)
3611 (uintptr_t) args->cliprects_ptr,
3612 sizeof(*cliprects) * args->num_cliprects);
3613 if (ret != 0) {
3614 DRM_ERROR("copy %d cliprects failed: %d\n",
3615 args->num_cliprects, ret);
3616 goto pre_mutex_err;
3617 }
3618 }
3619
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003620 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3621 &relocs);
3622 if (ret != 0)
3623 goto pre_mutex_err;
3624
Eric Anholt673a3942008-07-30 12:06:12 -07003625 mutex_lock(&dev->struct_mutex);
3626
3627 i915_verify_inactive(dev, __FILE__, __LINE__);
3628
Ben Gamariba1234d2009-09-14 17:48:47 -04003629 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003630 DRM_ERROR("Execbuf while wedged\n");
3631 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003632 ret = -EIO;
3633 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003634 }
3635
3636 if (dev_priv->mm.suspended) {
3637 DRM_ERROR("Execbuf while VT-switched.\n");
3638 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003639 ret = -EBUSY;
3640 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003641 }
3642
Keith Packardac94a962008-11-20 23:30:27 -08003643 /* Look up object handles */
Eric Anholt673a3942008-07-30 12:06:12 -07003644 for (i = 0; i < args->buffer_count; i++) {
3645 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3646 exec_list[i].handle);
3647 if (object_list[i] == NULL) {
3648 DRM_ERROR("Invalid object handle %d at index %d\n",
3649 exec_list[i].handle, i);
3650 ret = -EBADF;
3651 goto err;
3652 }
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003653
3654 obj_priv = object_list[i]->driver_private;
3655 if (obj_priv->in_execbuffer) {
3656 DRM_ERROR("Object %p appears more than once in object list\n",
3657 object_list[i]);
3658 ret = -EBADF;
3659 goto err;
3660 }
3661 obj_priv->in_execbuffer = true;
Keith Packardac94a962008-11-20 23:30:27 -08003662 }
Eric Anholt673a3942008-07-30 12:06:12 -07003663
Keith Packardac94a962008-11-20 23:30:27 -08003664 /* Pin and relocate */
3665 for (pin_tries = 0; ; pin_tries++) {
3666 ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003667 reloc_index = 0;
3668
Keith Packardac94a962008-11-20 23:30:27 -08003669 for (i = 0; i < args->buffer_count; i++) {
3670 object_list[i]->pending_read_domains = 0;
3671 object_list[i]->pending_write_domain = 0;
3672 ret = i915_gem_object_pin_and_relocate(object_list[i],
3673 file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003674 &exec_list[i],
3675 &relocs[reloc_index]);
Keith Packardac94a962008-11-20 23:30:27 -08003676 if (ret)
3677 break;
3678 pinned = i + 1;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003679 reloc_index += exec_list[i].relocation_count;
Keith Packardac94a962008-11-20 23:30:27 -08003680 }
3681 /* success */
3682 if (ret == 0)
3683 break;
3684
3685 /* error other than GTT full, or we've already tried again */
Chris Wilson2939e1f2009-06-06 09:46:03 +01003686 if (ret != -ENOSPC || pin_tries >= 1) {
Chris Wilson07f73f62009-09-14 16:50:30 +01003687 if (ret != -ERESTARTSYS) {
3688 unsigned long long total_size = 0;
3689 for (i = 0; i < args->buffer_count; i++)
3690 total_size += object_list[i]->size;
3691 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
3692 pinned+1, args->buffer_count,
3693 total_size, ret);
3694 DRM_ERROR("%d objects [%d pinned], "
3695 "%d object bytes [%d pinned], "
3696 "%d/%d gtt bytes\n",
3697 atomic_read(&dev->object_count),
3698 atomic_read(&dev->pin_count),
3699 atomic_read(&dev->object_memory),
3700 atomic_read(&dev->pin_memory),
3701 atomic_read(&dev->gtt_memory),
3702 dev->gtt_total);
3703 }
Eric Anholt673a3942008-07-30 12:06:12 -07003704 goto err;
3705 }
Keith Packardac94a962008-11-20 23:30:27 -08003706
3707 /* unpin all of our buffers */
3708 for (i = 0; i < pinned; i++)
3709 i915_gem_object_unpin(object_list[i]);
Eric Anholtb1177632008-12-10 10:09:41 -08003710 pinned = 0;
Keith Packardac94a962008-11-20 23:30:27 -08003711
3712 /* evict everyone we can from the aperture */
3713 ret = i915_gem_evict_everything(dev);
Chris Wilson07f73f62009-09-14 16:50:30 +01003714 if (ret && ret != -ENOSPC)
Keith Packardac94a962008-11-20 23:30:27 -08003715 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003716 }
3717
3718 /* Set the pending read domains for the batch buffer to COMMAND */
3719 batch_obj = object_list[args->buffer_count-1];
Chris Wilson5f26a2c2009-06-06 09:45:58 +01003720 if (batch_obj->pending_write_domain) {
3721 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3722 ret = -EINVAL;
3723 goto err;
3724 }
3725 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
Eric Anholt673a3942008-07-30 12:06:12 -07003726
Chris Wilson83d60792009-06-06 09:45:57 +01003727 /* Sanity check the batch buffer, prior to moving objects */
3728 exec_offset = exec_list[args->buffer_count - 1].offset;
 3729	ret = i915_gem_check_execbuffer(args, exec_offset);
3730 if (ret != 0) {
3731 DRM_ERROR("execbuf with invalid offset/length\n");
3732 goto err;
3733 }
3734
Eric Anholt673a3942008-07-30 12:06:12 -07003735 i915_verify_inactive(dev, __FILE__, __LINE__);
3736
Keith Packard646f0f62008-11-20 23:23:03 -08003737 /* Zero the global flush/invalidate flags. These
3738 * will be modified as new domains are computed
3739 * for each object
3740 */
3741 dev->invalidate_domains = 0;
3742 dev->flush_domains = 0;
3743
Eric Anholt673a3942008-07-30 12:06:12 -07003744 for (i = 0; i < args->buffer_count; i++) {
3745 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003746
Keith Packard646f0f62008-11-20 23:23:03 -08003747 /* Compute new gpu domains and update invalidate/flush */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003748 i915_gem_object_set_to_gpu_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003749 }
3750
3751 i915_verify_inactive(dev, __FILE__, __LINE__);
3752
Keith Packard646f0f62008-11-20 23:23:03 -08003753 if (dev->invalidate_domains | dev->flush_domains) {
3754#if WATCH_EXEC
3755 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3756 __func__,
3757 dev->invalidate_domains,
3758 dev->flush_domains);
3759#endif
3760 i915_gem_flush(dev,
3761 dev->invalidate_domains,
3762 dev->flush_domains);
3763 if (dev->flush_domains)
Eric Anholtb9624422009-06-03 07:27:35 +00003764 (void)i915_add_request(dev, file_priv,
3765 dev->flush_domains);
Keith Packard646f0f62008-11-20 23:23:03 -08003766 }
Eric Anholt673a3942008-07-30 12:06:12 -07003767
Eric Anholtefbeed92009-02-19 14:54:51 -08003768 for (i = 0; i < args->buffer_count; i++) {
3769 struct drm_gem_object *obj = object_list[i];
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003770 uint32_t old_write_domain = obj->write_domain;
Eric Anholtefbeed92009-02-19 14:54:51 -08003771
3772 obj->write_domain = obj->pending_write_domain;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003773 trace_i915_gem_object_change_domain(obj,
3774 obj->read_domains,
3775 old_write_domain);
Eric Anholtefbeed92009-02-19 14:54:51 -08003776 }
3777
Eric Anholt673a3942008-07-30 12:06:12 -07003778 i915_verify_inactive(dev, __FILE__, __LINE__);
3779
3780#if WATCH_COHERENCY
3781 for (i = 0; i < args->buffer_count; i++) {
3782 i915_gem_object_check_coherency(object_list[i],
3783 exec_list[i].handle);
3784 }
3785#endif
3786
Eric Anholt673a3942008-07-30 12:06:12 -07003787#if WATCH_EXEC
Ben Gamari6911a9b2009-04-02 11:24:54 -07003788 i915_gem_dump_object(batch_obj,
Eric Anholt673a3942008-07-30 12:06:12 -07003789 args->batch_len,
3790 __func__,
3791 ~0);
3792#endif
3793
Eric Anholt673a3942008-07-30 12:06:12 -07003794 /* Exec the batchbuffer */
Eric Anholt201361a2009-03-11 12:30:04 -07003795 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003796 if (ret) {
3797 DRM_ERROR("dispatch failed %d\n", ret);
3798 goto err;
3799 }
3800
3801 /*
3802 * Ensure that the commands in the batch buffer are
3803 * finished before the interrupt fires
3804 */
3805 flush_domains = i915_retire_commands(dev);
3806
3807 i915_verify_inactive(dev, __FILE__, __LINE__);
3808
3809 /*
3810 * Get a seqno representing the execution of the current buffer,
3811 * which we can wait on. We would like to mitigate these interrupts,
3812 * likely by only creating seqnos occasionally (so that we have
3813 * *some* interrupts representing completion of buffers that we can
3814 * wait on when trying to clear up gtt space).
3815 */
Eric Anholtb9624422009-06-03 07:27:35 +00003816 seqno = i915_add_request(dev, file_priv, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07003817 BUG_ON(seqno == 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003818 for (i = 0; i < args->buffer_count; i++) {
3819 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003820
Eric Anholtce44b0e2008-11-06 16:00:31 -08003821 i915_gem_object_move_to_active(obj, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07003822#if WATCH_LRU
3823 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3824#endif
3825 }
3826#if WATCH_LRU
3827 i915_dump_lru(dev, __func__);
3828#endif
3829
3830 i915_verify_inactive(dev, __FILE__, __LINE__);
3831
Eric Anholt673a3942008-07-30 12:06:12 -07003832err:
Julia Lawallaad87df2008-12-21 16:28:47 +01003833 for (i = 0; i < pinned; i++)
3834 i915_gem_object_unpin(object_list[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07003835
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003836 for (i = 0; i < args->buffer_count; i++) {
3837 if (object_list[i]) {
3838 obj_priv = object_list[i]->driver_private;
3839 obj_priv->in_execbuffer = false;
3840 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003841 drm_gem_object_unreference(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003842 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003843
Eric Anholt673a3942008-07-30 12:06:12 -07003844 mutex_unlock(&dev->struct_mutex);
3845
Roland Dreiera35f2e22009-02-06 17:48:09 -08003846 if (!ret) {
3847 /* Copy the new buffer offsets back to the user's exec list. */
3848 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3849 (uintptr_t) args->buffers_ptr,
3850 exec_list,
3851 sizeof(*exec_list) * args->buffer_count);
Florian Mickler2bc43b52009-04-06 22:55:41 +02003852 if (ret) {
3853 ret = -EFAULT;
Roland Dreiera35f2e22009-02-06 17:48:09 -08003854 DRM_ERROR("failed to copy %d exec entries "
3855 "back to user (%d)\n",
3856 args->buffer_count, ret);
Florian Mickler2bc43b52009-04-06 22:55:41 +02003857 }
Roland Dreiera35f2e22009-02-06 17:48:09 -08003858 }
3859
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003860 /* Copy the updated relocations out regardless of current error
3861 * state. Failure to update the relocs would mean that the next
3862 * time userland calls execbuf, it would do so with presumed offset
3863 * state that didn't match the actual object state.
3864 */
3865 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3866 relocs);
3867 if (ret2 != 0) {
3868 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3869
3870 if (ret == 0)
3871 ret = ret2;
3872 }
3873
Eric Anholt673a3942008-07-30 12:06:12 -07003874pre_mutex_err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003875 drm_free_large(object_list);
3876 drm_free_large(exec_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07003877 kfree(cliprects);
Eric Anholt673a3942008-07-30 12:06:12 -07003878
3879 return ret;
3880}
3881
3882int
3883i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3884{
3885 struct drm_device *dev = obj->dev;
3886 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3887 int ret;
3888
3889 i915_verify_inactive(dev, __FILE__, __LINE__);
3890 if (obj_priv->gtt_space == NULL) {
3891 ret = i915_gem_object_bind_to_gtt(obj, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01003892 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003893 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00003894 }
3895 /*
3896 * Pre-965 chips need a fence register set up in order to
3897 * properly handle tiled surfaces.
3898 */
Eric Anholta09ba7f2009-08-29 12:49:51 -07003899 if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003900 ret = i915_gem_object_get_fence_reg(obj);
Chris Wilson22c344e2009-02-11 14:26:45 +00003901 if (ret != 0) {
3902 if (ret != -EBUSY && ret != -ERESTARTSYS)
3903 DRM_ERROR("Failure to install fence: %d\n",
3904 ret);
3905 return ret;
3906 }
Eric Anholt673a3942008-07-30 12:06:12 -07003907 }
3908 obj_priv->pin_count++;
3909
3910 /* If the object is not active and not pending a flush,
3911 * remove it from the inactive list
3912 */
3913 if (obj_priv->pin_count == 1) {
3914 atomic_inc(&dev->pin_count);
3915 atomic_add(obj->size, &dev->pin_memory);
3916 if (!obj_priv->active &&
Chris Wilson21d509e2009-06-06 09:46:02 +01003917 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
Eric Anholt673a3942008-07-30 12:06:12 -07003918 !list_empty(&obj_priv->list))
3919 list_del_init(&obj_priv->list);
3920 }
3921 i915_verify_inactive(dev, __FILE__, __LINE__);
3922
3923 return 0;
3924}
3925
3926void
3927i915_gem_object_unpin(struct drm_gem_object *obj)
3928{
3929 struct drm_device *dev = obj->dev;
3930 drm_i915_private_t *dev_priv = dev->dev_private;
3931 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3932
3933 i915_verify_inactive(dev, __FILE__, __LINE__);
3934 obj_priv->pin_count--;
3935 BUG_ON(obj_priv->pin_count < 0);
3936 BUG_ON(obj_priv->gtt_space == NULL);
3937
3938 /* If the object is no longer pinned, and is
3939 * neither active nor being flushed, then stick it on
3940 * the inactive list
3941 */
3942 if (obj_priv->pin_count == 0) {
3943 if (!obj_priv->active &&
Chris Wilson21d509e2009-06-06 09:46:02 +01003944 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003945 list_move_tail(&obj_priv->list,
3946 &dev_priv->mm.inactive_list);
3947 atomic_dec(&dev->pin_count);
3948 atomic_sub(obj->size, &dev->pin_memory);
3949 }
3950 i915_verify_inactive(dev, __FILE__, __LINE__);
3951}
3952
3953int
3954i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3955 struct drm_file *file_priv)
3956{
3957 struct drm_i915_gem_pin *args = data;
3958 struct drm_gem_object *obj;
3959 struct drm_i915_gem_object *obj_priv;
3960 int ret;
3961
3962 mutex_lock(&dev->struct_mutex);
3963
3964 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3965 if (obj == NULL) {
3966 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3967 args->handle);
3968 mutex_unlock(&dev->struct_mutex);
3969 return -EBADF;
3970 }
3971 obj_priv = obj->driver_private;
3972
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003973 if (obj_priv->madv != I915_MADV_WILLNEED) {
3974 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson3ef94da2009-09-14 16:50:29 +01003975 drm_gem_object_unreference(obj);
3976 mutex_unlock(&dev->struct_mutex);
3977 return -EINVAL;
3978 }
3979
Jesse Barnes79e53942008-11-07 14:24:08 -08003980 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3981 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3982 args->handle);
Chris Wilson96dec612009-02-08 19:08:04 +00003983 drm_gem_object_unreference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003984 mutex_unlock(&dev->struct_mutex);
Jesse Barnes79e53942008-11-07 14:24:08 -08003985 return -EINVAL;
3986 }
3987
3988 obj_priv->user_pin_count++;
3989 obj_priv->pin_filp = file_priv;
3990 if (obj_priv->user_pin_count == 1) {
3991 ret = i915_gem_object_pin(obj, args->alignment);
3992 if (ret != 0) {
3993 drm_gem_object_unreference(obj);
3994 mutex_unlock(&dev->struct_mutex);
3995 return ret;
3996 }
Eric Anholt673a3942008-07-30 12:06:12 -07003997 }
3998
3999 /* XXX - flush the CPU caches for pinned objects
4000 * as the X server doesn't manage domains yet
4001 */
Eric Anholte47c68e2008-11-14 13:35:19 -08004002 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004003 args->offset = obj_priv->gtt_offset;
4004 drm_gem_object_unreference(obj);
4005 mutex_unlock(&dev->struct_mutex);
4006
4007 return 0;
4008}
4009
4010int
4011i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4012 struct drm_file *file_priv)
4013{
4014 struct drm_i915_gem_pin *args = data;
4015 struct drm_gem_object *obj;
Jesse Barnes79e53942008-11-07 14:24:08 -08004016 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07004017
4018 mutex_lock(&dev->struct_mutex);
4019
4020 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4021 if (obj == NULL) {
4022 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
4023 args->handle);
4024 mutex_unlock(&dev->struct_mutex);
4025 return -EBADF;
4026 }
4027
Jesse Barnes79e53942008-11-07 14:24:08 -08004028 obj_priv = obj->driver_private;
4029 if (obj_priv->pin_filp != file_priv) {
4030 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4031 args->handle);
4032 drm_gem_object_unreference(obj);
4033 mutex_unlock(&dev->struct_mutex);
4034 return -EINVAL;
4035 }
4036 obj_priv->user_pin_count--;
4037 if (obj_priv->user_pin_count == 0) {
4038 obj_priv->pin_filp = NULL;
4039 i915_gem_object_unpin(obj);
4040 }
Eric Anholt673a3942008-07-30 12:06:12 -07004041
4042 drm_gem_object_unreference(obj);
4043 mutex_unlock(&dev->struct_mutex);
4044 return 0;
4045}
4046
4047int
4048i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4049 struct drm_file *file_priv)
4050{
4051 struct drm_i915_gem_busy *args = data;
4052 struct drm_gem_object *obj;
4053 struct drm_i915_gem_object *obj_priv;
4054
Eric Anholt673a3942008-07-30 12:06:12 -07004055 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4056 if (obj == NULL) {
4057 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
4058 args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07004059 return -EBADF;
4060 }
4061
Chris Wilsonb1ce7862009-06-06 09:46:00 +01004062 mutex_lock(&dev->struct_mutex);
Eric Anholtf21289b2009-02-18 09:44:56 -08004063 /* Update the active list for the hardware's current position.
4064 * Otherwise this only updates on a delayed timer or when irqs are
4065 * actually unmasked, and our working set ends up being larger than
4066 * required.
4067 */
4068 i915_gem_retire_requests(dev);
4069
Eric Anholt673a3942008-07-30 12:06:12 -07004070 obj_priv = obj->driver_private;
Eric Anholtc4de0a52008-12-14 19:05:04 -08004071 /* Don't count being on the flushing list against the object being
4072 * done. Otherwise, a buffer left on the flushing list but not getting
4073 * flushed (because nobody's flushing that domain) won't ever return
4074 * unbusy and get reused by libdrm's bo cache. The other expected
4075 * consumer of this interface, OpenGL's occlusion queries, also specs
4076 * that the objects get unbusy "eventually" without any interference.
4077 */
4078 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004079
4080 drm_gem_object_unreference(obj);
4081 mutex_unlock(&dev->struct_mutex);
4082 return 0;
4083}
4084
4085int
4086i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4087 struct drm_file *file_priv)
4088{
4089 return i915_gem_ring_throttle(dev, file_priv);
4090}
4091
Chris Wilson3ef94da2009-09-14 16:50:29 +01004092int
4093i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4094 struct drm_file *file_priv)
4095{
4096 struct drm_i915_gem_madvise *args = data;
4097 struct drm_gem_object *obj;
4098 struct drm_i915_gem_object *obj_priv;
4099
4100 switch (args->madv) {
4101 case I915_MADV_DONTNEED:
4102 case I915_MADV_WILLNEED:
4103 break;
4104 default:
4105 return -EINVAL;
4106 }
4107
4108 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4109 if (obj == NULL) {
4110 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4111 args->handle);
4112 return -EBADF;
4113 }
4114
4115 mutex_lock(&dev->struct_mutex);
4116 obj_priv = obj->driver_private;
4117
4118 if (obj_priv->pin_count) {
4119 drm_gem_object_unreference(obj);
4120 mutex_unlock(&dev->struct_mutex);
4121
4122 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4123 return -EINVAL;
4124 }
4125
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004126 if (obj_priv->madv != __I915_MADV_PURGED)
4127 obj_priv->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004128
Chris Wilson2d7ef392009-09-20 23:13:10 +01004129 /* if the object is no longer bound, discard its backing storage */
4130 if (i915_gem_object_is_purgeable(obj_priv) &&
4131 obj_priv->gtt_space == NULL)
4132 i915_gem_object_truncate(obj);
4133
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004134 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4135
Chris Wilson3ef94da2009-09-14 16:50:29 +01004136 drm_gem_object_unreference(obj);
4137 mutex_unlock(&dev->struct_mutex);
4138
4139 return 0;
4140}
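/*
 * Illustrative sketch only: a userspace buffer cache marking an idle
 * object purgeable and checking, on reuse, whether the kernel discarded
 * it. fd, bo_handle and regenerate_contents() are hypothetical;
 * drmIoctl() is assumed to come from libdrm.
 */
#if 0
	struct drm_i915_gem_madvise madv = {
		.handle = bo_handle,
		.madv	= I915_MADV_DONTNEED,	/* backing pages may be purged */
	};
	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	/* ... later, when the buffer is wanted again ... */
	madv.madv = I915_MADV_WILLNEED;
	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
	if (!madv.retained)
		regenerate_contents(bo_handle);	/* pages were discarded */
#endif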
4141
Eric Anholt673a3942008-07-30 12:06:12 -07004142int i915_gem_init_object(struct drm_gem_object *obj)
4143{
4144 struct drm_i915_gem_object *obj_priv;
4145
Eric Anholt9a298b22009-03-24 12:23:04 -07004146 obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
Eric Anholt673a3942008-07-30 12:06:12 -07004147 if (obj_priv == NULL)
4148 return -ENOMEM;
4149
4150 /*
4151 * We've just allocated pages from the kernel,
4152 * so they've just been written by the CPU with
4153 * zeros. They'll need to be clflushed before we
4154 * use them with the GPU.
4155 */
4156 obj->write_domain = I915_GEM_DOMAIN_CPU;
4157 obj->read_domains = I915_GEM_DOMAIN_CPU;
4158
Keith Packardba1eb1d2008-10-14 19:55:10 -07004159 obj_priv->agp_type = AGP_USER_MEMORY;
4160
Eric Anholt673a3942008-07-30 12:06:12 -07004161 obj->driver_private = obj_priv;
4162 obj_priv->obj = obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004163 obj_priv->fence_reg = I915_FENCE_REG_NONE;
Eric Anholt673a3942008-07-30 12:06:12 -07004164 INIT_LIST_HEAD(&obj_priv->list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004165 INIT_LIST_HEAD(&obj_priv->fence_list);
Chris Wilson3ef94da2009-09-14 16:50:29 +01004166 obj_priv->madv = I915_MADV_WILLNEED;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004167
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004168 trace_i915_gem_object_create(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004169
4170 return 0;
4171}
4172
4173void i915_gem_free_object(struct drm_gem_object *obj)
4174{
Jesse Barnesde151cf2008-11-12 10:03:55 -08004175 struct drm_device *dev = obj->dev;
Eric Anholt673a3942008-07-30 12:06:12 -07004176 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4177
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004178 trace_i915_gem_object_destroy(obj);
4179
Eric Anholt673a3942008-07-30 12:06:12 -07004180 while (obj_priv->pin_count > 0)
4181 i915_gem_object_unpin(obj);
4182
Dave Airlie71acb5e2008-12-30 20:31:46 +10004183 if (obj_priv->phys_obj)
4184 i915_gem_detach_phys_object(dev, obj);
4185
Eric Anholt673a3942008-07-30 12:06:12 -07004186 i915_gem_object_unbind(obj);
4187
Chris Wilson7e616152009-09-10 08:53:04 +01004188 if (obj_priv->mmap_offset)
4189 i915_gem_free_mmap_offset(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08004190
Eric Anholt9a298b22009-03-24 12:23:04 -07004191 kfree(obj_priv->page_cpu_valid);
Eric Anholt280b7132009-03-12 16:56:27 -07004192 kfree(obj_priv->bit_17);
Eric Anholt9a298b22009-03-24 12:23:04 -07004193 kfree(obj->driver_private);
Eric Anholt673a3942008-07-30 12:06:12 -07004194}
4195
Chris Wilsonab5ee572009-09-20 19:25:47 +01004196/** Unbinds all inactive objects. */
Eric Anholt673a3942008-07-30 12:06:12 -07004197static int
Chris Wilsonab5ee572009-09-20 19:25:47 +01004198i915_gem_evict_from_inactive_list(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004199{
Chris Wilsonab5ee572009-09-20 19:25:47 +01004200 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07004201
Chris Wilsonab5ee572009-09-20 19:25:47 +01004202 while (!list_empty(&dev_priv->mm.inactive_list)) {
4203 struct drm_gem_object *obj;
4204 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004205
Chris Wilsonab5ee572009-09-20 19:25:47 +01004206 obj = list_first_entry(&dev_priv->mm.inactive_list,
4207 struct drm_i915_gem_object,
4208 list)->obj;
Eric Anholt673a3942008-07-30 12:06:12 -07004209
4210 ret = i915_gem_object_unbind(obj);
4211 if (ret != 0) {
Chris Wilsonab5ee572009-09-20 19:25:47 +01004212 DRM_ERROR("Error unbinding object: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004213 return ret;
4214 }
4215 }
4216
Eric Anholt673a3942008-07-30 12:06:12 -07004217 return 0;
4218}
4219
Jesse Barnes5669fca2009-02-17 15:13:31 -08004220int
Eric Anholt673a3942008-07-30 12:06:12 -07004221i915_gem_idle(struct drm_device *dev)
4222{
4223 drm_i915_private_t *dev_priv = dev->dev_private;
4224 uint32_t seqno, cur_seqno, last_seqno;
4225 int stuck, ret;
4226
Keith Packard6dbe2772008-10-14 21:41:13 -07004227 mutex_lock(&dev->struct_mutex);
4228
4229 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
4230 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004231 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07004232 }
Eric Anholt673a3942008-07-30 12:06:12 -07004233
4234 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4235 * We need to replace this with a semaphore, or something.
4236 */
4237 dev_priv->mm.suspended = 1;
Ben Gamarif65d9422009-09-14 17:48:44 -04004238 del_timer(&dev_priv->hangcheck_timer);
Eric Anholt673a3942008-07-30 12:06:12 -07004239
Keith Packard6dbe2772008-10-14 21:41:13 -07004240	/* Cancel the retire work handler, and wait for it to finish if it is running
4241 */
4242 mutex_unlock(&dev->struct_mutex);
4243 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4244 mutex_lock(&dev->struct_mutex);
4245
Eric Anholt673a3942008-07-30 12:06:12 -07004246 i915_kernel_lost_context(dev);
4247
4248 /* Flush the GPU along with all non-CPU write domains
4249 */
Chris Wilson21d509e2009-06-06 09:46:02 +01004250 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
4251 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07004252
4253 if (seqno == 0) {
4254 mutex_unlock(&dev->struct_mutex);
4255 return -ENOMEM;
4256 }
4257
4258 dev_priv->mm.waiting_gem_seqno = seqno;
4259 last_seqno = 0;
4260 stuck = 0;
4261 for (;;) {
4262 cur_seqno = i915_get_gem_seqno(dev);
4263 if (i915_seqno_passed(cur_seqno, seqno))
4264 break;
4265 if (last_seqno == cur_seqno) {
4266 if (stuck++ > 100) {
4267 DRM_ERROR("hardware wedged\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04004268 atomic_set(&dev_priv->mm.wedged, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07004269 DRM_WAKEUP(&dev_priv->irq_queue);
4270 break;
4271 }
4272 }
4273 msleep(10);
4274 last_seqno = cur_seqno;
4275 }
4276 dev_priv->mm.waiting_gem_seqno = 0;
4277
4278 i915_gem_retire_requests(dev);
4279
Carl Worth5e118f42009-03-20 11:54:25 -07004280 spin_lock(&dev_priv->mm.active_list_lock);
Ben Gamariba1234d2009-09-14 17:48:47 -04004281 if (!atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt28dfe522008-11-13 15:00:55 -08004282 /* Active and flushing should now be empty as we've
4283 * waited for a sequence higher than any pending execbuffer
4284 */
4285 WARN_ON(!list_empty(&dev_priv->mm.active_list));
4286 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
4287 /* Request should now be empty as we've also waited
4288 * for the last request in the list
4289 */
4290 WARN_ON(!list_empty(&dev_priv->mm.request_list));
4291 }
Eric Anholt673a3942008-07-30 12:06:12 -07004292
Eric Anholt28dfe522008-11-13 15:00:55 -08004293 /* Empty the active and flushing lists to inactive. If there's
4294 * anything left at this point, it means that we're wedged and
4295 * nothing good's going to happen by leaving them there. So strip
4296 * the GPU domains and just stuff them onto inactive.
Eric Anholt673a3942008-07-30 12:06:12 -07004297 */
Eric Anholt28dfe522008-11-13 15:00:55 -08004298 while (!list_empty(&dev_priv->mm.active_list)) {
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004299 struct drm_gem_object *obj;
4300 uint32_t old_write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07004301
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004302 obj = list_first_entry(&dev_priv->mm.active_list,
4303 struct drm_i915_gem_object,
4304 list)->obj;
4305 old_write_domain = obj->write_domain;
4306 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4307 i915_gem_object_move_to_inactive(obj);
4308
4309 trace_i915_gem_object_change_domain(obj,
4310 obj->read_domains,
4311 old_write_domain);
Eric Anholt28dfe522008-11-13 15:00:55 -08004312 }
Carl Worth5e118f42009-03-20 11:54:25 -07004313 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt28dfe522008-11-13 15:00:55 -08004314
4315 while (!list_empty(&dev_priv->mm.flushing_list)) {
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004316 struct drm_gem_object *obj;
4317 uint32_t old_write_domain;
Eric Anholt28dfe522008-11-13 15:00:55 -08004318
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004319 obj = list_first_entry(&dev_priv->mm.flushing_list,
4320 struct drm_i915_gem_object,
4321 list)->obj;
4322 old_write_domain = obj->write_domain;
4323 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4324 i915_gem_object_move_to_inactive(obj);
4325
4326 trace_i915_gem_object_change_domain(obj,
4327 obj->read_domains,
4328 old_write_domain);
Eric Anholt28dfe522008-11-13 15:00:55 -08004329 }
4330
4331
4332 /* Move all inactive buffers out of the GTT. */
Chris Wilsonab5ee572009-09-20 19:25:47 +01004333 ret = i915_gem_evict_from_inactive_list(dev);
Eric Anholt28dfe522008-11-13 15:00:55 -08004334 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
Keith Packard6dbe2772008-10-14 21:41:13 -07004335 if (ret) {
4336 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004337 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07004338 }
Eric Anholt673a3942008-07-30 12:06:12 -07004339
Keith Packard6dbe2772008-10-14 21:41:13 -07004340 i915_gem_cleanup_ringbuffer(dev);
4341 mutex_unlock(&dev->struct_mutex);
4342
Eric Anholt673a3942008-07-30 12:06:12 -07004343 return 0;
4344}
4345
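/* Set up the hardware status page.  Chipsets that need a GTT-relative
 * status page (I915_NEED_GFX_HWS) get a pinned, cache-coherent GEM
 * object for it here; older parts use a physical address that is
 * already set up at driver load time.
 */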
static int
i915_gem_init_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        /* If we need a physical address for the status page, it's already
         * initialized at driver load time.
         */
        if (!I915_NEED_GFX_HWS(dev))
                return 0;

        obj = drm_gem_object_alloc(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                return -ENOMEM;
        }
        obj_priv = obj->driver_private;
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
                return ret;
        }

        dev_priv->status_gfx_addr = obj_priv->gtt_offset;

        dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
        if (dev_priv->hw_status_page == NULL) {
                DRM_ERROR("Failed to map status page.\n");
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                i915_gem_object_unpin(obj);
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }
        dev_priv->hws_obj = obj;
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        I915_READ(HWS_PGA); /* posting read */
        DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

        return 0;
}

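/* Tear down the status page set up by i915_gem_init_hws(): unmap and
 * unpin the backing object, then park HWS_PGA at a harmless high
 * address while it is disabled.
 */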
static void
i915_gem_cleanup_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        if (dev_priv->hws_obj == NULL)
                return;

        obj = dev_priv->hws_obj;
        obj_priv = obj->driver_private;

        kunmap(obj_priv->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(obj);
        dev_priv->hws_obj = NULL;

        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
        dev_priv->hw_status_page = NULL;

        /* Write high address into HWS_PGA when disabling. */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}

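/**
 * i915_gem_init_ringbuffer - allocate and start the render ring
 * @dev: DRM device
 *
 * Sets up the hardware status page, allocates and pins a 128KB GEM
 * object for the ring, maps it write-combined through the GTT and
 * programs the PRB0 registers.  G45 parts may fail to reset the ring
 * head, so the head is checked and forced to zero if necessary.
 */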
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        drm_i915_ring_buffer_t *ring = &dev_priv->ring;
        int ret;
        u32 head;

        ret = i915_gem_init_hws(dev);
        if (ret != 0)
                return ret;

        obj = drm_gem_object_alloc(dev, 128 * 1024);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                i915_gem_cleanup_hws(dev);
                return -ENOMEM;
        }
        obj_priv = obj->driver_private;

        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
                i915_gem_cleanup_hws(dev);
                return ret;
        }

        /* Set up the kernel mapping for the ring. */
        ring->Size = obj->size;

        ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
        ring->map.size = obj->size;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
                i915_gem_object_unpin(obj);
                drm_gem_object_unreference(obj);
                i915_gem_cleanup_hws(dev);
                return -EINVAL;
        }
        ring->ring_obj = obj;
        ring->virtual_start = ring->map.handle;

        /* Stop the ring if it's running. */
        I915_WRITE(PRB0_CTL, 0);
        I915_WRITE(PRB0_TAIL, 0);
        I915_WRITE(PRB0_HEAD, 0);

        /* Initialize the ring. */
        I915_WRITE(PRB0_START, obj_priv->gtt_offset);
        head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_ERROR("Ring head not reset to zero "
                          "ctl %08x head %08x tail %08x start %08x\n",
                          I915_READ(PRB0_CTL),
                          I915_READ(PRB0_HEAD),
                          I915_READ(PRB0_TAIL),
                          I915_READ(PRB0_START));
                I915_WRITE(PRB0_HEAD, 0);

                DRM_ERROR("Ring head forced to zero "
                          "ctl %08x head %08x tail %08x start %08x\n",
                          I915_READ(PRB0_CTL),
                          I915_READ(PRB0_HEAD),
                          I915_READ(PRB0_TAIL),
                          I915_READ(PRB0_START));
        }

        I915_WRITE(PRB0_CTL,
                   ((obj->size - 4096) & RING_NR_PAGES) |
                   RING_NO_REPORT |
                   RING_VALID);

        head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

        /* If the head is still not zero, the ring is dead */
        if (head != 0) {
                DRM_ERROR("Ring initialization failed "
                          "ctl %08x head %08x tail %08x start %08x\n",
                          I915_READ(PRB0_CTL),
                          I915_READ(PRB0_HEAD),
                          I915_READ(PRB0_TAIL),
                          I915_READ(PRB0_START));
                return -EIO;
        }

        /* Update our cache of the ring state */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
                ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
                ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->Size;
        }

        return 0;
}

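/* Undo i915_gem_init_ringbuffer(): release the ring mapping and object
 * along with the hardware status page.  Safe to call if the ring was
 * never initialized.
 */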
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (dev_priv->ring.ring_obj == NULL)
                return;

        drm_core_ioremapfree(&dev_priv->ring.map, dev);

        i915_gem_object_unpin(dev_priv->ring.ring_obj);
        drm_gem_object_unreference(dev_priv->ring.ring_obj);
        dev_priv->ring.ring_obj = NULL;
        memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

        i915_gem_cleanup_hws(dev);
}

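/* Bring GEM back up when userspace re-acquires the VT: clear any wedged
 * state, restart the ring, check that the object lists were left empty
 * by the matching leavevt/idle, and re-enable interrupts.  A no-op
 * under kernel modesetting, where the hardware is never given up.
 */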
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;

        if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_ERROR("Reenabling wedged hardware, good luck\n");
                atomic_set(&dev_priv->mm.wedged, 0);
        }

        mutex_lock(&dev->struct_mutex);
        dev_priv->mm.suspended = 0;

        ret = i915_gem_init_ringbuffer(dev);
        if (ret != 0) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        spin_lock(&dev_priv->mm.active_list_lock);
        BUG_ON(!list_empty(&dev_priv->mm.active_list));
        spin_unlock(&dev_priv->mm.active_list_lock);

        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
        BUG_ON(!list_empty(&dev_priv->mm.request_list));
        mutex_unlock(&dev->struct_mutex);

        drm_irq_install(dev);

        return 0;
}

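/* Counterpart to entervt: quiesce the GPU and remove the interrupt
 * handler before userspace gives up the VT.
 */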
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;

        drm_irq_uninstall(dev);
        return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ret = i915_gem_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
}

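/* One-time GEM initialization at driver load: the object lists, the
 * retire work handler, registration on the global shrink list, and the
 * fence registers, which are cleared here.
 */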
void
i915_gem_load(struct drm_device *dev)
{
        int i;
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock_init(&dev_priv->mm.active_list_lock);
        INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.request_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        dev_priv->mm.next_gem_seqno = 1;

        spin_lock(&shrink_list_lock);
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
        spin_unlock(&shrink_list_lock);

        /* Old X drivers will take 0-2 for front, back, depth buffers */
        dev_priv->fence_reg_start = 3;

        if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;

        /* Initialize fence registers to zero */
        if (IS_I965G(dev)) {
                for (i = 0; i < 16; i++)
                        I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
        } else {
                for (i = 0; i < 8; i++)
                        I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
        }

        i915_gem_detect_bit_6_swizzle(dev);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
int i915_gem_init_phys_object(struct drm_device *dev,
                              int id, int size)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;
        int ret;

        if (dev_priv->mm.phys_objs[id - 1] || !size)
                return 0;

        phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
        if (!phys_obj)
                return -ENOMEM;

        phys_obj->id = id;

        phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
        if (!phys_obj->handle) {
                ret = -ENOMEM;
                goto kfree_obj;
        }
#ifdef CONFIG_X86
        set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

        dev_priv->mm.phys_objs[id - 1] = phys_obj;

        return 0;
kfree_obj:
        kfree(phys_obj);
        return ret;
}

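/* Release one phys object slot, detaching any GEM object still bound to
 * it and restoring its pages to write-back caching before freeing the
 * PCI allocation.
 */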
void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;

        if (!dev_priv->mm.phys_objs[id - 1])
                return;

        phys_obj = dev_priv->mm.phys_objs[id - 1];
        if (phys_obj->cur_obj)
                i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
        set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
        drm_pci_free(dev, phys_obj->handle);
        kfree(phys_obj);
        dev_priv->mm.phys_objs[id - 1] = NULL;
}

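/* Free every phys object slot at driver teardown. */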
void i915_gem_free_all_phys_object(struct drm_device *dev)
{
        int i;

        for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
                i915_gem_free_phys_object(dev, i);
}

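/* Copy the contents of the phys object back into the GEM object's shmem
 * pages and sever the link between the two, returning the object to
 * ordinary GTT-backed use.
 */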
void i915_gem_detach_phys_object(struct drm_device *dev,
                                 struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv;
        int i;
        int ret;
        int page_count;

        obj_priv = obj->driver_private;
        if (!obj_priv->phys_obj)
                return;

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto out;

        page_count = obj->size / PAGE_SIZE;

        for (i = 0; i < page_count; i++) {
                char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
                char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(dst, KM_USER0);
        }
        drm_clflush_pages(obj_priv->pages, page_count);
        drm_agp_chipset_flush(dev);

        i915_gem_object_put_pages(obj);
out:
        obj_priv->phys_obj->cur_obj = NULL;
        obj_priv->phys_obj = NULL;
}

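/* Bind a GEM object to one of the phys object slots, creating the phys
 * object on demand and copying the current page contents into it.  Used
 * for hardware that reads cursor and overlay state via physical
 * addresses.
 */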
int
i915_gem_attach_phys_object(struct drm_device *dev,
                            struct drm_gem_object *obj, int id)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;
        int page_count;
        int i;

        if (id > I915_MAX_PHYS_OBJECT)
                return -EINVAL;

        obj_priv = obj->driver_private;

        if (obj_priv->phys_obj) {
                if (obj_priv->phys_obj->id == id)
                        return 0;
                i915_gem_detach_phys_object(dev, obj);
        }

        /* create a new object */
        if (!dev_priv->mm.phys_objs[id - 1]) {
                ret = i915_gem_init_phys_object(dev, id,
                                                obj->size);
                if (ret) {
                        DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
                        goto out;
                }
        }

        /* bind to the object */
        obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
        obj_priv->phys_obj->cur_obj = obj;

        ret = i915_gem_object_get_pages(obj);
        if (ret) {
                DRM_ERROR("failed to get page list\n");
                goto out;
        }

        page_count = obj->size / PAGE_SIZE;

        for (i = 0; i < page_count; i++) {
                char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
                char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(src, KM_USER0);
        }

        i915_gem_object_put_pages(obj);

        return 0;
out:
        return ret;
}

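/* pwrite fast path for phys-backed objects: copy straight from
 * userspace into the contiguous allocation and flush the chipset write
 * buffers, bypassing the shmem pages entirely.
 */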
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        void *obj_addr;
        int ret;
        char __user *user_data;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

        DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
        ret = copy_from_user(obj_addr, user_data, args->size);
        if (ret)
                return -EFAULT;

        drm_agp_chipset_flush(dev);
        return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
        mutex_lock(&dev->struct_mutex);
        while (!list_empty(&i915_file_priv->mm.request_list))
                list_del_init(i915_file_priv->mm.request_list.next);
        mutex_unlock(&dev->struct_mutex);
}

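/* Memory shrinker callback.  When nr_to_scan == 0 it only reports how
 * many inactive objects might be reclaimable; otherwise it makes two
 * passes over every device on the shrink list, first unbinding
 * purgeable objects and then evicting whatever else is inactive.
 * Returns -1 if every struct_mutex was contended, since sleeping on the
 * lock here could deadlock with an allocation made while holding it.
 */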
static int
i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
        drm_i915_private_t *dev_priv, *next_dev;
        struct drm_i915_gem_object *obj_priv, *next_obj;
        int cnt = 0;
        int would_deadlock = 1;

        /* "fast-path" to count number of available objects */
        if (nr_to_scan == 0) {
                spin_lock(&shrink_list_lock);
                list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
                        struct drm_device *dev = dev_priv->dev;

                        if (mutex_trylock(&dev->struct_mutex)) {
                                list_for_each_entry(obj_priv,
                                                    &dev_priv->mm.inactive_list,
                                                    list)
                                        cnt++;
                                mutex_unlock(&dev->struct_mutex);
                        }
                }
                spin_unlock(&shrink_list_lock);

                return (cnt / 100) * sysctl_vfs_cache_pressure;
        }

        spin_lock(&shrink_list_lock);

        /* first scan for clean buffers */
        list_for_each_entry_safe(dev_priv, next_dev,
                                 &shrink_list, mm.shrink_list) {
                struct drm_device *dev = dev_priv->dev;

                if (!mutex_trylock(&dev->struct_mutex))
                        continue;

                spin_unlock(&shrink_list_lock);

                i915_gem_retire_requests(dev);

                list_for_each_entry_safe(obj_priv, next_obj,
                                         &dev_priv->mm.inactive_list,
                                         list) {
                        if (i915_gem_object_is_purgeable(obj_priv)) {
                                i915_gem_object_unbind(obj_priv->obj);
                                if (--nr_to_scan <= 0)
                                        break;
                        }
                }

                spin_lock(&shrink_list_lock);
                mutex_unlock(&dev->struct_mutex);

                would_deadlock = 0;

                if (nr_to_scan <= 0)
                        break;
        }

        /* second pass, evict/count anything still on the inactive list */
        list_for_each_entry_safe(dev_priv, next_dev,
                                 &shrink_list, mm.shrink_list) {
                struct drm_device *dev = dev_priv->dev;

                if (!mutex_trylock(&dev->struct_mutex))
                        continue;

                spin_unlock(&shrink_list_lock);

                list_for_each_entry_safe(obj_priv, next_obj,
                                         &dev_priv->mm.inactive_list,
                                         list) {
                        if (nr_to_scan > 0) {
                                i915_gem_object_unbind(obj_priv->obj);
                                nr_to_scan--;
                        } else
                                cnt++;
                }

                spin_lock(&shrink_list_lock);
                mutex_unlock(&dev->struct_mutex);

                would_deadlock = 0;
        }

        spin_unlock(&shrink_list_lock);

        if (would_deadlock)
                return -1;
        else if (cnt > 0)
                return (cnt / 100) * sysctl_vfs_cache_pressure;
        else
                return 0;
}

static struct shrinker shrinker = {
        .shrink = i915_gem_shrink,
        .seeks = DEFAULT_SEEKS,
};

__init void
i915_gem_shrinker_init(void)
{
        register_shrinker(&shrinker);
}

__exit void
i915_gem_shrinker_exit(void)
{
        unregister_shrinker(&shrinker);
}