Eric Anholt673a3942008-07-30 12:06:12 -07001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32#include <linux/swap.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080033#include <linux/pci.h>
Eric Anholt673a3942008-07-30 12:06:12 -070034
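/* Mask of the GPU-side cache domains, i.e. every domain other than the
 * CPU and GTT domains.
 */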
Eric Anholt28dfe522008-11-13 15:00:55 -080035#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
36
Eric Anholte47c68e2008-11-14 13:35:19 -080037static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
38static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
39static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
Eric Anholte47c68e2008-11-14 13:35:19 -080040static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
41 int write);
42static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43 uint64_t offset,
44 uint64_t size);
45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
Eric Anholt673a3942008-07-30 12:06:12 -070046static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -080047static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
48 unsigned alignment);
Jesse Barnesde151cf2008-11-12 10:03:55 -080049static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
50static int i915_gem_evict_something(struct drm_device *dev);
Dave Airlie71acb5e2008-12-30 20:31:46 +100051static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
52 struct drm_i915_gem_pwrite *args,
53 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -070054
Jesse Barnes79e53942008-11-07 14:24:08 -080055int i915_gem_do_init(struct drm_device *dev, unsigned long start,
56 unsigned long end)
57{
58 drm_i915_private_t *dev_priv = dev->dev_private;
59
60 if (start >= end ||
61 (start & (PAGE_SIZE - 1)) != 0 ||
62 (end & (PAGE_SIZE - 1)) != 0) {
63 return -EINVAL;
64 }
65
66 drm_mm_init(&dev_priv->mm.gtt_space, start,
67 end - start);
68
69 dev->gtt_total = (uint32_t) (end - start);
70
71 return 0;
72}
Keith Packard6dbe2772008-10-14 21:41:13 -070073
Eric Anholt673a3942008-07-30 12:06:12 -070074int
75i915_gem_init_ioctl(struct drm_device *dev, void *data,
76 struct drm_file *file_priv)
77{
Eric Anholt673a3942008-07-30 12:06:12 -070078 struct drm_i915_gem_init *args = data;
Jesse Barnes79e53942008-11-07 14:24:08 -080079 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -070080
81 mutex_lock(&dev->struct_mutex);
Jesse Barnes79e53942008-11-07 14:24:08 -080082 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
Eric Anholt673a3942008-07-30 12:06:12 -070083 mutex_unlock(&dev->struct_mutex);
84
Jesse Barnes79e53942008-11-07 14:24:08 -080085 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -070086}
87
Eric Anholt5a125c32008-10-22 21:40:13 -070088int
89i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
90 struct drm_file *file_priv)
91{
Eric Anholt5a125c32008-10-22 21:40:13 -070092 struct drm_i915_gem_get_aperture *args = data;
Eric Anholt5a125c32008-10-22 21:40:13 -070093
94 if (!(dev->driver->driver_features & DRIVER_GEM))
95 return -ENODEV;
96
97 args->aper_size = dev->gtt_total;
Keith Packard2678d9d2008-11-20 22:54:54 -080098 args->aper_available_size = (args->aper_size -
99 atomic_read(&dev->pin_memory));
Eric Anholt5a125c32008-10-22 21:40:13 -0700100
101 return 0;
102}
103
Eric Anholt673a3942008-07-30 12:06:12 -0700104
105/**
106 * Creates a new mm object and returns a handle to it.
107 */
108int
109i915_gem_create_ioctl(struct drm_device *dev, void *data,
110 struct drm_file *file_priv)
111{
112 struct drm_i915_gem_create *args = data;
113 struct drm_gem_object *obj;
Pekka Paalanena1a2d1d2009-08-23 12:40:55 +0300114 int ret;
115 u32 handle;
Eric Anholt673a3942008-07-30 12:06:12 -0700116
117 args->size = roundup(args->size, PAGE_SIZE);
118
119 /* Allocate the new object */
120 obj = drm_gem_object_alloc(dev, args->size);
121 if (obj == NULL)
122 return -ENOMEM;
123
124 ret = drm_gem_handle_create(file_priv, obj, &handle);
125 mutex_lock(&dev->struct_mutex);
126 drm_gem_object_handle_unreference(obj);
127 mutex_unlock(&dev->struct_mutex);
128
129 if (ret)
130 return ret;
131
132 args->handle = handle;
133
134 return 0;
135}
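/*
 * Illustrative sketch (not part of this driver): userspace typically
 * reaches the handler above through libdrm, roughly as
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *
 * after which create.handle names the new object; drmIoctl() and the
 * ioctl number are assumed from libdrm and i915_drm.h.
 */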
136
Eric Anholt40123c12009-03-09 13:42:30 -0700137static inline int
Eric Anholteb014592009-03-10 11:44:52 -0700138fast_shmem_read(struct page **pages,
139 loff_t page_base, int page_offset,
140 char __user *data,
141 int length)
142{
	char *vaddr;
Florian Mickler2bc43b52009-04-06 22:55:41 +0200144 int unwritten;
Eric Anholteb014592009-03-10 11:44:52 -0700145
146 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
147 if (vaddr == NULL)
148 return -ENOMEM;
Florian Mickler2bc43b52009-04-06 22:55:41 +0200149 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
Eric Anholteb014592009-03-10 11:44:52 -0700150 kunmap_atomic(vaddr, KM_USER0);
151
Florian Mickler2bc43b52009-04-06 22:55:41 +0200152 if (unwritten)
153 return -EFAULT;
154
155 return 0;
Eric Anholteb014592009-03-10 11:44:52 -0700156}
157
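/* On machines whose swizzle pattern includes physical address bit 17
 * (I915_BIT_6_SWIZZLE_9_10_17), userspace cannot predict the swizzling
 * of a tiled object, so the kernel has to apply the bit-17 fixups
 * itself; this helper identifies the objects that need that treatment.
 */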
Eric Anholt280b7132009-03-12 16:56:27 -0700158static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
159{
160 drm_i915_private_t *dev_priv = obj->dev->dev_private;
161 struct drm_i915_gem_object *obj_priv = obj->driver_private;
162
163 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
164 obj_priv->tiling_mode != I915_TILING_NONE;
165}
166
Eric Anholteb014592009-03-10 11:44:52 -0700167static inline int
Eric Anholt40123c12009-03-09 13:42:30 -0700168slow_shmem_copy(struct page *dst_page,
169 int dst_offset,
170 struct page *src_page,
171 int src_offset,
172 int length)
173{
174 char *dst_vaddr, *src_vaddr;
175
176 dst_vaddr = kmap_atomic(dst_page, KM_USER0);
177 if (dst_vaddr == NULL)
178 return -ENOMEM;
179
180 src_vaddr = kmap_atomic(src_page, KM_USER1);
181 if (src_vaddr == NULL) {
182 kunmap_atomic(dst_vaddr, KM_USER0);
183 return -ENOMEM;
184 }
185
186 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
187
188 kunmap_atomic(src_vaddr, KM_USER1);
189 kunmap_atomic(dst_vaddr, KM_USER0);
190
191 return 0;
192}
193
Eric Anholt280b7132009-03-12 16:56:27 -0700194static inline int
195slow_shmem_bit17_copy(struct page *gpu_page,
196 int gpu_offset,
197 struct page *cpu_page,
198 int cpu_offset,
199 int length,
200 int is_read)
201{
202 char *gpu_vaddr, *cpu_vaddr;
203
204 /* Use the unswizzled path if this page isn't affected. */
205 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
206 if (is_read)
207 return slow_shmem_copy(cpu_page, cpu_offset,
208 gpu_page, gpu_offset, length);
209 else
210 return slow_shmem_copy(gpu_page, gpu_offset,
211 cpu_page, cpu_offset, length);
212 }
213
214 gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
215 if (gpu_vaddr == NULL)
216 return -ENOMEM;
217
218 cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
219 if (cpu_vaddr == NULL) {
220 kunmap_atomic(gpu_vaddr, KM_USER0);
221 return -ENOMEM;
222 }
223
224 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
225 * XORing with the other bits (A9 for Y, A9 and A10 for X)
226 */
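	/* Pages whose physical address has bit 17 set have the two 64-byte
	 * halves of each 128-byte span swapped by the swizzle, so walk the
	 * copy one cacheline at a time and flip bit 6 of the GPU offset.
	 */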
227 while (length > 0) {
228 int cacheline_end = ALIGN(gpu_offset + 1, 64);
229 int this_length = min(cacheline_end - gpu_offset, length);
230 int swizzled_gpu_offset = gpu_offset ^ 64;
231
232 if (is_read) {
233 memcpy(cpu_vaddr + cpu_offset,
234 gpu_vaddr + swizzled_gpu_offset,
235 this_length);
236 } else {
237 memcpy(gpu_vaddr + swizzled_gpu_offset,
238 cpu_vaddr + cpu_offset,
239 this_length);
240 }
241 cpu_offset += this_length;
242 gpu_offset += this_length;
243 length -= this_length;
244 }
245
246 kunmap_atomic(cpu_vaddr, KM_USER1);
247 kunmap_atomic(gpu_vaddr, KM_USER0);
248
249 return 0;
250}
251
/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object into the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
257static int
258i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
259 struct drm_i915_gem_pread *args,
260 struct drm_file *file_priv)
261{
262 struct drm_i915_gem_object *obj_priv = obj->driver_private;
263 ssize_t remain;
264 loff_t offset, page_base;
265 char __user *user_data;
266 int page_offset, page_length;
267 int ret;
268
269 user_data = (char __user *) (uintptr_t) args->data_ptr;
270 remain = args->size;
271
272 mutex_lock(&dev->struct_mutex);
273
274 ret = i915_gem_object_get_pages(obj);
275 if (ret != 0)
276 goto fail_unlock;
277
278 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
279 args->size);
280 if (ret != 0)
281 goto fail_put_pages;
282
283 obj_priv = obj->driver_private;
284 offset = args->offset;
285
286 while (remain > 0) {
287 /* Operation in this page
288 *
 * page_base = page-aligned offset into the object's backing pages
290 * page_offset = offset within page
291 * page_length = bytes to copy for this page
292 */
293 page_base = (offset & ~(PAGE_SIZE-1));
294 page_offset = offset & (PAGE_SIZE-1);
295 page_length = remain;
296 if ((page_offset + remain) > PAGE_SIZE)
297 page_length = PAGE_SIZE - page_offset;
298
299 ret = fast_shmem_read(obj_priv->pages,
300 page_base, page_offset,
301 user_data, page_length);
302 if (ret)
303 goto fail_put_pages;
304
305 remain -= page_length;
306 user_data += page_length;
307 offset += page_length;
308 }
309
310fail_put_pages:
311 i915_gem_object_put_pages(obj);
312fail_unlock:
313 mutex_unlock(&dev->struct_mutex);
314
315 return ret;
316}
317
/**
 * This is the fallback shmem pread path, which pins the user pages with
 * get_user_pages() up front so that we can copy out of the object's backing
 * pages while holding the struct_mutex without taking page faults.
 */
324static int
325i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
326 struct drm_i915_gem_pread *args,
327 struct drm_file *file_priv)
328{
329 struct drm_i915_gem_object *obj_priv = obj->driver_private;
330 struct mm_struct *mm = current->mm;
331 struct page **user_pages;
332 ssize_t remain;
333 loff_t offset, pinned_pages, i;
334 loff_t first_data_page, last_data_page, num_pages;
335 int shmem_page_index, shmem_page_offset;
336 int data_page_index, data_page_offset;
337 int page_length;
338 int ret;
339 uint64_t data_ptr = args->data_ptr;
Eric Anholt280b7132009-03-12 16:56:27 -0700340 int do_bit17_swizzling;
Eric Anholteb014592009-03-10 11:44:52 -0700341
342 remain = args->size;
343
344 /* Pin the user pages containing the data. We can't fault while
345 * holding the struct mutex, yet we want to hold it while
346 * dereferencing the user data.
347 */
348 first_data_page = data_ptr / PAGE_SIZE;
349 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
350 num_pages = last_data_page - first_data_page + 1;
351
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700352 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
Eric Anholteb014592009-03-10 11:44:52 -0700353 if (user_pages == NULL)
354 return -ENOMEM;
355
356 down_read(&mm->mmap_sem);
357 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
Eric Anholte5e9ecd2009-04-07 16:01:22 -0700358 num_pages, 1, 0, user_pages, NULL);
Eric Anholteb014592009-03-10 11:44:52 -0700359 up_read(&mm->mmap_sem);
360 if (pinned_pages < num_pages) {
361 ret = -EFAULT;
362 goto fail_put_user_pages;
363 }
364
Eric Anholt280b7132009-03-12 16:56:27 -0700365 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
366
Eric Anholteb014592009-03-10 11:44:52 -0700367 mutex_lock(&dev->struct_mutex);
368
369 ret = i915_gem_object_get_pages(obj);
370 if (ret != 0)
371 goto fail_unlock;
372
373 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
374 args->size);
375 if (ret != 0)
376 goto fail_put_pages;
377
378 obj_priv = obj->driver_private;
379 offset = args->offset;
380
381 while (remain > 0) {
382 /* Operation in this page
383 *
384 * shmem_page_index = page number within shmem file
385 * shmem_page_offset = offset within page in shmem file
386 * data_page_index = page number in get_user_pages return
 * data_page_offset = offset within data_page_index page.
388 * page_length = bytes to copy for this page
389 */
390 shmem_page_index = offset / PAGE_SIZE;
391 shmem_page_offset = offset & ~PAGE_MASK;
392 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
393 data_page_offset = data_ptr & ~PAGE_MASK;
394
395 page_length = remain;
396 if ((shmem_page_offset + page_length) > PAGE_SIZE)
397 page_length = PAGE_SIZE - shmem_page_offset;
398 if ((data_page_offset + page_length) > PAGE_SIZE)
399 page_length = PAGE_SIZE - data_page_offset;
400
Eric Anholt280b7132009-03-12 16:56:27 -0700401 if (do_bit17_swizzling) {
402 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
403 shmem_page_offset,
404 user_pages[data_page_index],
405 data_page_offset,
406 page_length,
407 1);
408 } else {
409 ret = slow_shmem_copy(user_pages[data_page_index],
410 data_page_offset,
411 obj_priv->pages[shmem_page_index],
412 shmem_page_offset,
413 page_length);
414 }
Eric Anholteb014592009-03-10 11:44:52 -0700415 if (ret)
416 goto fail_put_pages;
417
418 remain -= page_length;
419 data_ptr += page_length;
420 offset += page_length;
421 }
422
423fail_put_pages:
424 i915_gem_object_put_pages(obj);
425fail_unlock:
426 mutex_unlock(&dev->struct_mutex);
427fail_put_user_pages:
428 for (i = 0; i < pinned_pages; i++) {
429 SetPageDirty(user_pages[i]);
430 page_cache_release(user_pages[i]);
431 }
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700432 drm_free_large(user_pages);
Eric Anholteb014592009-03-10 11:44:52 -0700433
434 return ret;
435}
436
Eric Anholt673a3942008-07-30 12:06:12 -0700437/**
438 * Reads data from the object referenced by handle.
439 *
440 * On error, the contents of *data are undefined.
441 */
442int
443i915_gem_pread_ioctl(struct drm_device *dev, void *data,
444 struct drm_file *file_priv)
445{
446 struct drm_i915_gem_pread *args = data;
447 struct drm_gem_object *obj;
448 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -0700449 int ret;
450
451 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
452 if (obj == NULL)
453 return -EBADF;
454 obj_priv = obj->driver_private;
455
456 /* Bounds check source.
457 *
458 * XXX: This could use review for overflow issues...
459 */
460 if (args->offset > obj->size || args->size > obj->size ||
461 args->offset + args->size > obj->size) {
462 drm_gem_object_unreference(obj);
463 return -EINVAL;
464 }
465
Eric Anholt280b7132009-03-12 16:56:27 -0700466 if (i915_gem_object_needs_bit17_swizzle(obj)) {
Eric Anholteb014592009-03-10 11:44:52 -0700467 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
Eric Anholt280b7132009-03-12 16:56:27 -0700468 } else {
469 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
470 if (ret != 0)
471 ret = i915_gem_shmem_pread_slow(dev, obj, args,
472 file_priv);
473 }
Eric Anholt673a3942008-07-30 12:06:12 -0700474
475 drm_gem_object_unreference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700476
Eric Anholteb014592009-03-10 11:44:52 -0700477 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700478}
479
Keith Packard0839ccb2008-10-30 19:38:48 -0700480/* This is the fast write path which cannot handle
481 * page faults in the source data
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700482 */
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700483
Keith Packard0839ccb2008-10-30 19:38:48 -0700484static inline int
485fast_user_write(struct io_mapping *mapping,
486 loff_t page_base, int page_offset,
487 char __user *user_data,
488 int length)
489{
490 char *vaddr_atomic;
491 unsigned long unwritten;
492
493 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
494 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
495 user_data, length);
496 io_mapping_unmap_atomic(vaddr_atomic);
497 if (unwritten)
498 return -EFAULT;
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700499 return 0;
Keith Packard0839ccb2008-10-30 19:38:48 -0700500}
501
/* This is the fallback write path, used once the user pages have been
 * pinned, so the copy itself cannot take page faults.
 */
505
506static inline int
Eric Anholt3de09aa2009-03-09 09:42:23 -0700507slow_kernel_write(struct io_mapping *mapping,
508 loff_t gtt_base, int gtt_offset,
509 struct page *user_page, int user_offset,
510 int length)
Keith Packard0839ccb2008-10-30 19:38:48 -0700511{
Eric Anholt3de09aa2009-03-09 09:42:23 -0700512 char *src_vaddr, *dst_vaddr;
Keith Packard0839ccb2008-10-30 19:38:48 -0700513 unsigned long unwritten;
514
Eric Anholt3de09aa2009-03-09 09:42:23 -0700515 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
516 src_vaddr = kmap_atomic(user_page, KM_USER1);
517 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
518 src_vaddr + user_offset,
519 length);
520 kunmap_atomic(src_vaddr, KM_USER1);
521 io_mapping_unmap_atomic(dst_vaddr);
Keith Packard0839ccb2008-10-30 19:38:48 -0700522 if (unwritten)
523 return -EFAULT;
524 return 0;
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700525}
526
Eric Anholt40123c12009-03-09 13:42:30 -0700527static inline int
528fast_shmem_write(struct page **pages,
529 loff_t page_base, int page_offset,
530 char __user *data,
531 int length)
532{
	char *vaddr;
Dave Airlied0088772009-03-28 20:29:48 -0400534 unsigned long unwritten;
Eric Anholt40123c12009-03-09 13:42:30 -0700535
536 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
537 if (vaddr == NULL)
538 return -ENOMEM;
Dave Airlied0088772009-03-28 20:29:48 -0400539 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
Eric Anholt40123c12009-03-09 13:42:30 -0700540 kunmap_atomic(vaddr, KM_USER0);
541
Dave Airlied0088772009-03-28 20:29:48 -0400542 if (unwritten)
543 return -EFAULT;
Eric Anholt40123c12009-03-09 13:42:30 -0700544 return 0;
545}
546
Eric Anholt3de09aa2009-03-09 09:42:23 -0700547/**
548 * This is the fast pwrite path, where we copy the data directly from the
549 * user into the GTT, uncached.
550 */
Eric Anholt673a3942008-07-30 12:06:12 -0700551static int
Eric Anholt3de09aa2009-03-09 09:42:23 -0700552i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
553 struct drm_i915_gem_pwrite *args,
554 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700555{
556 struct drm_i915_gem_object *obj_priv = obj->driver_private;
Keith Packard0839ccb2008-10-30 19:38:48 -0700557 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -0700558 ssize_t remain;
Keith Packard0839ccb2008-10-30 19:38:48 -0700559 loff_t offset, page_base;
Eric Anholt673a3942008-07-30 12:06:12 -0700560 char __user *user_data;
Keith Packard0839ccb2008-10-30 19:38:48 -0700561 int page_offset, page_length;
562 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700563
564 user_data = (char __user *) (uintptr_t) args->data_ptr;
565 remain = args->size;
566 if (!access_ok(VERIFY_READ, user_data, remain))
567 return -EFAULT;
568
569
570 mutex_lock(&dev->struct_mutex);
571 ret = i915_gem_object_pin(obj, 0);
572 if (ret) {
573 mutex_unlock(&dev->struct_mutex);
574 return ret;
575 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800576 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
Eric Anholt673a3942008-07-30 12:06:12 -0700577 if (ret)
578 goto fail;
579
580 obj_priv = obj->driver_private;
581 offset = obj_priv->gtt_offset + args->offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700582
583 while (remain > 0) {
584 /* Operation in this page
585 *
Keith Packard0839ccb2008-10-30 19:38:48 -0700586 * page_base = page offset within aperture
587 * page_offset = offset within page
588 * page_length = bytes to copy for this page
Eric Anholt673a3942008-07-30 12:06:12 -0700589 */
Keith Packard0839ccb2008-10-30 19:38:48 -0700590 page_base = (offset & ~(PAGE_SIZE-1));
591 page_offset = offset & (PAGE_SIZE-1);
592 page_length = remain;
593 if ((page_offset + remain) > PAGE_SIZE)
594 page_length = PAGE_SIZE - page_offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700595
Keith Packard0839ccb2008-10-30 19:38:48 -0700596 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
597 page_offset, user_data, page_length);
Eric Anholt673a3942008-07-30 12:06:12 -0700598
Keith Packard0839ccb2008-10-30 19:38:48 -0700599 /* If we get a fault while copying data, then (presumably) our
Eric Anholt3de09aa2009-03-09 09:42:23 -0700600 * source page isn't available. Return the error and we'll
601 * retry in the slow path.
Keith Packard0839ccb2008-10-30 19:38:48 -0700602 */
Eric Anholt3de09aa2009-03-09 09:42:23 -0700603 if (ret)
604 goto fail;
Eric Anholt673a3942008-07-30 12:06:12 -0700605
Keith Packard0839ccb2008-10-30 19:38:48 -0700606 remain -= page_length;
607 user_data += page_length;
608 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700609 }
Eric Anholt673a3942008-07-30 12:06:12 -0700610
611fail:
612 i915_gem_object_unpin(obj);
613 mutex_unlock(&dev->struct_mutex);
614
615 return ret;
616}
617
Eric Anholt3de09aa2009-03-09 09:42:23 -0700618/**
619 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
620 * the memory and maps it using kmap_atomic for copying.
621 *
622 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
623 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
624 */
Eric Anholt3043c602008-10-02 12:24:47 -0700625static int
Eric Anholt3de09aa2009-03-09 09:42:23 -0700626i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
627 struct drm_i915_gem_pwrite *args,
628 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700629{
Eric Anholt3de09aa2009-03-09 09:42:23 -0700630 struct drm_i915_gem_object *obj_priv = obj->driver_private;
631 drm_i915_private_t *dev_priv = dev->dev_private;
632 ssize_t remain;
633 loff_t gtt_page_base, offset;
634 loff_t first_data_page, last_data_page, num_pages;
635 loff_t pinned_pages, i;
636 struct page **user_pages;
637 struct mm_struct *mm = current->mm;
638 int gtt_page_offset, data_page_offset, data_page_index, page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700639 int ret;
Eric Anholt3de09aa2009-03-09 09:42:23 -0700640 uint64_t data_ptr = args->data_ptr;
641
642 remain = args->size;
643
644 /* Pin the user pages containing the data. We can't fault while
645 * holding the struct mutex, and all of the pwrite implementations
646 * want to hold it while dereferencing the user data.
647 */
648 first_data_page = data_ptr / PAGE_SIZE;
649 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
650 num_pages = last_data_page - first_data_page + 1;
651
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700652 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
Eric Anholt3de09aa2009-03-09 09:42:23 -0700653 if (user_pages == NULL)
654 return -ENOMEM;
655
656 down_read(&mm->mmap_sem);
657 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
658 num_pages, 0, 0, user_pages, NULL);
659 up_read(&mm->mmap_sem);
660 if (pinned_pages < num_pages) {
661 ret = -EFAULT;
662 goto out_unpin_pages;
663 }
664
665 mutex_lock(&dev->struct_mutex);
666 ret = i915_gem_object_pin(obj, 0);
667 if (ret)
668 goto out_unlock;
669
670 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
671 if (ret)
672 goto out_unpin_object;
673
674 obj_priv = obj->driver_private;
675 offset = obj_priv->gtt_offset + args->offset;
676
677 while (remain > 0) {
678 /* Operation in this page
679 *
680 * gtt_page_base = page offset within aperture
681 * gtt_page_offset = offset within page in aperture
682 * data_page_index = page number in get_user_pages return
 * data_page_offset = offset within data_page_index page.
684 * page_length = bytes to copy for this page
685 */
686 gtt_page_base = offset & PAGE_MASK;
687 gtt_page_offset = offset & ~PAGE_MASK;
688 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
689 data_page_offset = data_ptr & ~PAGE_MASK;
690
691 page_length = remain;
692 if ((gtt_page_offset + page_length) > PAGE_SIZE)
693 page_length = PAGE_SIZE - gtt_page_offset;
694 if ((data_page_offset + page_length) > PAGE_SIZE)
695 page_length = PAGE_SIZE - data_page_offset;
696
697 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
698 gtt_page_base, gtt_page_offset,
699 user_pages[data_page_index],
700 data_page_offset,
701 page_length);
702
703 /* If we get a fault while copying data, then (presumably) our
704 * source page isn't available. Return the error and we'll
705 * retry in the slow path.
706 */
707 if (ret)
708 goto out_unpin_object;
709
710 remain -= page_length;
711 offset += page_length;
712 data_ptr += page_length;
713 }
714
715out_unpin_object:
716 i915_gem_object_unpin(obj);
717out_unlock:
718 mutex_unlock(&dev->struct_mutex);
719out_unpin_pages:
720 for (i = 0; i < pinned_pages; i++)
721 page_cache_release(user_pages[i]);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700722 drm_free_large(user_pages);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700723
724 return ret;
725}
726
Eric Anholt40123c12009-03-09 13:42:30 -0700727/**
728 * This is the fast shmem pwrite path, which attempts to directly
729 * copy_from_user into the kmapped pages backing the object.
730 */
Eric Anholt673a3942008-07-30 12:06:12 -0700731static int
Eric Anholt40123c12009-03-09 13:42:30 -0700732i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
733 struct drm_i915_gem_pwrite *args,
734 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700735{
Eric Anholt40123c12009-03-09 13:42:30 -0700736 struct drm_i915_gem_object *obj_priv = obj->driver_private;
737 ssize_t remain;
738 loff_t offset, page_base;
739 char __user *user_data;
740 int page_offset, page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700741 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -0700742
743 user_data = (char __user *) (uintptr_t) args->data_ptr;
744 remain = args->size;
Eric Anholt673a3942008-07-30 12:06:12 -0700745
746 mutex_lock(&dev->struct_mutex);
747
Eric Anholt40123c12009-03-09 13:42:30 -0700748 ret = i915_gem_object_get_pages(obj);
749 if (ret != 0)
750 goto fail_unlock;
751
Eric Anholte47c68e2008-11-14 13:35:19 -0800752 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Eric Anholt40123c12009-03-09 13:42:30 -0700753 if (ret != 0)
754 goto fail_put_pages;
Eric Anholt673a3942008-07-30 12:06:12 -0700755
Eric Anholt40123c12009-03-09 13:42:30 -0700756 obj_priv = obj->driver_private;
Eric Anholt673a3942008-07-30 12:06:12 -0700757 offset = args->offset;
Eric Anholt40123c12009-03-09 13:42:30 -0700758 obj_priv->dirty = 1;
Eric Anholt673a3942008-07-30 12:06:12 -0700759
Eric Anholt40123c12009-03-09 13:42:30 -0700760 while (remain > 0) {
761 /* Operation in this page
762 *
 * page_base = page-aligned offset into the object's backing pages
764 * page_offset = offset within page
765 * page_length = bytes to copy for this page
766 */
767 page_base = (offset & ~(PAGE_SIZE-1));
768 page_offset = offset & (PAGE_SIZE-1);
769 page_length = remain;
770 if ((page_offset + remain) > PAGE_SIZE)
771 page_length = PAGE_SIZE - page_offset;
772
773 ret = fast_shmem_write(obj_priv->pages,
774 page_base, page_offset,
775 user_data, page_length);
776 if (ret)
777 goto fail_put_pages;
778
779 remain -= page_length;
780 user_data += page_length;
781 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700782 }
783
Eric Anholt40123c12009-03-09 13:42:30 -0700784fail_put_pages:
785 i915_gem_object_put_pages(obj);
786fail_unlock:
Eric Anholt673a3942008-07-30 12:06:12 -0700787 mutex_unlock(&dev->struct_mutex);
788
Eric Anholt40123c12009-03-09 13:42:30 -0700789 return ret;
790}
791
792/**
793 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
794 * the memory and maps it using kmap_atomic for copying.
795 *
796 * This avoids taking mmap_sem for faulting on the user's address while the
797 * struct_mutex is held.
798 */
799static int
800i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
801 struct drm_i915_gem_pwrite *args,
802 struct drm_file *file_priv)
803{
804 struct drm_i915_gem_object *obj_priv = obj->driver_private;
805 struct mm_struct *mm = current->mm;
806 struct page **user_pages;
807 ssize_t remain;
808 loff_t offset, pinned_pages, i;
809 loff_t first_data_page, last_data_page, num_pages;
810 int shmem_page_index, shmem_page_offset;
811 int data_page_index, data_page_offset;
812 int page_length;
813 int ret;
814 uint64_t data_ptr = args->data_ptr;
Eric Anholt280b7132009-03-12 16:56:27 -0700815 int do_bit17_swizzling;
Eric Anholt40123c12009-03-09 13:42:30 -0700816
817 remain = args->size;
818
819 /* Pin the user pages containing the data. We can't fault while
820 * holding the struct mutex, and all of the pwrite implementations
821 * want to hold it while dereferencing the user data.
822 */
823 first_data_page = data_ptr / PAGE_SIZE;
824 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
825 num_pages = last_data_page - first_data_page + 1;
826
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700827 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
Eric Anholt40123c12009-03-09 13:42:30 -0700828 if (user_pages == NULL)
829 return -ENOMEM;
830
831 down_read(&mm->mmap_sem);
832 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
833 num_pages, 0, 0, user_pages, NULL);
834 up_read(&mm->mmap_sem);
835 if (pinned_pages < num_pages) {
836 ret = -EFAULT;
837 goto fail_put_user_pages;
838 }
839
Eric Anholt280b7132009-03-12 16:56:27 -0700840 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
841
Eric Anholt40123c12009-03-09 13:42:30 -0700842 mutex_lock(&dev->struct_mutex);
843
844 ret = i915_gem_object_get_pages(obj);
845 if (ret != 0)
846 goto fail_unlock;
847
848 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
849 if (ret != 0)
850 goto fail_put_pages;
851
852 obj_priv = obj->driver_private;
853 offset = args->offset;
854 obj_priv->dirty = 1;
855
856 while (remain > 0) {
857 /* Operation in this page
858 *
859 * shmem_page_index = page number within shmem file
860 * shmem_page_offset = offset within page in shmem file
861 * data_page_index = page number in get_user_pages return
 * data_page_offset = offset within data_page_index page.
863 * page_length = bytes to copy for this page
864 */
865 shmem_page_index = offset / PAGE_SIZE;
866 shmem_page_offset = offset & ~PAGE_MASK;
867 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
868 data_page_offset = data_ptr & ~PAGE_MASK;
869
870 page_length = remain;
871 if ((shmem_page_offset + page_length) > PAGE_SIZE)
872 page_length = PAGE_SIZE - shmem_page_offset;
873 if ((data_page_offset + page_length) > PAGE_SIZE)
874 page_length = PAGE_SIZE - data_page_offset;
875
Eric Anholt280b7132009-03-12 16:56:27 -0700876 if (do_bit17_swizzling) {
877 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
878 shmem_page_offset,
879 user_pages[data_page_index],
880 data_page_offset,
881 page_length,
882 0);
883 } else {
884 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
885 shmem_page_offset,
886 user_pages[data_page_index],
887 data_page_offset,
888 page_length);
889 }
Eric Anholt40123c12009-03-09 13:42:30 -0700890 if (ret)
891 goto fail_put_pages;
892
893 remain -= page_length;
894 data_ptr += page_length;
895 offset += page_length;
896 }
897
898fail_put_pages:
899 i915_gem_object_put_pages(obj);
900fail_unlock:
901 mutex_unlock(&dev->struct_mutex);
902fail_put_user_pages:
903 for (i = 0; i < pinned_pages; i++)
904 page_cache_release(user_pages[i]);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700905 drm_free_large(user_pages);
Eric Anholt40123c12009-03-09 13:42:30 -0700906
907 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700908}
909
910/**
911 * Writes data to the object referenced by handle.
912 *
913 * On error, the contents of the buffer that were to be modified are undefined.
914 */
915int
916i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
917 struct drm_file *file_priv)
918{
919 struct drm_i915_gem_pwrite *args = data;
920 struct drm_gem_object *obj;
921 struct drm_i915_gem_object *obj_priv;
922 int ret = 0;
923
924 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
925 if (obj == NULL)
926 return -EBADF;
927 obj_priv = obj->driver_private;
928
929 /* Bounds check destination.
930 *
931 * XXX: This could use review for overflow issues...
932 */
933 if (args->offset > obj->size || args->size > obj->size ||
934 args->offset + args->size > obj->size) {
935 drm_gem_object_unreference(obj);
936 return -EINVAL;
937 }
938
939 /* We can only do the GTT pwrite on untiled buffers, as otherwise
940 * it would end up going through the fenced access, and we'll get
941 * different detiling behavior between reading and writing.
942 * pread/pwrite currently are reading and writing from the CPU
943 * perspective, requiring manual detiling by the client.
944 */
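	/* Dispatch: objects backed by a physical allocation take the
	 * dedicated phys path; untiled objects with a GTT use the fast GTT
	 * write and fall back to the pinned-page slow path on a fault;
	 * bit-17-swizzled objects must take the swizzling-aware shmem slow
	 * path; everything else tries the fast shmem write first.
	 */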
Dave Airlie71acb5e2008-12-30 20:31:46 +1000945 if (obj_priv->phys_obj)
946 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
947 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
Eric Anholt3de09aa2009-03-09 09:42:23 -0700948 dev->gtt_total != 0) {
949 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
950 if (ret == -EFAULT) {
951 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
952 file_priv);
953 }
Eric Anholt280b7132009-03-12 16:56:27 -0700954 } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
955 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
Eric Anholt40123c12009-03-09 13:42:30 -0700956 } else {
957 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
958 if (ret == -EFAULT) {
959 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
960 file_priv);
961 }
962 }
Eric Anholt673a3942008-07-30 12:06:12 -0700963
964#if WATCH_PWRITE
965 if (ret)
966 DRM_INFO("pwrite failed %d\n", ret);
967#endif
968
969 drm_gem_object_unreference(obj);
970
971 return ret;
972}
973
974/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800975 * Called when user space prepares to use an object with the CPU, either
976 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -0700977 */
978int
979i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
980 struct drm_file *file_priv)
981{
982 struct drm_i915_gem_set_domain *args = data;
983 struct drm_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800984 uint32_t read_domains = args->read_domains;
985 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -0700986 int ret;
987
988 if (!(dev->driver->driver_features & DRIVER_GEM))
989 return -ENODEV;
990
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800991 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +0100992 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800993 return -EINVAL;
994
Chris Wilson21d509e2009-06-06 09:46:02 +0100995 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800996 return -EINVAL;
997
998 /* Having something in the write domain implies it's in the read
999 * domain, and only that read domain. Enforce that in the request.
1000 */
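	/* For example, a request with write_domain == I915_GEM_DOMAIN_GTT is
	 * only valid when read_domains == I915_GEM_DOMAIN_GTT as well.
	 */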
1001 if (write_domain != 0 && read_domains != write_domain)
1002 return -EINVAL;
1003
Eric Anholt673a3942008-07-30 12:06:12 -07001004 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1005 if (obj == NULL)
1006 return -EBADF;
1007
1008 mutex_lock(&dev->struct_mutex);
1009#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02001010 DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001011 obj, obj->size, read_domains, write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07001012#endif
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001013 if (read_domains & I915_GEM_DOMAIN_GTT) {
1014 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001015
1016 /* Silently promote "you're not bound, there was nothing to do"
1017 * to success, since the client was just asking us to
1018 * make sure everything was done.
1019 */
1020 if (ret == -EINVAL)
1021 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001022 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001023 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001024 }
1025
Eric Anholt673a3942008-07-30 12:06:12 -07001026 drm_gem_object_unreference(obj);
1027 mutex_unlock(&dev->struct_mutex);
1028 return ret;
1029}
1030
1031/**
1032 * Called when user space has done writes to this buffer
1033 */
1034int
1035i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1036 struct drm_file *file_priv)
1037{
1038 struct drm_i915_gem_sw_finish *args = data;
1039 struct drm_gem_object *obj;
1040 struct drm_i915_gem_object *obj_priv;
1041 int ret = 0;
1042
1043 if (!(dev->driver->driver_features & DRIVER_GEM))
1044 return -ENODEV;
1045
1046 mutex_lock(&dev->struct_mutex);
1047 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1048 if (obj == NULL) {
1049 mutex_unlock(&dev->struct_mutex);
1050 return -EBADF;
1051 }
1052
1053#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02001054 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
Eric Anholt673a3942008-07-30 12:06:12 -07001055 __func__, args->handle, obj, obj->size);
1056#endif
1057 obj_priv = obj->driver_private;
1058
1059 /* Pinned buffers may be scanout, so flush the cache */
Eric Anholte47c68e2008-11-14 13:35:19 -08001060 if (obj_priv->pin_count)
1061 i915_gem_object_flush_cpu_write_domain(obj);
1062
Eric Anholt673a3942008-07-30 12:06:12 -07001063 drm_gem_object_unreference(obj);
1064 mutex_unlock(&dev->struct_mutex);
1065 return ret;
1066}
1067
1068/**
1069 * Maps the contents of an object, returning the address it is mapped
1070 * into.
1071 *
1072 * While the mapping holds a reference on the contents of the object, it doesn't
1073 * imply a ref on the object itself.
1074 */
1075int
1076i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1077 struct drm_file *file_priv)
1078{
1079 struct drm_i915_gem_mmap *args = data;
1080 struct drm_gem_object *obj;
1081 loff_t offset;
1082 unsigned long addr;
1083
1084 if (!(dev->driver->driver_features & DRIVER_GEM))
1085 return -ENODEV;
1086
1087 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1088 if (obj == NULL)
1089 return -EBADF;
1090
1091 offset = args->offset;
1092
1093 down_write(&current->mm->mmap_sem);
1094 addr = do_mmap(obj->filp, 0, args->size,
1095 PROT_READ | PROT_WRITE, MAP_SHARED,
1096 args->offset);
1097 up_write(&current->mm->mmap_sem);
1098 mutex_lock(&dev->struct_mutex);
1099 drm_gem_object_unreference(obj);
1100 mutex_unlock(&dev->struct_mutex);
1101 if (IS_ERR((void *)addr))
1102 return addr;
1103
1104 args->addr_ptr = (uint64_t) addr;
1105
1106 return 0;
1107}
1108
/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
1125int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1126{
1127 struct drm_gem_object *obj = vma->vm_private_data;
1128 struct drm_device *dev = obj->dev;
1129 struct drm_i915_private *dev_priv = dev->dev_private;
1130 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1131 pgoff_t page_offset;
1132 unsigned long pfn;
1133 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001134 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001135
1136 /* We don't use vmf->pgoff since that has the fake offset */
1137 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1138 PAGE_SHIFT;
1139
1140 /* Now bind it into the GTT if needed */
1141 mutex_lock(&dev->struct_mutex);
1142 if (!obj_priv->gtt_space) {
1143 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1144 if (ret) {
1145 mutex_unlock(&dev->struct_mutex);
1146 return VM_FAULT_SIGBUS;
1147 }
Kristian Høgsberg07f4f3e2009-05-27 14:37:28 -04001148
1149 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1150 if (ret) {
1151 mutex_unlock(&dev->struct_mutex);
1152 return VM_FAULT_SIGBUS;
1153 }
1154
Jesse Barnes14b60392009-05-20 16:47:08 -04001155 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001156 }
1157
1158 /* Need a new fence register? */
1159 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
Eric Anholtd9ddcb92009-01-27 10:33:49 -08001160 obj_priv->tiling_mode != I915_TILING_NONE) {
Chris Wilson8c4b8c32009-06-17 22:08:52 +01001161 ret = i915_gem_object_get_fence_reg(obj);
Chris Wilson7d8d58b2009-02-04 14:15:10 +00001162 if (ret) {
1163 mutex_unlock(&dev->struct_mutex);
Eric Anholtd9ddcb92009-01-27 10:33:49 -08001164 return VM_FAULT_SIGBUS;
Chris Wilson7d8d58b2009-02-04 14:15:10 +00001165 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08001166 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001167
1168 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1169 page_offset;
1170
1171 /* Finally, remap it using the new GTT offset */
1172 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1173
1174 mutex_unlock(&dev->struct_mutex);
1175
1176 switch (ret) {
1177 case -ENOMEM:
1178 case -EAGAIN:
1179 return VM_FAULT_OOM;
1180 case -EFAULT:
Jesse Barnes959b8872009-03-20 14:16:33 -07001181 case -EINVAL:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001182 return VM_FAULT_SIGBUS;
1183 default:
1184 return VM_FAULT_NOPAGE;
1185 }
1186}
1187
1188/**
1189 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1190 * @obj: obj in question
1191 *
1192 * GEM memory mapping works by handing back to userspace a fake mmap offset
1193 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1194 * up the object based on the offset and sets up the various memory mapping
1195 * structures.
1196 *
1197 * This routine allocates and attaches a fake offset for @obj.
1198 */
1199static int
1200i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1201{
1202 struct drm_device *dev = obj->dev;
1203 struct drm_gem_mm *mm = dev->mm_private;
1204 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1205 struct drm_map_list *list;
Benjamin Herrenschmidtf77d3902009-02-02 16:55:46 +11001206 struct drm_local_map *map;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001207 int ret = 0;
1208
1209 /* Set the object up for mmap'ing */
1210 list = &obj->map_list;
Eric Anholt9a298b22009-03-24 12:23:04 -07001211 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001212 if (!list->map)
1213 return -ENOMEM;
1214
1215 map = list->map;
1216 map->type = _DRM_GEM;
1217 map->size = obj->size;
1218 map->handle = obj;
1219
1220 /* Get a DRM GEM mmap offset allocated... */
1221 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1222 obj->size / PAGE_SIZE, 0, 0);
1223 if (!list->file_offset_node) {
1224 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1225 ret = -ENOMEM;
1226 goto out_free_list;
1227 }
1228
1229 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1230 obj->size / PAGE_SIZE, 0);
1231 if (!list->file_offset_node) {
1232 ret = -ENOMEM;
1233 goto out_free_list;
1234 }
1235
1236 list->hash.key = list->file_offset_node->start;
1237 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1238 DRM_ERROR("failed to add to map hash\n");
1239 goto out_free_mm;
1240 }
1241
	/* By now we should be all set; any drm_mmap request on the offset
	 * below will get to our mmap & fault handler.
	 */
1244 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1245
1246 return 0;
1247
1248out_free_mm:
1249 drm_mm_put_block(list->file_offset_node);
1250out_free_list:
Eric Anholt9a298b22009-03-24 12:23:04 -07001251 kfree(list->map);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001252
1253 return ret;
1254}
1255
Chris Wilson901782b2009-07-10 08:18:50 +01001256/**
1257 * i915_gem_release_mmap - remove physical page mappings
1258 * @obj: obj in question
1259 *
 * Preserve the reservation of the mmapping with the DRM core code, but
1261 * relinquish ownership of the pages back to the system.
1262 *
1263 * It is vital that we remove the page mapping if we have mapped a tiled
1264 * object through the GTT and then lose the fence register due to
1265 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
1267 * mapping will then trigger a page fault on the next user access, allowing
1268 * fixup by i915_gem_fault().
1269 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001270void
Chris Wilson901782b2009-07-10 08:18:50 +01001271i915_gem_release_mmap(struct drm_gem_object *obj)
1272{
1273 struct drm_device *dev = obj->dev;
1274 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1275
1276 if (dev->dev_mapping)
1277 unmap_mapping_range(dev->dev_mapping,
1278 obj_priv->mmap_offset, obj->size, 1);
1279}
1280
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001281static void
1282i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1283{
1284 struct drm_device *dev = obj->dev;
1285 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1286 struct drm_gem_mm *mm = dev->mm_private;
1287 struct drm_map_list *list;
1288
1289 list = &obj->map_list;
1290 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1291
1292 if (list->file_offset_node) {
1293 drm_mm_put_block(list->file_offset_node);
1294 list->file_offset_node = NULL;
1295 }
1296
1297 if (list->map) {
Eric Anholt9a298b22009-03-24 12:23:04 -07001298 kfree(list->map);
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001299 list->map = NULL;
1300 }
1301
1302 obj_priv->mmap_offset = 0;
1303}
1304
Jesse Barnesde151cf2008-11-12 10:03:55 -08001305/**
1306 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1307 * @obj: object to check
1308 *
1309 * Return the required GTT alignment for an object, taking into account
1310 * potential fence register mapping if needed.
1311 */
1312static uint32_t
1313i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1314{
1315 struct drm_device *dev = obj->dev;
1316 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1317 int start, i;
1318
1319 /*
1320 * Minimum alignment is 4k (GTT page size), but might be greater
1321 * if a fence register is needed for the object.
1322 */
1323 if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
1324 return 4096;
1325
1326 /*
1327 * Previous chips need to be aligned to the size of the smallest
1328 * fence register that can contain the object.
1329 */
1330 if (IS_I9XX(dev))
1331 start = 1024*1024;
1332 else
1333 start = 512*1024;
1334
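	/* Walk up the power-of-two sizes from the minimum fence size until
	 * one is large enough to contain the object; that size is the
	 * alignment we must honour so a fence register can cover it.
	 */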
1335 for (i = start; i < obj->size; i <<= 1)
1336 ;
1337
1338 return i;
1339}
1340
1341/**
1342 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1343 * @dev: DRM device
1344 * @data: GTT mapping ioctl data
1345 * @file_priv: GEM object info
1346 *
1347 * Simply returns the fake offset to userspace so it can mmap it.
1348 * The mmap call will end up in drm_gem_mmap(), which will set things
1349 * up so we can get faults in the handler above.
1350 *
1351 * The fault handler will take care of binding the object into the GTT
1352 * (since it may have been evicted to make room for something), allocating
1353 * a fence register, and mapping the appropriate aperture address into
1354 * userspace.
1355 */
1356int
1357i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1358 struct drm_file *file_priv)
1359{
1360 struct drm_i915_gem_mmap_gtt *args = data;
1361 struct drm_i915_private *dev_priv = dev->dev_private;
1362 struct drm_gem_object *obj;
1363 struct drm_i915_gem_object *obj_priv;
1364 int ret;
1365
1366 if (!(dev->driver->driver_features & DRIVER_GEM))
1367 return -ENODEV;
1368
1369 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1370 if (obj == NULL)
1371 return -EBADF;
1372
1373 mutex_lock(&dev->struct_mutex);
1374
1375 obj_priv = obj->driver_private;
1376
1377 if (!obj_priv->mmap_offset) {
1378 ret = i915_gem_create_mmap_offset(obj);
Chris Wilson13af1062009-02-11 14:26:31 +00001379 if (ret) {
1380 drm_gem_object_unreference(obj);
1381 mutex_unlock(&dev->struct_mutex);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001382 return ret;
Chris Wilson13af1062009-02-11 14:26:31 +00001383 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001384 }
1385
1386 args->offset = obj_priv->mmap_offset;
1387
1388 obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
1389
1390 /* Make sure the alignment is correct for fence regs etc */
1391 if (obj_priv->agp_mem &&
1392 (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
1393 drm_gem_object_unreference(obj);
1394 mutex_unlock(&dev->struct_mutex);
1395 return -EINVAL;
1396 }
1397
1398 /*
1399 * Pull it into the GTT so that we have a page list (makes the
1400 * initial fault faster and any subsequent flushing possible).
1401 */
1402 if (!obj_priv->agp_mem) {
1403 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1404 if (ret) {
1405 drm_gem_object_unreference(obj);
1406 mutex_unlock(&dev->struct_mutex);
1407 return ret;
1408 }
Jesse Barnes14b60392009-05-20 16:47:08 -04001409 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001410 }
1411
1412 drm_gem_object_unreference(obj);
1413 mutex_unlock(&dev->struct_mutex);
1414
1415 return 0;
1416}
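/*
 * Illustrative sketch (not part of this driver): userspace consumes the
 * fake offset returned above roughly as
 *
 *	struct drm_i915_gem_mmap_gtt map = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, map.offset);
 *
 * after which faults on ptr are serviced by i915_gem_fault() above;
 * drmIoctl() is assumed from libdrm.
 */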
1417
Ben Gamari6911a9b2009-04-02 11:24:54 -07001418void
Eric Anholt856fa192009-03-19 14:10:50 -07001419i915_gem_object_put_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001420{
1421 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1422 int page_count = obj->size / PAGE_SIZE;
1423 int i;
1424
Eric Anholt856fa192009-03-19 14:10:50 -07001425 BUG_ON(obj_priv->pages_refcount == 0);
1426
1427 if (--obj_priv->pages_refcount != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07001428 return;
1429
Eric Anholt280b7132009-03-12 16:56:27 -07001430 if (obj_priv->tiling_mode != I915_TILING_NONE)
1431 i915_gem_object_save_bit_17_swizzle(obj);
1432
Eric Anholt673a3942008-07-30 12:06:12 -07001433 for (i = 0; i < page_count; i++)
Eric Anholt856fa192009-03-19 14:10:50 -07001434 if (obj_priv->pages[i] != NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07001435 if (obj_priv->dirty)
Eric Anholt856fa192009-03-19 14:10:50 -07001436 set_page_dirty(obj_priv->pages[i]);
1437 mark_page_accessed(obj_priv->pages[i]);
1438 page_cache_release(obj_priv->pages[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07001439 }
1440 obj_priv->dirty = 0;
1441
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07001442 drm_free_large(obj_priv->pages);
Eric Anholt856fa192009-03-19 14:10:50 -07001443 obj_priv->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001444}
1445
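/* An object migrates between three per-device lists as the GPU uses it:
 * the active list (still referenced by an unretired request), the
 * flushing list (written by the GPU but still awaiting a flush and
 * seqno), and the inactive list (idle and a candidate for eviction).
 * The helpers below move objects between those lists and manage the
 * reference that the active list holds on an object.
 */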
1446static void
Eric Anholtce44b0e2008-11-06 16:00:31 -08001447i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001448{
1449 struct drm_device *dev = obj->dev;
1450 drm_i915_private_t *dev_priv = dev->dev_private;
1451 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1452
1453 /* Add a reference if we're newly entering the active list. */
1454 if (!obj_priv->active) {
1455 drm_gem_object_reference(obj);
1456 obj_priv->active = 1;
1457 }
1458 /* Move from whatever list we were on to the tail of execution. */
Carl Worth5e118f42009-03-20 11:54:25 -07001459 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001460 list_move_tail(&obj_priv->list,
1461 &dev_priv->mm.active_list);
Carl Worth5e118f42009-03-20 11:54:25 -07001462 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001463 obj_priv->last_rendering_seqno = seqno;
Eric Anholt673a3942008-07-30 12:06:12 -07001464}
1465
Eric Anholtce44b0e2008-11-06 16:00:31 -08001466static void
1467i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1468{
1469 struct drm_device *dev = obj->dev;
1470 drm_i915_private_t *dev_priv = dev->dev_private;
1471 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1472
1473 BUG_ON(!obj_priv->active);
1474 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1475 obj_priv->last_rendering_seqno = 0;
1476}
Eric Anholt673a3942008-07-30 12:06:12 -07001477
1478static void
1479i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1480{
1481 struct drm_device *dev = obj->dev;
1482 drm_i915_private_t *dev_priv = dev->dev_private;
1483 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1484
1485 i915_verify_inactive(dev, __FILE__, __LINE__);
1486 if (obj_priv->pin_count != 0)
1487 list_del_init(&obj_priv->list);
1488 else
1489 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1490
Eric Anholtce44b0e2008-11-06 16:00:31 -08001491 obj_priv->last_rendering_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001492 if (obj_priv->active) {
1493 obj_priv->active = 0;
1494 drm_gem_object_unreference(obj);
1495 }
1496 i915_verify_inactive(dev, __FILE__, __LINE__);
1497}
1498
1499/**
1500 * Creates a new sequence number, emitting a write of it to the status page
1501 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1502 *
1503 * Must be called with struct_lock held.
1504 *
1505 * Returned sequence numbers are nonzero on success.
1506 */
1507static uint32_t
Eric Anholtb9624422009-06-03 07:27:35 +00001508i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1509 uint32_t flush_domains)
Eric Anholt673a3942008-07-30 12:06:12 -07001510{
1511 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtb9624422009-06-03 07:27:35 +00001512 struct drm_i915_file_private *i915_file_priv = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001513 struct drm_i915_gem_request *request;
1514 uint32_t seqno;
1515 int was_empty;
1516 RING_LOCALS;
1517
Eric Anholtb9624422009-06-03 07:27:35 +00001518 if (file_priv != NULL)
1519 i915_file_priv = file_priv->driver_priv;
1520
Eric Anholt9a298b22009-03-24 12:23:04 -07001521 request = kzalloc(sizeof(*request), GFP_KERNEL);
Eric Anholt673a3942008-07-30 12:06:12 -07001522 if (request == NULL)
1523 return 0;
1524
1525 /* Grab the seqno we're going to make this request be, and bump the
1526 * next (skipping 0 so it can be the reserved no-seqno value).
1527 */
1528 seqno = dev_priv->mm.next_gem_seqno;
1529 dev_priv->mm.next_gem_seqno++;
1530 if (dev_priv->mm.next_gem_seqno == 0)
1531 dev_priv->mm.next_gem_seqno++;
1532
1533 BEGIN_LP_RING(4);
1534 OUT_RING(MI_STORE_DWORD_INDEX);
1535 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1536 OUT_RING(seqno);
1537
1538 OUT_RING(MI_USER_INTERRUPT);
1539 ADVANCE_LP_RING();
1540
1541 DRM_DEBUG("%d\n", seqno);
1542
1543 request->seqno = seqno;
1544 request->emitted_jiffies = jiffies;
Eric Anholt673a3942008-07-30 12:06:12 -07001545 was_empty = list_empty(&dev_priv->mm.request_list);
1546 list_add_tail(&request->list, &dev_priv->mm.request_list);
Eric Anholtb9624422009-06-03 07:27:35 +00001547 if (i915_file_priv) {
1548 list_add_tail(&request->client_list,
1549 &i915_file_priv->mm.request_list);
1550 } else {
1551 INIT_LIST_HEAD(&request->client_list);
1552 }
Eric Anholt673a3942008-07-30 12:06:12 -07001553
Eric Anholtce44b0e2008-11-06 16:00:31 -08001554 /* Associate any objects on the flushing list matching the write
1555 * domain we're flushing with our flush.
1556 */
1557 if (flush_domains != 0) {
1558 struct drm_i915_gem_object *obj_priv, *next;
1559
1560 list_for_each_entry_safe(obj_priv, next,
1561 &dev_priv->mm.flushing_list, list) {
1562 struct drm_gem_object *obj = obj_priv->obj;
1563
1564 if ((obj->write_domain & flush_domains) ==
1565 obj->write_domain) {
1566 obj->write_domain = 0;
1567 i915_gem_object_move_to_active(obj, seqno);
1568 }
1569 }
1570
1571 }
1572
Keith Packard6dbe2772008-10-14 21:41:13 -07001573 if (was_empty && !dev_priv->mm.suspended)
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001574 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Eric Anholt673a3942008-07-30 12:06:12 -07001575 return seqno;
1576}
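
/*
 * In outline, the request machinery above is used elsewhere in this file
 * roughly as follows (a minimal sketch, error handling omitted):
 *
 *	i915_gem_flush(dev, invalidate_domains, flush_domains);
 *	seqno = i915_add_request(dev, file_priv, flush_domains);
 *	...
 *	ret = i915_wait_request(dev, seqno);
 *
 * i915_wait_request() sleeps until the hardware has written a seqno at or
 * past the requested one to the status page, then calls
 * i915_gem_retire_requests() to move the request's buffers off the active
 * list.
 */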
1577
1578/**
1579 * Command execution barrier
1580 *
1581 * Ensures that all commands in the ring are finished
1582 * before signalling the CPU
1583 */
Eric Anholt3043c602008-10-02 12:24:47 -07001584static uint32_t
Eric Anholt673a3942008-07-30 12:06:12 -07001585i915_retire_commands(struct drm_device *dev)
1586{
1587 drm_i915_private_t *dev_priv = dev->dev_private;
1588 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1589 uint32_t flush_domains = 0;
1590 RING_LOCALS;
1591
1592 /* The sampler always gets flushed on i965 (sigh) */
1593 if (IS_I965G(dev))
1594 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1595 BEGIN_LP_RING(2);
1596 OUT_RING(cmd);
1597 OUT_RING(0); /* noop */
1598 ADVANCE_LP_RING();
1599 return flush_domains;
1600}
1601
1602/**
1603 * Moves buffers associated with the given request's seqno from the active list
1604 * to the flushing or inactive list as appropriate, potentially freeing them.
1605 */
1606static void
1607i915_gem_retire_request(struct drm_device *dev,
1608 struct drm_i915_gem_request *request)
1609{
1610 drm_i915_private_t *dev_priv = dev->dev_private;
1611
1612 /* Move any buffers on the active list that are no longer referenced
1613 * by the ringbuffer to the flushing/inactive lists as appropriate.
1614 */
Carl Worth5e118f42009-03-20 11:54:25 -07001615 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001616 while (!list_empty(&dev_priv->mm.active_list)) {
1617 struct drm_gem_object *obj;
1618 struct drm_i915_gem_object *obj_priv;
1619
1620 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1621 struct drm_i915_gem_object,
1622 list);
1623 obj = obj_priv->obj;
1624
1625 /* If the seqno being retired doesn't match the oldest in the
1626 * list, then the oldest in the list must still be newer than
1627 * this seqno.
1628 */
1629 if (obj_priv->last_rendering_seqno != request->seqno)
Carl Worth5e118f42009-03-20 11:54:25 -07001630 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001631
Eric Anholt673a3942008-07-30 12:06:12 -07001632#if WATCH_LRU
1633 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1634 __func__, request->seqno, obj);
1635#endif
1636
Eric Anholtce44b0e2008-11-06 16:00:31 -08001637 if (obj->write_domain != 0)
1638 i915_gem_object_move_to_flushing(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001639 else {
1640 /* Take a reference on the object so it won't be
1641 * freed while the spinlock is held. The list
1642 * protection for this spinlock is safe when breaking
1643 * the lock like this since the next thing we do
1644 * is just get the head of the list again.
1645 */
1646 drm_gem_object_reference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001647 i915_gem_object_move_to_inactive(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001648 spin_unlock(&dev_priv->mm.active_list_lock);
1649 drm_gem_object_unreference(obj);
1650 spin_lock(&dev_priv->mm.active_list_lock);
1651 }
Eric Anholt673a3942008-07-30 12:06:12 -07001652 }
Carl Worth5e118f42009-03-20 11:54:25 -07001653out:
1654 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001655}
1656
1657/**
1658 * Returns true if seq1 is later than or equal to seq2.
1659 */
1660static int
1661i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1662{
1663 return (int32_t)(seq1 - seq2) >= 0;
1664}
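
/*
 * Worked example of the wrap-safe comparison above: with seq1 == 0x00000002
 * and seq2 == 0xfffffffe, seq1 - seq2 == 0x00000004, which is positive as an
 * int32_t, so seq1 is treated as having passed seq2 even though the counter
 * has wrapped. Conversely seq2 - seq1 == 0xfffffffc, which is negative as an
 * int32_t, so seq2 has not passed seq1.
 */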
1665
1666uint32_t
1667i915_get_gem_seqno(struct drm_device *dev)
1668{
1669 drm_i915_private_t *dev_priv = dev->dev_private;
1670
1671 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1672}
1673
1674/**
1675 * This function clears the request list as sequence numbers are passed.
1676 * This function retires completed entries from the request list as their sequence numbers are passed.
1677void
1678i915_gem_retire_requests(struct drm_device *dev)
1679{
1680 drm_i915_private_t *dev_priv = dev->dev_private;
1681 uint32_t seqno;
1682
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001683 if (!dev_priv->hw_status_page)
1684 return;
1685
Eric Anholt673a3942008-07-30 12:06:12 -07001686 seqno = i915_get_gem_seqno(dev);
1687
1688 while (!list_empty(&dev_priv->mm.request_list)) {
1689 struct drm_i915_gem_request *request;
1690 uint32_t retiring_seqno;
1691
1692 request = list_first_entry(&dev_priv->mm.request_list,
1693 struct drm_i915_gem_request,
1694 list);
1695 retiring_seqno = request->seqno;
1696
1697 if (i915_seqno_passed(seqno, retiring_seqno) ||
1698 dev_priv->mm.wedged) {
1699 i915_gem_retire_request(dev, request);
1700
1701 list_del(&request->list);
Eric Anholtb9624422009-06-03 07:27:35 +00001702 list_del(&request->client_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07001703 kfree(request);
Eric Anholt673a3942008-07-30 12:06:12 -07001704 } else
1705 break;
1706 }
1707}
1708
1709void
1710i915_gem_retire_work_handler(struct work_struct *work)
1711{
1712 drm_i915_private_t *dev_priv;
1713 struct drm_device *dev;
1714
1715 dev_priv = container_of(work, drm_i915_private_t,
1716 mm.retire_work.work);
1717 dev = dev_priv->dev;
1718
1719 mutex_lock(&dev->struct_mutex);
1720 i915_gem_retire_requests(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07001721 if (!dev_priv->mm.suspended &&
1722 !list_empty(&dev_priv->mm.request_list))
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001723 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Eric Anholt673a3942008-07-30 12:06:12 -07001724 mutex_unlock(&dev->struct_mutex);
1725}
1726
1727/**
1728 * Waits for a sequence number to be signaled, and cleans up the
1729 * request and object lists appropriately for that event.
1730 */
Eric Anholt3043c602008-10-02 12:24:47 -07001731static int
Eric Anholt673a3942008-07-30 12:06:12 -07001732i915_wait_request(struct drm_device *dev, uint32_t seqno)
1733{
1734 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001735 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001736 int ret = 0;
1737
1738 BUG_ON(seqno == 0);
1739
1740 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001741 if (IS_IGDNG(dev))
1742 ier = I915_READ(DEIER) | I915_READ(GTIER);
1743 else
1744 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001745 if (!ier) {
1746 DRM_ERROR("something (likely vbetool) disabled "
1747 "interrupts, re-enabling\n");
1748 i915_driver_irq_preinstall(dev);
1749 i915_driver_irq_postinstall(dev);
1750 }
1751
Eric Anholt673a3942008-07-30 12:06:12 -07001752 dev_priv->mm.waiting_gem_seqno = seqno;
1753 i915_user_irq_get(dev);
1754 ret = wait_event_interruptible(dev_priv->irq_queue,
1755 i915_seqno_passed(i915_get_gem_seqno(dev),
1756 seqno) ||
1757 dev_priv->mm.wedged);
1758 i915_user_irq_put(dev);
1759 dev_priv->mm.waiting_gem_seqno = 0;
1760 }
1761 if (dev_priv->mm.wedged)
1762 ret = -EIO;
1763
1764 if (ret && ret != -ERESTARTSYS)
1765 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1766 __func__, ret, seqno, i915_get_gem_seqno(dev));
1767
1768 /* Directly dispatch request retiring. While we have the work queue
1769 * to handle this, the waiter on a request often wants an associated
1770 * buffer to have made it to the inactive list, and we would need
1771 * a separate wait queue to handle that.
1772 */
1773 if (ret == 0)
1774 i915_gem_retire_requests(dev);
1775
1776 return ret;
1777}
1778
1779static void
1780i915_gem_flush(struct drm_device *dev,
1781 uint32_t invalidate_domains,
1782 uint32_t flush_domains)
1783{
1784 drm_i915_private_t *dev_priv = dev->dev_private;
1785 uint32_t cmd;
1786 RING_LOCALS;
1787
1788#if WATCH_EXEC
1789 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1790 invalidate_domains, flush_domains);
1791#endif
1792
1793 if (flush_domains & I915_GEM_DOMAIN_CPU)
1794 drm_agp_chipset_flush(dev);
1795
Chris Wilson21d509e2009-06-06 09:46:02 +01001796 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
Eric Anholt673a3942008-07-30 12:06:12 -07001797 /*
1798 * read/write caches:
1799 *
1800 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1801 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1802 * also flushed at 2d versus 3d pipeline switches.
1803 *
1804 * read-only caches:
1805 *
1806 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1807 * MI_READ_FLUSH is set, and is always flushed on 965.
1808 *
1809 * I915_GEM_DOMAIN_COMMAND may not exist?
1810 *
1811 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1812 * invalidated when MI_EXE_FLUSH is set.
1813 *
1814 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1815 * invalidated with every MI_FLUSH.
1816 *
1817 * TLBs:
1818 *
1819 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1820 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
1821 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1822 * are flushed at any MI_FLUSH.
1823 */
1824
1825 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1826 if ((invalidate_domains|flush_domains) &
1827 I915_GEM_DOMAIN_RENDER)
1828 cmd &= ~MI_NO_WRITE_FLUSH;
1829 if (!IS_I965G(dev)) {
1830 /*
1831 * On the 965, the sampler cache always gets flushed
1832 * and this bit is reserved.
1833 */
1834 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1835 cmd |= MI_READ_FLUSH;
1836 }
1837 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1838 cmd |= MI_EXE_FLUSH;
1839
1840#if WATCH_EXEC
1841 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1842#endif
1843 BEGIN_LP_RING(2);
1844 OUT_RING(cmd);
1845 OUT_RING(0); /* noop */
1846 ADVANCE_LP_RING();
1847 }
1848}
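
/*
 * Worked example of the command assembled above: on a pre-965 chip, flushing
 * a RENDER write domain while invalidating SAMPLER and INSTRUCTION reads
 * yields
 *
 *	cmd = MI_FLUSH | MI_READ_FLUSH | MI_EXE_FLUSH;
 *
 * with MI_NO_WRITE_FLUSH cleared because RENDER appears in the combined
 * mask. A CPU write domain, by contrast, is handled by the
 * drm_agp_chipset_flush() call at the top rather than by ring commands.
 */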
1849
1850/**
1851 * Ensures that all rendering to the object has completed and the object is
1852 * safe to unbind from the GTT or access from the CPU.
1853 */
1854static int
1855i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1856{
1857 struct drm_device *dev = obj->dev;
1858 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1859 int ret;
1860
Eric Anholte47c68e2008-11-14 13:35:19 -08001861 /* This function only exists to support waiting for existing rendering,
1862 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07001863 */
Eric Anholte47c68e2008-11-14 13:35:19 -08001864 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07001865
1866 /* If there is rendering queued on the buffer being evicted, wait for
1867 * it.
1868 */
1869 if (obj_priv->active) {
1870#if WATCH_BUF
1871 DRM_INFO("%s: object %p wait for seqno %08x\n",
1872 __func__, obj, obj_priv->last_rendering_seqno);
1873#endif
1874 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1875 if (ret != 0)
1876 return ret;
1877 }
1878
1879 return 0;
1880}
1881
1882/**
1883 * Unbinds an object from the GTT aperture.
1884 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08001885int
Eric Anholt673a3942008-07-30 12:06:12 -07001886i915_gem_object_unbind(struct drm_gem_object *obj)
1887{
1888 struct drm_device *dev = obj->dev;
1889 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1890 int ret = 0;
1891
1892#if WATCH_BUF
1893 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1894 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1895#endif
1896 if (obj_priv->gtt_space == NULL)
1897 return 0;
1898
1899 if (obj_priv->pin_count != 0) {
1900 DRM_ERROR("Attempting to unbind pinned buffer\n");
1901 return -EINVAL;
1902 }
1903
Eric Anholt673a3942008-07-30 12:06:12 -07001904 /* Move the object to the CPU domain to ensure that
1905 * any possible CPU writes while it's not in the GTT
1906 * are flushed when we go to remap it. This will
1907 * also ensure that all pending GPU writes are finished
1908 * before we unbind.
1909 */
Eric Anholte47c68e2008-11-14 13:35:19 -08001910 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07001911 if (ret) {
Eric Anholte47c68e2008-11-14 13:35:19 -08001912 if (ret != -ERESTARTSYS)
1913 DRM_ERROR("set_domain failed: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07001914 return ret;
1915 }
1916
1917 if (obj_priv->agp_mem != NULL) {
1918 drm_unbind_agp(obj_priv->agp_mem);
1919 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1920 obj_priv->agp_mem = NULL;
1921 }
1922
1923 BUG_ON(obj_priv->active);
1924
Jesse Barnesde151cf2008-11-12 10:03:55 -08001925 /* blow away mappings if mapped through GTT */
Chris Wilson901782b2009-07-10 08:18:50 +01001926 i915_gem_release_mmap(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001927
1928 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1929 i915_gem_clear_fence_reg(obj);
1930
Eric Anholt856fa192009-03-19 14:10:50 -07001931 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001932
1933 if (obj_priv->gtt_space) {
1934 atomic_dec(&dev->gtt_count);
1935 atomic_sub(obj->size, &dev->gtt_memory);
1936
1937 drm_mm_put_block(obj_priv->gtt_space);
1938 obj_priv->gtt_space = NULL;
1939 }
1940
1941 /* Remove ourselves from the LRU list if present. */
1942 if (!list_empty(&obj_priv->list))
1943 list_del_init(&obj_priv->list);
1944
1945 return 0;
1946}
1947
1948static int
1949i915_gem_evict_something(struct drm_device *dev)
1950{
1951 drm_i915_private_t *dev_priv = dev->dev_private;
1952 struct drm_gem_object *obj;
1953 struct drm_i915_gem_object *obj_priv;
1954 int ret = 0;
1955
1956 for (;;) {
1957 /* If there's an inactive buffer available now, grab it
1958 * and be done.
1959 */
1960 if (!list_empty(&dev_priv->mm.inactive_list)) {
1961 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
1962 struct drm_i915_gem_object,
1963 list);
1964 obj = obj_priv->obj;
1965 BUG_ON(obj_priv->pin_count != 0);
1966#if WATCH_LRU
1967 DRM_INFO("%s: evicting %p\n", __func__, obj);
1968#endif
1969 BUG_ON(obj_priv->active);
1970
1971 /* Wait on the rendering and unbind the buffer. */
1972 ret = i915_gem_object_unbind(obj);
1973 break;
1974 }
1975
1976 /* If we didn't get anything, but the ring is still processing
1977 * things, wait for one of those things to finish and hopefully
1978 * leave us a buffer to evict.
1979 */
1980 if (!list_empty(&dev_priv->mm.request_list)) {
1981 struct drm_i915_gem_request *request;
1982
1983 request = list_first_entry(&dev_priv->mm.request_list,
1984 struct drm_i915_gem_request,
1985 list);
1986
1987 ret = i915_wait_request(dev, request->seqno);
1988 if (ret)
1989 break;
1990
1991 /* if waiting caused an object to become inactive,
1992 * then loop around and wait for it. Otherwise, we
1993 * assume that waiting freed and unbound something,
1994 * so there should now be some space in the GTT
1995 */
1996 if (!list_empty(&dev_priv->mm.inactive_list))
1997 continue;
1998 break;
1999 }
2000
2001 /* If we didn't have anything on the request list but there
2002 * are buffers awaiting a flush, emit one and try again.
2003 * When we wait on it, those buffers waiting for that flush
2004 * will get moved to inactive.
2005 */
2006 if (!list_empty(&dev_priv->mm.flushing_list)) {
2007 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2008 struct drm_i915_gem_object,
2009 list);
2010 obj = obj_priv->obj;
2011
2012 i915_gem_flush(dev,
2013 obj->write_domain,
2014 obj->write_domain);
Eric Anholtb9624422009-06-03 07:27:35 +00002015 i915_add_request(dev, NULL, obj->write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07002016
2017 obj = NULL;
2018 continue;
2019 }
2020
2021 DRM_ERROR("inactive empty %d request empty %d "
2022 "flushing empty %d\n",
2023 list_empty(&dev_priv->mm.inactive_list),
2024 list_empty(&dev_priv->mm.request_list),
2025 list_empty(&dev_priv->mm.flushing_list));
2026 /* If we didn't do any of the above, there's nothing to be done
2027 * and we just can't fit it in.
2028 */
Chris Wilson2939e1f2009-06-06 09:46:03 +01002029 return -ENOSPC;
Eric Anholt673a3942008-07-30 12:06:12 -07002030 }
2031 return ret;
2032}
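
/*
 * The loop above tries progressively more expensive ways of freeing GTT
 * space, roughly:
 *
 *	1. unbind the oldest inactive buffer, if there is one;
 *	2. otherwise wait on the oldest outstanding request, which may retire
 *	   buffers onto the inactive list, and retry;
 *	3. otherwise emit a flush (and a request) for the oldest flushing-list
 *	   buffer so that a later wait can move it to inactive, and retry;
 *	4. give up with -ENOSPC if all three lists are empty.
 */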
2033
2034static int
Keith Packardac94a962008-11-20 23:30:27 -08002035i915_gem_evict_everything(struct drm_device *dev)
2036{
2037 int ret;
2038
2039 for (;;) {
2040 ret = i915_gem_evict_something(dev);
2041 if (ret != 0)
2042 break;
2043 }
Chris Wilson2939e1f2009-06-06 09:46:03 +01002044 if (ret == -ENOSPC)
Owain Ainsworth15c35332008-12-06 20:42:20 -08002045 return 0;
Keith Packardac94a962008-11-20 23:30:27 -08002046 return ret;
2047}
2048
Ben Gamari6911a9b2009-04-02 11:24:54 -07002049int
Eric Anholt856fa192009-03-19 14:10:50 -07002050i915_gem_object_get_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002051{
2052 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2053 int page_count, i;
2054 struct address_space *mapping;
2055 struct inode *inode;
2056 struct page *page;
2057 int ret;
2058
Eric Anholt856fa192009-03-19 14:10:50 -07002059 if (obj_priv->pages_refcount++ != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002060 return 0;
2061
2062 /* Get the list of pages out of our struct file. They'll be pinned
2063 * at this point until we release them.
2064 */
2065 page_count = obj->size / PAGE_SIZE;
Eric Anholt856fa192009-03-19 14:10:50 -07002066 BUG_ON(obj_priv->pages != NULL);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07002067 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
Eric Anholt856fa192009-03-19 14:10:50 -07002068 if (obj_priv->pages == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07002069 DRM_ERROR("Failed to allocate page list\n");
Eric Anholt856fa192009-03-19 14:10:50 -07002070 obj_priv->pages_refcount--;
Eric Anholt673a3942008-07-30 12:06:12 -07002071 return -ENOMEM;
2072 }
2073
2074 inode = obj->filp->f_path.dentry->d_inode;
2075 mapping = inode->i_mapping;
2076 for (i = 0; i < page_count; i++) {
2077 page = read_mapping_page(mapping, i, NULL);
2078 if (IS_ERR(page)) {
2079 ret = PTR_ERR(page);
2080 DRM_ERROR("read_mapping_page failed: %d\n", ret);
Eric Anholt856fa192009-03-19 14:10:50 -07002081 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002082 return ret;
2083 }
Eric Anholt856fa192009-03-19 14:10:50 -07002084 obj_priv->pages[i] = page;
Eric Anholt673a3942008-07-30 12:06:12 -07002085 }
Eric Anholt280b7132009-03-12 16:56:27 -07002086
2087 if (obj_priv->tiling_mode != I915_TILING_NONE)
2088 i915_gem_object_do_bit_17_swizzle(obj);
2089
Eric Anholt673a3942008-07-30 12:06:12 -07002090 return 0;
2091}
2092
Jesse Barnesde151cf2008-11-12 10:03:55 -08002093static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2094{
2095 struct drm_gem_object *obj = reg->obj;
2096 struct drm_device *dev = obj->dev;
2097 drm_i915_private_t *dev_priv = dev->dev_private;
2098 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2099 int regnum = obj_priv->fence_reg;
2100 uint64_t val;
2101
2102 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2103 0xfffff000) << 32;
2104 val |= obj_priv->gtt_offset & 0xfffff000;
2105 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2106 if (obj_priv->tiling_mode == I915_TILING_Y)
2107 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2108 val |= I965_FENCE_REG_VALID;
2109
2110 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2111}
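
/*
 * Sketch of the 64-bit fence value assembled above (field positions follow
 * the I965_FENCE_* macros used in the code):
 *
 *	upper dword: (gtt_offset + size - 4096) & 0xfffff000, the last page
 *		     of the fenced range
 *	lower dword: gtt_offset & 0xfffff000, the pitch field
 *		     ((stride / 128) - 1), the Y-tiling bit and
 *		     I965_FENCE_REG_VALID
 *
 * For example, a 1MB X-tiled object at GTT offset 0x00100000 with a
 * 4096-byte stride would give an upper dword of 0x001ff000 and a lower dword
 * of 0x00100000 with a pitch field of 31 and the valid bit set.
 */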
2112
2113static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2114{
2115 struct drm_gem_object *obj = reg->obj;
2116 struct drm_device *dev = obj->dev;
2117 drm_i915_private_t *dev_priv = dev->dev_private;
2118 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2119 int regnum = obj_priv->fence_reg;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002120 int tile_width;
Eric Anholtdc529a42009-03-10 22:34:49 -07002121 uint32_t fence_reg, val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002122 uint32_t pitch_val;
2123
2124 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2125 (obj_priv->gtt_offset & (obj->size - 1))) {
Linus Torvaldsf06da262009-02-09 08:57:29 -08002126 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002127 __func__, obj_priv->gtt_offset, obj->size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002128 return;
2129 }
2130
Jesse Barnes0f973f22009-01-26 17:10:45 -08002131 if (obj_priv->tiling_mode == I915_TILING_Y &&
2132 HAS_128_BYTE_Y_TILING(dev))
2133 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002134 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002135 tile_width = 512;
2136
2137 /* Note: pitch better be a power of two tile widths */
2138 pitch_val = obj_priv->stride / tile_width;
2139 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002140
2141 val = obj_priv->gtt_offset;
2142 if (obj_priv->tiling_mode == I915_TILING_Y)
2143 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2144 val |= I915_FENCE_SIZE_BITS(obj->size);
2145 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2146 val |= I830_FENCE_REG_VALID;
2147
Eric Anholtdc529a42009-03-10 22:34:49 -07002148 if (regnum < 8)
2149 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2150 else
2151 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2152 I915_WRITE(fence_reg, val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002153}
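
/*
 * The pre-965 pitch field above is log2 of the pitch in tile widths: for
 * example, a 2048-byte stride with 512-byte X tiles gives
 * stride / tile_width == 4 and ffs(4) - 1 == 2. This is also why the pitch
 * must be a power-of-two number of tile widths for these registers.
 */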
2154
2155static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2156{
2157 struct drm_gem_object *obj = reg->obj;
2158 struct drm_device *dev = obj->dev;
2159 drm_i915_private_t *dev_priv = dev->dev_private;
2160 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2161 int regnum = obj_priv->fence_reg;
2162 uint32_t val;
2163 uint32_t pitch_val;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002164 uint32_t fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002165
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002166 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
Jesse Barnesde151cf2008-11-12 10:03:55 -08002167 (obj_priv->gtt_offset & (obj->size - 1))) {
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002168 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002169 __func__, obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002170 return;
2171 }
2172
Eric Anholte76a16d2009-05-26 17:44:56 -07002173 pitch_val = obj_priv->stride / 128;
2174 pitch_val = ffs(pitch_val) - 1;
2175 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2176
Jesse Barnesde151cf2008-11-12 10:03:55 -08002177 val = obj_priv->gtt_offset;
2178 if (obj_priv->tiling_mode == I915_TILING_Y)
2179 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002180 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2181 WARN_ON(fence_size_bits & ~0x00000f00);
2182 val |= fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002183 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2184 val |= I830_FENCE_REG_VALID;
2185
2186 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002187}
2188
2189/**
2190 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2191 * @obj: object to map through a fence reg
2192 *
2193 * When mapping objects through the GTT, userspace wants to be able to write
2194 * to them without having to worry about swizzling if the object is tiled.
2195 *
2196 * This function walks the fence regs looking for a free one for @obj,
2197 * stealing one if it can't find any.
2198 *
2199 * It then sets up the reg based on the object's properties: address, pitch
2200 * and tiling format.
2201 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002202int
2203i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002204{
2205 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002206 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002207 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2208 struct drm_i915_fence_reg *reg = NULL;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002209 struct drm_i915_gem_object *old_obj_priv = NULL;
2210 int i, ret, avail;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002211
2212 switch (obj_priv->tiling_mode) {
2213 case I915_TILING_NONE:
2214 WARN(1, "allocating a fence for non-tiled object?\n");
2215 break;
2216 case I915_TILING_X:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002217 if (!obj_priv->stride)
2218 return -EINVAL;
2219 WARN((obj_priv->stride & (512 - 1)),
2220 "object 0x%08x is X tiled but has non-512B pitch\n",
2221 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002222 break;
2223 case I915_TILING_Y:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002224 if (!obj_priv->stride)
2225 return -EINVAL;
2226 WARN((obj_priv->stride & (128 - 1)),
2227 "object 0x%08x is Y tiled but has non-128B pitch\n",
2228 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002229 break;
2230 }
2231
2232 /* First try to find a free reg */
Chris Wilson9b2412f2009-02-11 14:26:44 +00002233try_again:
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002234 avail = 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002235 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2236 reg = &dev_priv->fence_regs[i];
2237 if (!reg->obj)
2238 break;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002239
2240 old_obj_priv = reg->obj->driver_private;
2241 if (!old_obj_priv->pin_count)
2242 avail++;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002243 }
2244
2245 /* None available, try to steal one or wait for a user to finish */
2246 if (i == dev_priv->num_fence_regs) {
Chris Wilsond7619c42009-02-11 14:26:47 +00002247 uint32_t seqno = dev_priv->mm.next_gem_seqno;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002248
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002249 if (avail == 0)
Chris Wilson2939e1f2009-06-06 09:46:03 +01002250 return -ENOSPC;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002251
Jesse Barnesde151cf2008-11-12 10:03:55 -08002252 for (i = dev_priv->fence_reg_start;
2253 i < dev_priv->num_fence_regs; i++) {
Chris Wilsond7619c42009-02-11 14:26:47 +00002254 uint32_t this_seqno;
2255
Jesse Barnesde151cf2008-11-12 10:03:55 -08002256 reg = &dev_priv->fence_regs[i];
2257 old_obj_priv = reg->obj->driver_private;
Chris Wilsond7619c42009-02-11 14:26:47 +00002258
2259 if (old_obj_priv->pin_count)
2260 continue;
2261
2262 /* i915 uses fences for GPU access to tiled buffers */
2263 if (IS_I965G(dev) || !old_obj_priv->active)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002264 break;
Chris Wilsond7619c42009-02-11 14:26:47 +00002265
2266 /* find the seqno of the first available fence */
2267 this_seqno = old_obj_priv->last_rendering_seqno;
2268 if (this_seqno != 0 &&
2269 reg->obj->write_domain == 0 &&
2270 i915_seqno_passed(seqno, this_seqno))
2271 seqno = this_seqno;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002272 }
2273
2274 /*
2275 * Now things get ugly... we have to wait for one of the
2276 * objects to finish before trying again.
2277 */
2278 if (i == dev_priv->num_fence_regs) {
Chris Wilsond7619c42009-02-11 14:26:47 +00002279 if (seqno == dev_priv->mm.next_gem_seqno) {
2280 i915_gem_flush(dev,
2281 I915_GEM_GPU_DOMAINS,
2282 I915_GEM_GPU_DOMAINS);
Eric Anholtb9624422009-06-03 07:27:35 +00002283 seqno = i915_add_request(dev, NULL,
Chris Wilsond7619c42009-02-11 14:26:47 +00002284 I915_GEM_GPU_DOMAINS);
2285 if (seqno == 0)
2286 return -ENOMEM;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002287 }
Chris Wilsond7619c42009-02-11 14:26:47 +00002288
2289 ret = i915_wait_request(dev, seqno);
2290 if (ret)
2291 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002292 goto try_again;
2293 }
2294
2295 /*
2296 * Zap this virtual mapping so we can set up a fence again
2297 * for this object next time we need it.
2298 */
Chris Wilson901782b2009-07-10 08:18:50 +01002299 i915_gem_release_mmap(reg->obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002300 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2301 }
2302
2303 obj_priv->fence_reg = i;
2304 reg->obj = obj;
2305
2306 if (IS_I965G(dev))
2307 i965_write_fence_reg(reg);
2308 else if (IS_I9XX(dev))
2309 i915_write_fence_reg(reg);
2310 else
2311 i830_write_fence_reg(reg);
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002312
2313 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002314}
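
/*
 * In outline, the allocation policy above is: take a free fence register if
 * one exists; otherwise steal a register whose object is not pinned, on
 * pre-965 chips first waiting for that object's outstanding rendering (since
 * the GPU itself accesses tiled buffers through fences there); then zap the
 * old object's GTT mapping so a later fault can set a fence up for it again.
 */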
2315
2316/**
2317 * i915_gem_clear_fence_reg - clear out fence register info
2318 * @obj: object to clear
2319 *
2320 * Zeroes out the fence register itself and clears out the associated
2321 * data structures in dev_priv and obj_priv.
2322 */
2323static void
2324i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2325{
2326 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002327 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002328 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2329
2330 if (IS_I965G(dev))
2331 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
Eric Anholtdc529a42009-03-10 22:34:49 -07002332 else {
2333 uint32_t fence_reg;
2334
2335 if (obj_priv->fence_reg < 8)
2336 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2337 else
2338 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2339 8) * 4;
2340
2341 I915_WRITE(fence_reg, 0);
2342 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002343
2344 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2345 obj_priv->fence_reg = I915_FENCE_REG_NONE;
2346}
2347
Eric Anholt673a3942008-07-30 12:06:12 -07002348/**
Chris Wilson52dc7d32009-06-06 09:46:01 +01002349 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2350 * to the buffer to finish, and then resets the fence register.
2351 * @obj: tiled object holding a fence register.
2352 *
2353 * Zeroes out the fence register itself and clears out the associated
2354 * data structures in dev_priv and obj_priv.
2355 */
2356int
2357i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2358{
2359 struct drm_device *dev = obj->dev;
2360 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2361
2362 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2363 return 0;
2364
2365 /* On the i915, GPU access to tiled buffers is via a fence,
2366 * therefore we must wait for any outstanding access to complete
2367 * before clearing the fence.
2368 */
2369 if (!IS_I965G(dev)) {
2370 int ret;
2371
2372 i915_gem_object_flush_gpu_write_domain(obj);
2373 i915_gem_object_flush_gtt_write_domain(obj);
2374 ret = i915_gem_object_wait_rendering(obj);
2375 if (ret != 0)
2376 return ret;
2377 }
2378
2379	i915_gem_clear_fence_reg(obj);
2380
2381 return 0;
2382}
2383
2384/**
Eric Anholt673a3942008-07-30 12:06:12 -07002385 * Finds free space in the GTT aperture and binds the object there.
2386 */
2387static int
2388i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2389{
2390 struct drm_device *dev = obj->dev;
2391 drm_i915_private_t *dev_priv = dev->dev_private;
2392 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2393 struct drm_mm_node *free_space;
2394 int page_count, ret;
2395
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08002396 if (dev_priv->mm.suspended)
2397 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002398 if (alignment == 0)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002399 alignment = i915_gem_get_gtt_alignment(obj);
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002400 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002401 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2402 return -EINVAL;
2403 }
2404
2405 search_free:
2406 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2407 obj->size, alignment, 0);
2408 if (free_space != NULL) {
2409 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2410 alignment);
2411 if (obj_priv->gtt_space != NULL) {
2412 obj_priv->gtt_space->private = obj;
2413 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2414 }
2415 }
2416 if (obj_priv->gtt_space == NULL) {
Carl Worth5e118f42009-03-20 11:54:25 -07002417 bool lists_empty;
2418
Eric Anholt673a3942008-07-30 12:06:12 -07002419 /* If the gtt is empty and we're still having trouble
2420 * fitting our object in, we're out of memory.
2421 */
2422#if WATCH_LRU
2423 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2424#endif
Carl Worth5e118f42009-03-20 11:54:25 -07002425 spin_lock(&dev_priv->mm.active_list_lock);
2426 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2427 list_empty(&dev_priv->mm.flushing_list) &&
2428 list_empty(&dev_priv->mm.active_list));
2429 spin_unlock(&dev_priv->mm.active_list_lock);
2430 if (lists_empty) {
Eric Anholt673a3942008-07-30 12:06:12 -07002431 DRM_ERROR("GTT full, but LRU list empty\n");
Chris Wilson2939e1f2009-06-06 09:46:03 +01002432 return -ENOSPC;
Eric Anholt673a3942008-07-30 12:06:12 -07002433 }
2434
2435 ret = i915_gem_evict_something(dev);
2436 if (ret != 0) {
Keith Packardac94a962008-11-20 23:30:27 -08002437 if (ret != -ERESTARTSYS)
2438 DRM_ERROR("Failed to evict a buffer %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07002439 return ret;
2440 }
2441 goto search_free;
2442 }
2443
2444#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02002445 DRM_INFO("Binding object of size %zd at 0x%08x\n",
Eric Anholt673a3942008-07-30 12:06:12 -07002446 obj->size, obj_priv->gtt_offset);
2447#endif
Eric Anholt856fa192009-03-19 14:10:50 -07002448 ret = i915_gem_object_get_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002449 if (ret) {
2450 drm_mm_put_block(obj_priv->gtt_space);
2451 obj_priv->gtt_space = NULL;
2452 return ret;
2453 }
2454
2455 page_count = obj->size / PAGE_SIZE;
2456 /* Create an AGP memory structure pointing at our pages, and bind it
2457 * into the GTT.
2458 */
2459 obj_priv->agp_mem = drm_agp_bind_pages(dev,
Eric Anholt856fa192009-03-19 14:10:50 -07002460 obj_priv->pages,
Eric Anholt673a3942008-07-30 12:06:12 -07002461 page_count,
Keith Packardba1eb1d2008-10-14 19:55:10 -07002462 obj_priv->gtt_offset,
2463 obj_priv->agp_type);
Eric Anholt673a3942008-07-30 12:06:12 -07002464 if (obj_priv->agp_mem == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002465 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002466 drm_mm_put_block(obj_priv->gtt_space);
2467 obj_priv->gtt_space = NULL;
2468 return -ENOMEM;
2469 }
2470 atomic_inc(&dev->gtt_count);
2471 atomic_add(obj->size, &dev->gtt_memory);
2472
2473 /* Assert that the object is not currently in any GPU domain. As it
2474 * wasn't in the GTT, there shouldn't be any way it could have been in
2475 * a GPU cache
2476 */
Chris Wilson21d509e2009-06-06 09:46:02 +01002477 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2478 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002479
2480 return 0;
2481}
2482
2483void
2484i915_gem_clflush_object(struct drm_gem_object *obj)
2485{
2486 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2487
2488 /* If we don't have a page list set up, then we're not pinned
2489 * to GPU, and we can ignore the cache flush because it'll happen
2490 * again at bind time.
2491 */
Eric Anholt856fa192009-03-19 14:10:50 -07002492 if (obj_priv->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002493 return;
2494
Eric Anholtcfa16a02009-05-26 18:46:16 -07002495 /* XXX: The 865 in particular appears to be weird in how it handles
2496 * cache flushing. We haven't figured it out, but the
2497 * clflush+agp_chipset_flush doesn't appear to successfully get the
2498 * data visible to the GPU, while wbinvd + agp_chipset_flush does.
2499 */
2500 if (IS_I865G(obj->dev)) {
2501 wbinvd();
2502 return;
2503 }
2504
Eric Anholt856fa192009-03-19 14:10:50 -07002505 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002506}
2507
Eric Anholte47c68e2008-11-14 13:35:19 -08002508/** Flushes any GPU write domain for the object if it's dirty. */
2509static void
2510i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2511{
2512 struct drm_device *dev = obj->dev;
2513 uint32_t seqno;
2514
2515 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2516 return;
2517
2518 /* Queue the GPU write cache flushing we need. */
2519 i915_gem_flush(dev, 0, obj->write_domain);
Eric Anholtb9624422009-06-03 07:27:35 +00002520 seqno = i915_add_request(dev, NULL, obj->write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002521 obj->write_domain = 0;
2522 i915_gem_object_move_to_active(obj, seqno);
2523}
2524
2525/** Flushes the GTT write domain for the object if it's dirty. */
2526static void
2527i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2528{
2529 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2530 return;
2531
2532 /* No actual flushing is required for the GTT write domain. Writes
2533 * to it immediately go to main memory as far as we know, so there's
2534 * no chipset flush. It also doesn't land in render cache.
2535 */
2536 obj->write_domain = 0;
2537}
2538
2539/** Flushes the CPU write domain for the object if it's dirty. */
2540static void
2541i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2542{
2543 struct drm_device *dev = obj->dev;
2544
2545 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2546 return;
2547
2548 i915_gem_clflush_object(obj);
2549 drm_agp_chipset_flush(dev);
2550 obj->write_domain = 0;
2551}
2552
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002553/**
2554 * Moves a single object to the GTT read, and possibly write domain.
2555 *
2556 * This function returns when the move is complete, including waiting on
2557 * flushes to occur.
2558 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002559int
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002560i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2561{
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002562 struct drm_i915_gem_object *obj_priv = obj->driver_private;
Eric Anholte47c68e2008-11-14 13:35:19 -08002563 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002564
Eric Anholt02354392008-11-26 13:58:13 -08002565 /* Not valid to be called on unbound objects. */
2566 if (obj_priv->gtt_space == NULL)
2567 return -EINVAL;
2568
Eric Anholte47c68e2008-11-14 13:35:19 -08002569 i915_gem_object_flush_gpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002570 /* Wait on any GPU rendering and flushing to occur. */
Eric Anholte47c68e2008-11-14 13:35:19 -08002571 ret = i915_gem_object_wait_rendering(obj);
2572 if (ret != 0)
2573 return ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002574
2575 /* If we're writing through the GTT domain, then CPU and GPU caches
2576 * will need to be invalidated at next use.
2577 */
2578 if (write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002579 obj->read_domains &= I915_GEM_DOMAIN_GTT;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002580
Eric Anholte47c68e2008-11-14 13:35:19 -08002581 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002582
2583 /* It should now be out of any other write domains, and we can update
2584 * the domain values for our changes.
2585 */
2586 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2587 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002588 if (write) {
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002589 obj->write_domain = I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002590 obj_priv->dirty = 1;
2591 }
2592
2593 return 0;
2594}
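
/*
 * For example, after i915_gem_object_set_to_gtt_domain(obj, 1) returns
 * successfully the object is left with read_domains == I915_GEM_DOMAIN_GTT,
 * write_domain == I915_GEM_DOMAIN_GTT and dirty set, so the CPU and GPU
 * caches will be invalidated at next use as described above.
 */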
2595
2596/**
2597 * Moves a single object to the CPU read, and possibly write domain.
2598 *
2599 * This function returns when the move is complete, including waiting on
2600 * flushes to occur.
2601 */
2602static int
2603i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2604{
Eric Anholte47c68e2008-11-14 13:35:19 -08002605 int ret;
2606
2607 i915_gem_object_flush_gpu_write_domain(obj);
2608 /* Wait on any GPU rendering and flushing to occur. */
2609 ret = i915_gem_object_wait_rendering(obj);
2610 if (ret != 0)
2611 return ret;
2612
2613 i915_gem_object_flush_gtt_write_domain(obj);
2614
2615 /* If we have a partially-valid cache of the object in the CPU,
2616 * finish invalidating it and free the per-page flags.
2617 */
2618 i915_gem_object_set_to_full_cpu_read_domain(obj);
2619
2620 /* Flush the CPU cache if it's still invalid. */
2621 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2622 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002623
2624 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2625 }
2626
2627 /* It should now be out of any other write domains, and we can update
2628 * the domain values for our changes.
2629 */
2630 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2631
2632 /* If we're writing through the CPU, then the GPU read domains will
2633 * need to be invalidated at next use.
2634 */
2635 if (write) {
2636 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2637 obj->write_domain = I915_GEM_DOMAIN_CPU;
2638 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002639
2640 return 0;
2641}
2642
Eric Anholt673a3942008-07-30 12:06:12 -07002643/*
2644 * Set the next domain for the specified object. This
2645 * may not actually perform the necessary flushing/invaliding though,
2646 * as that may want to be batched with other set_domain operations
2647 *
2648 * This is (we hope) the only really tricky part of gem. The goal
2649 * is fairly simple -- track which caches hold bits of the object
2650 * and make sure they remain coherent. A few concrete examples may
2651 * help to explain how it works. For shorthand, we use the notation
2652 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2653 * a pair of read and write domain masks.
2654 *
2655 * Case 1: the batch buffer
2656 *
2657 * 1. Allocated
2658 * 2. Written by CPU
2659 * 3. Mapped to GTT
2660 * 4. Read by GPU
2661 * 5. Unmapped from GTT
2662 * 6. Freed
2663 *
2664 * Let's take these a step at a time
2665 *
2666 * 1. Allocated
2667 * Pages allocated from the kernel may still have
2668 * cache contents, so we set them to (CPU, CPU) always.
2669 * 2. Written by CPU (using pwrite)
2670 * The pwrite function calls set_domain (CPU, CPU) and
2671 * this function does nothing (as nothing changes)
2672 * 3. Mapped to GTT
2673 * This function asserts that the object is not
2674 * currently in any GPU-based read or write domains
2675 * 4. Read by GPU
2676 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2677 * As write_domain is zero, this function adds in the
2678 * current read domains (CPU+COMMAND, 0).
2679 * flush_domains is set to CPU.
2680 * invalidate_domains is set to COMMAND
2681 * clflush is run to get data out of the CPU caches
2682 * then i915_dev_set_domain calls i915_gem_flush to
2683 * emit an MI_FLUSH and drm_agp_chipset_flush
2684 * 5. Unmapped from GTT
2685 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2686 * flush_domains and invalidate_domains end up both zero
2687 * so no flushing/invalidating happens
2688 * 6. Freed
2689 * yay, done
2690 *
2691 * Case 2: The shared render buffer
2692 *
2693 * 1. Allocated
2694 * 2. Mapped to GTT
2695 * 3. Read/written by GPU
2696 * 4. set_domain to (CPU,CPU)
2697 * 5. Read/written by CPU
2698 * 6. Read/written by GPU
2699 *
2700 * 1. Allocated
2701 * Same as last example, (CPU, CPU)
2702 * 2. Mapped to GTT
2703 * Nothing changes (assertions find that it is not in the GPU)
2704 * 3. Read/written by GPU
2705 * execbuffer calls set_domain (RENDER, RENDER)
2706 * flush_domains gets CPU
2707 * invalidate_domains gets GPU
2708 * clflush (obj)
2709 * MI_FLUSH and drm_agp_chipset_flush
2710 * 4. set_domain (CPU, CPU)
2711 * flush_domains gets GPU
2712 * invalidate_domains gets CPU
2713 * wait_rendering (obj) to make sure all drawing is complete.
2714 * This will include an MI_FLUSH to get the data from GPU
2715 * to memory
2716 * clflush (obj) to invalidate the CPU cache
2717 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2718 * 5. Read/written by CPU
2719 * cache lines are loaded and dirtied
2720 * 6. Read/written by GPU
2721 * Same as last GPU access
2722 *
2723 * Case 3: The constant buffer
2724 *
2725 * 1. Allocated
2726 * 2. Written by CPU
2727 * 3. Read by GPU
2728 * 4. Updated (written) by CPU again
2729 * 5. Read by GPU
2730 *
2731 * 1. Allocated
2732 * (CPU, CPU)
2733 * 2. Written by CPU
2734 * (CPU, CPU)
2735 * 3. Read by GPU
2736 * (CPU+RENDER, 0)
2737 * flush_domains = CPU
2738 * invalidate_domains = RENDER
2739 * clflush (obj)
2740 * MI_FLUSH
2741 * drm_agp_chipset_flush
2742 * 4. Updated (written) by CPU again
2743 * (CPU, CPU)
2744 * flush_domains = 0 (no previous write domain)
2745 * invalidate_domains = 0 (no new read domains)
2746 * 5. Read by GPU
2747 * (CPU+RENDER, 0)
2748 * flush_domains = CPU
2749 * invalidate_domains = RENDER
2750 * clflush (obj)
2751 * MI_FLUSH
2752 * drm_agp_chipset_flush
2753 */
Keith Packardc0d90822008-11-20 23:11:08 -08002754static void
Eric Anholt8b0e3782009-02-19 14:40:50 -08002755i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002756{
2757 struct drm_device *dev = obj->dev;
2758 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2759 uint32_t invalidate_domains = 0;
2760 uint32_t flush_domains = 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002761
Eric Anholt8b0e3782009-02-19 14:40:50 -08002762 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2763 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
Eric Anholt673a3942008-07-30 12:06:12 -07002764
2765#if WATCH_BUF
2766 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2767 __func__, obj,
Eric Anholt8b0e3782009-02-19 14:40:50 -08002768 obj->read_domains, obj->pending_read_domains,
2769 obj->write_domain, obj->pending_write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07002770#endif
2771 /*
2772 * If the object isn't moving to a new write domain,
2773 * let the object stay in multiple read domains
2774 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08002775 if (obj->pending_write_domain == 0)
2776 obj->pending_read_domains |= obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07002777 else
2778 obj_priv->dirty = 1;
2779
2780 /*
2781 * Flush the current write domain if
2782 * the new read domains don't match. Invalidate
2783 * any read domains which differ from the old
2784 * write domain
2785 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08002786 if (obj->write_domain &&
2787 obj->write_domain != obj->pending_read_domains) {
Eric Anholt673a3942008-07-30 12:06:12 -07002788 flush_domains |= obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08002789 invalidate_domains |=
2790 obj->pending_read_domains & ~obj->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07002791 }
2792 /*
2793 * Invalidate any read caches which may have
2794 * stale data. That is, any new read domains.
2795 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08002796 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07002797 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2798#if WATCH_BUF
2799 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2800 __func__, flush_domains, invalidate_domains);
2801#endif
Eric Anholt673a3942008-07-30 12:06:12 -07002802 i915_gem_clflush_object(obj);
2803 }
2804
Eric Anholtefbeed92009-02-19 14:54:51 -08002805 /* The actual obj->write_domain will be updated with
2806 * pending_write_domain after we emit the accumulated flush for all
2807 * of our domain changes in execbuffers (which clears objects'
2808 * write_domains). So if we have a current write domain that we
2809 * aren't changing, set pending_write_domain to that.
2810 */
2811 if (flush_domains == 0 && obj->pending_write_domain == 0)
2812 obj->pending_write_domain = obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08002813 obj->read_domains = obj->pending_read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07002814
2815 dev->invalidate_domains |= invalidate_domains;
2816 dev->flush_domains |= flush_domains;
2817#if WATCH_BUF
2818 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2819 __func__,
2820 obj->read_domains, obj->write_domain,
2821 dev->invalidate_domains, dev->flush_domains);
2822#endif
Eric Anholt673a3942008-07-30 12:06:12 -07002823}
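
/*
 * Concretely, for the constant-buffer case described above (object currently
 * (CPU, CPU), execbuffer asking for (RENDER, 0)): pending_write_domain is 0
 * so the CPU read domain is carried over, the CPU write domain differs from
 * the pending reads so flush_domains gains CPU and invalidate_domains gains
 * RENDER, the CPU bit triggers the clflush, and the object ends up with
 * read_domains == CPU|RENDER while dev->flush_domains and
 * dev->invalidate_domains accumulate CPU and RENDER for the flush emitted
 * later in execbuffer.
 */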
2824
2825/**
Eric Anholte47c68e2008-11-14 13:35:19 -08002826 * Moves the object from a partially valid CPU read domain to a fully valid one.
Eric Anholt673a3942008-07-30 12:06:12 -07002827 *
Eric Anholte47c68e2008-11-14 13:35:19 -08002828 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2829 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
2830 */
2831static void
2832i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2833{
Eric Anholte47c68e2008-11-14 13:35:19 -08002834 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2835
2836 if (!obj_priv->page_cpu_valid)
2837 return;
2838
2839 /* If we're partially in the CPU read domain, finish moving it in.
2840 */
2841 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
2842 int i;
2843
2844 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2845 if (obj_priv->page_cpu_valid[i])
2846 continue;
Eric Anholt856fa192009-03-19 14:10:50 -07002847 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08002848 }
Eric Anholte47c68e2008-11-14 13:35:19 -08002849 }
2850
2851 /* Free the page_cpu_valid mappings which are now stale, whether
2852 * or not we've got I915_GEM_DOMAIN_CPU.
2853 */
Eric Anholt9a298b22009-03-24 12:23:04 -07002854 kfree(obj_priv->page_cpu_valid);
Eric Anholte47c68e2008-11-14 13:35:19 -08002855 obj_priv->page_cpu_valid = NULL;
2856}
2857
2858/**
2859 * Set the CPU read domain on a range of the object.
2860 *
2861 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2862 * not entirely valid. The page_cpu_valid member of the object flags which
2863 * pages have been flushed, and will be respected by
2864 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2865 * of the whole object.
2866 *
2867 * This function returns when the move is complete, including waiting on
2868 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07002869 */
2870static int
Eric Anholte47c68e2008-11-14 13:35:19 -08002871i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2872 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07002873{
2874 struct drm_i915_gem_object *obj_priv = obj->driver_private;
Eric Anholte47c68e2008-11-14 13:35:19 -08002875 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002876
Eric Anholte47c68e2008-11-14 13:35:19 -08002877 if (offset == 0 && size == obj->size)
2878 return i915_gem_object_set_to_cpu_domain(obj, 0);
2879
2880 i915_gem_object_flush_gpu_write_domain(obj);
2881 /* Wait on any GPU rendering and flushing to occur. */
2882 ret = i915_gem_object_wait_rendering(obj);
2883 if (ret != 0)
2884 return ret;
2885 i915_gem_object_flush_gtt_write_domain(obj);
2886
2887 /* If we're already fully in the CPU read domain, we're done. */
2888 if (obj_priv->page_cpu_valid == NULL &&
2889 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002890 return 0;
2891
Eric Anholte47c68e2008-11-14 13:35:19 -08002892 /* Otherwise, create/clear the per-page CPU read domain flag if we're
2893 * newly adding I915_GEM_DOMAIN_CPU
2894 */
Eric Anholt673a3942008-07-30 12:06:12 -07002895 if (obj_priv->page_cpu_valid == NULL) {
Eric Anholt9a298b22009-03-24 12:23:04 -07002896 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
2897 GFP_KERNEL);
Eric Anholte47c68e2008-11-14 13:35:19 -08002898 if (obj_priv->page_cpu_valid == NULL)
2899 return -ENOMEM;
2900 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
2901 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002902
2903 /* Flush the cache on any pages that are still invalid from the CPU's
2904 * perspective.
2905 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002906 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
2907 i++) {
Eric Anholt673a3942008-07-30 12:06:12 -07002908 if (obj_priv->page_cpu_valid[i])
2909 continue;
2910
Eric Anholt856fa192009-03-19 14:10:50 -07002911 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07002912
2913 obj_priv->page_cpu_valid[i] = 1;
2914 }
2915
Eric Anholte47c68e2008-11-14 13:35:19 -08002916 /* It should now be out of any other write domains, and we can update
2917 * the domain values for our changes.
2918 */
2919 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2920
2921 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2922
Eric Anholt673a3942008-07-30 12:06:12 -07002923 return 0;
2924}
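
/*
 * For example, asking for 6000 bytes at offset 8192 only clflushes pages 2
 * and 3 here (offset / PAGE_SIZE through (offset + size - 1) / PAGE_SIZE,
 * assuming 4096-byte pages) and marks just those entries in page_cpu_valid,
 * leaving the rest of the object untouched until a full CPU-domain
 * transition is needed.
 */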
2925
2926/**
Eric Anholt673a3942008-07-30 12:06:12 -07002927 * Pin an object to the GTT and evaluate the relocations landing in it.
2928 */
2929static int
2930i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2931 struct drm_file *file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002932 struct drm_i915_gem_exec_object *entry,
2933 struct drm_i915_gem_relocation_entry *relocs)
Eric Anholt673a3942008-07-30 12:06:12 -07002934{
2935 struct drm_device *dev = obj->dev;
Keith Packard0839ccb2008-10-30 19:38:48 -07002936 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002937 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2938 int i, ret;
Keith Packard0839ccb2008-10-30 19:38:48 -07002939 void __iomem *reloc_page;
Eric Anholt673a3942008-07-30 12:06:12 -07002940
2941 /* Choose the GTT offset for our buffer and put it there. */
2942 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2943 if (ret)
2944 return ret;
2945
2946 entry->offset = obj_priv->gtt_offset;
2947
Eric Anholt673a3942008-07-30 12:06:12 -07002948 /* Apply the relocations, using the GTT aperture to avoid cache
2949 * flushing requirements.
2950 */
2951 for (i = 0; i < entry->relocation_count; i++) {
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002952 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
Eric Anholt673a3942008-07-30 12:06:12 -07002953 struct drm_gem_object *target_obj;
2954 struct drm_i915_gem_object *target_obj_priv;
Eric Anholt3043c602008-10-02 12:24:47 -07002955 uint32_t reloc_val, reloc_offset;
2956 uint32_t __iomem *reloc_entry;
Eric Anholt673a3942008-07-30 12:06:12 -07002957
Eric Anholt673a3942008-07-30 12:06:12 -07002958 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002959 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07002960 if (target_obj == NULL) {
2961 i915_gem_object_unpin(obj);
2962 return -EBADF;
2963 }
2964 target_obj_priv = target_obj->driver_private;
2965
2966 /* The target buffer should have appeared before us in the
2967 * exec_object list, so it should have a GTT space bound by now.
2968 */
2969 if (target_obj_priv->gtt_space == NULL) {
2970 DRM_ERROR("No GTT space found for object %d\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002971 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07002972 drm_gem_object_unreference(target_obj);
2973 i915_gem_object_unpin(obj);
2974 return -EINVAL;
2975 }
2976
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002977 if (reloc->offset > obj->size - 4) {
Eric Anholt673a3942008-07-30 12:06:12 -07002978 DRM_ERROR("Relocation beyond object bounds: "
2979 "obj %p target %d offset %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002980 obj, reloc->target_handle,
2981 (int) reloc->offset, (int) obj->size);
Eric Anholt673a3942008-07-30 12:06:12 -07002982 drm_gem_object_unreference(target_obj);
2983 i915_gem_object_unpin(obj);
2984 return -EINVAL;
2985 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002986 if (reloc->offset & 3) {
Eric Anholt673a3942008-07-30 12:06:12 -07002987 DRM_ERROR("Relocation not 4-byte aligned: "
2988 "obj %p target %d offset %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002989 obj, reloc->target_handle,
2990 (int) reloc->offset);
Eric Anholt673a3942008-07-30 12:06:12 -07002991 drm_gem_object_unreference(target_obj);
2992 i915_gem_object_unpin(obj);
2993 return -EINVAL;
2994 }
2995
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002996 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
2997 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
Eric Anholte47c68e2008-11-14 13:35:19 -08002998 DRM_ERROR("reloc with read/write CPU domains: "
2999 "obj %p target %d offset %d "
3000 "read %08x write %08x",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003001 obj, reloc->target_handle,
3002 (int) reloc->offset,
3003 reloc->read_domains,
3004 reloc->write_domain);
Chris Wilson491152b2009-02-11 14:26:32 +00003005 drm_gem_object_unreference(target_obj);
3006 i915_gem_object_unpin(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003007 return -EINVAL;
3008 }
3009
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003010 if (reloc->write_domain && target_obj->pending_write_domain &&
3011 reloc->write_domain != target_obj->pending_write_domain) {
Eric Anholt673a3942008-07-30 12:06:12 -07003012 DRM_ERROR("Write domain conflict: "
3013 "obj %p target %d offset %d "
3014 "new %08x old %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003015 obj, reloc->target_handle,
3016 (int) reloc->offset,
3017 reloc->write_domain,
Eric Anholt673a3942008-07-30 12:06:12 -07003018 target_obj->pending_write_domain);
3019 drm_gem_object_unreference(target_obj);
3020 i915_gem_object_unpin(obj);
3021 return -EINVAL;
3022 }
3023
3024#if WATCH_RELOC
3025 DRM_INFO("%s: obj %p offset %08x target %d "
3026 "read %08x write %08x gtt %08x "
3027 "presumed %08x delta %08x\n",
3028 __func__,
3029 obj,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003030 (int) reloc->offset,
3031 (int) reloc->target_handle,
3032 (int) reloc->read_domains,
3033 (int) reloc->write_domain,
Eric Anholt673a3942008-07-30 12:06:12 -07003034 (int) target_obj_priv->gtt_offset,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003035 (int) reloc->presumed_offset,
3036 reloc->delta);
Eric Anholt673a3942008-07-30 12:06:12 -07003037#endif
3038
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003039 target_obj->pending_read_domains |= reloc->read_domains;
3040 target_obj->pending_write_domain |= reloc->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07003041
3042 /* If the relocation already has the right value in it, no
3043 * more work needs to be done.
3044 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003045 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
Eric Anholt673a3942008-07-30 12:06:12 -07003046 drm_gem_object_unreference(target_obj);
3047 continue;
3048 }
3049
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003050 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3051 if (ret != 0) {
3052 drm_gem_object_unreference(target_obj);
3053 i915_gem_object_unpin(obj);
 3054			return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003055 }
3056
3057 /* Map the page containing the relocation we're going to
3058 * perform.
3059 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003060 reloc_offset = obj_priv->gtt_offset + reloc->offset;
Keith Packard0839ccb2008-10-30 19:38:48 -07003061 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3062 (reloc_offset &
3063 ~(PAGE_SIZE - 1)));
Eric Anholt3043c602008-10-02 12:24:47 -07003064 reloc_entry = (uint32_t __iomem *)(reloc_page +
Keith Packard0839ccb2008-10-30 19:38:48 -07003065 (reloc_offset & (PAGE_SIZE - 1)));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003066 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
Eric Anholt673a3942008-07-30 12:06:12 -07003067
3068#if WATCH_BUF
3069 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003070 obj, (unsigned int) reloc->offset,
Eric Anholt673a3942008-07-30 12:06:12 -07003071 readl(reloc_entry), reloc_val);
3072#endif
3073 writel(reloc_val, reloc_entry);
Keith Packard0839ccb2008-10-30 19:38:48 -07003074 io_mapping_unmap_atomic(reloc_page);
Eric Anholt673a3942008-07-30 12:06:12 -07003075
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003076 /* The updated presumed offset for this entry will be
3077 * copied back out to the user.
Eric Anholt673a3942008-07-30 12:06:12 -07003078 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003079 reloc->presumed_offset = target_obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003080
3081 drm_gem_object_unreference(target_obj);
3082 }
3083
Eric Anholt673a3942008-07-30 12:06:12 -07003084#if WATCH_BUF
3085 if (0)
3086 i915_gem_dump_object(obj, 128, __func__, ~0);
3087#endif
3088 return 0;
3089}
3090
3091/** Dispatch a batchbuffer to the ring
3092 */
3093static int
3094i915_dispatch_gem_execbuffer(struct drm_device *dev,
3095 struct drm_i915_gem_execbuffer *exec,
Eric Anholt201361a2009-03-11 12:30:04 -07003096 struct drm_clip_rect *cliprects,
Eric Anholt673a3942008-07-30 12:06:12 -07003097 uint64_t exec_offset)
3098{
3099 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003100 int nbox = exec->num_cliprects;
3101 int i = 0, count;
Chris Wilson83d60792009-06-06 09:45:57 +01003102 uint32_t exec_start, exec_len;
Eric Anholt673a3942008-07-30 12:06:12 -07003103 RING_LOCALS;
3104
3105 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3106 exec_len = (uint32_t) exec->batch_len;
3107
Eric Anholt673a3942008-07-30 12:06:12 -07003108 count = nbox ? nbox : 1;
3109
3110 for (i = 0; i < count; i++) {
3111 if (i < nbox) {
Eric Anholt201361a2009-03-11 12:30:04 -07003112 int ret = i915_emit_box(dev, cliprects, i,
Eric Anholt673a3942008-07-30 12:06:12 -07003113 exec->DR1, exec->DR4);
3114 if (ret)
3115 return ret;
3116 }
3117
3118 if (IS_I830(dev) || IS_845G(dev)) {
3119 BEGIN_LP_RING(4);
3120 OUT_RING(MI_BATCH_BUFFER);
3121 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3122 OUT_RING(exec_start + exec_len - 4);
3123 OUT_RING(0);
3124 ADVANCE_LP_RING();
3125 } else {
3126 BEGIN_LP_RING(2);
3127 if (IS_I965G(dev)) {
3128 OUT_RING(MI_BATCH_BUFFER_START |
3129 (2 << 6) |
3130 MI_BATCH_NON_SECURE_I965);
3131 OUT_RING(exec_start);
3132 } else {
3133 OUT_RING(MI_BATCH_BUFFER_START |
3134 (2 << 6));
3135 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3136 }
3137 ADVANCE_LP_RING();
3138 }
3139 }
3140
3141 /* XXX breadcrumb */
3142 return 0;
3143}
3144
3145/* Throttle our rendering by waiting until the ring has completed our requests
3146 * emitted over 20 msec ago.
3147 *
Eric Anholtb9624422009-06-03 07:27:35 +00003148 * Note that if we were to use the current jiffies each time around the loop,
3149 * we wouldn't escape the function with any frames outstanding if the time to
3150 * render a frame was over 20ms.
3151 *
Eric Anholt673a3942008-07-30 12:06:12 -07003152 * This should get us reasonable parallelism between CPU and GPU but also
3153 * relatively low latency when blocking on a particular request to finish.
3154 */
3155static int
3156i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3157{
3158 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3159 int ret = 0;
Eric Anholtb9624422009-06-03 07:27:35 +00003160 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Eric Anholt673a3942008-07-30 12:06:12 -07003161
3162 mutex_lock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003163 while (!list_empty(&i915_file_priv->mm.request_list)) {
3164 struct drm_i915_gem_request *request;
3165
3166 request = list_first_entry(&i915_file_priv->mm.request_list,
3167 struct drm_i915_gem_request,
3168 client_list);
3169
3170 if (time_after_eq(request->emitted_jiffies, recent_enough))
3171 break;
3172
3173 ret = i915_wait_request(dev, request->seqno);
3174 if (ret != 0)
3175 break;
3176 }
Eric Anholt673a3942008-07-30 12:06:12 -07003177 mutex_unlock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003178
Eric Anholt673a3942008-07-30 12:06:12 -07003179 return ret;
3180}
3181
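/** Copy the relocation entries for every buffer in the exec list from user
 * space into one large kernel allocation, so they can be applied later
 * without further copy_from_user calls.
 */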
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003182static int
3183i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3184 uint32_t buffer_count,
3185 struct drm_i915_gem_relocation_entry **relocs)
3186{
3187 uint32_t reloc_count = 0, reloc_index = 0, i;
3188 int ret;
3189
3190 *relocs = NULL;
3191 for (i = 0; i < buffer_count; i++) {
3192 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3193 return -EINVAL;
3194 reloc_count += exec_list[i].relocation_count;
3195 }
3196
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003197 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003198 if (*relocs == NULL)
3199 return -ENOMEM;
3200
3201 for (i = 0; i < buffer_count; i++) {
3202 struct drm_i915_gem_relocation_entry __user *user_relocs;
3203
3204 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3205
3206 ret = copy_from_user(&(*relocs)[reloc_index],
3207 user_relocs,
3208 exec_list[i].relocation_count *
3209 sizeof(**relocs));
3210 if (ret != 0) {
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003211 drm_free_large(*relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003212 *relocs = NULL;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003213 return -EFAULT;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003214 }
3215
3216 reloc_index += exec_list[i].relocation_count;
3217 }
3218
Florian Mickler2bc43b52009-04-06 22:55:41 +02003219 return 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003220}
3221
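/** Copy the updated relocation entries back out to each buffer's user-space
 * array and free the kernel copy made by i915_gem_get_relocs_from_user().
 */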
3222static int
3223i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3224 uint32_t buffer_count,
3225 struct drm_i915_gem_relocation_entry *relocs)
3226{
3227 uint32_t reloc_count = 0, i;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003228 int ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003229
3230 for (i = 0; i < buffer_count; i++) {
3231 struct drm_i915_gem_relocation_entry __user *user_relocs;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003232 int unwritten;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003233
3234 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3235
Florian Mickler2bc43b52009-04-06 22:55:41 +02003236 unwritten = copy_to_user(user_relocs,
3237 &relocs[reloc_count],
3238 exec_list[i].relocation_count *
3239 sizeof(*relocs));
3240
3241 if (unwritten) {
3242 ret = -EFAULT;
3243 goto err;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003244 }
3245
3246 reloc_count += exec_list[i].relocation_count;
3247 }
3248
Florian Mickler2bc43b52009-04-06 22:55:41 +02003249err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003250 drm_free_large(relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003251
3252 return ret;
3253}
3254
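/** Sanity check the batch buffer: both the start offset and the length must
 * be 8-byte aligned, and the start offset must be non-zero.
 */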
Chris Wilson83d60792009-06-06 09:45:57 +01003255static int
 3256i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
3257 uint64_t exec_offset)
3258{
3259 uint32_t exec_start, exec_len;
3260
3261 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3262 exec_len = (uint32_t) exec->batch_len;
3263
3264 if ((exec_start | exec_len) & 0x7)
3265 return -EINVAL;
3266
3267 if (!exec_start)
3268 return -EINVAL;
3269
3270 return 0;
3271}
3272
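/**
 * Execbuffer ioctl: pin and relocate every buffer in the exec list, compute
 * and flush/invalidate the GPU domains they need, dispatch the batch buffer
 * to the ring, and move the buffers to the active list under a new seqno.
 */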
Eric Anholt673a3942008-07-30 12:06:12 -07003273int
3274i915_gem_execbuffer(struct drm_device *dev, void *data,
3275 struct drm_file *file_priv)
3276{
3277 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003278 struct drm_i915_gem_execbuffer *args = data;
3279 struct drm_i915_gem_exec_object *exec_list = NULL;
3280 struct drm_gem_object **object_list = NULL;
3281 struct drm_gem_object *batch_obj;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003282 struct drm_i915_gem_object *obj_priv;
Eric Anholt201361a2009-03-11 12:30:04 -07003283 struct drm_clip_rect *cliprects = NULL;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003284 struct drm_i915_gem_relocation_entry *relocs;
3285 int ret, ret2, i, pinned = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003286 uint64_t exec_offset;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003287 uint32_t seqno, flush_domains, reloc_index;
Keith Packardac94a962008-11-20 23:30:27 -08003288 int pin_tries;
Eric Anholt673a3942008-07-30 12:06:12 -07003289
3290#if WATCH_EXEC
3291 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3292 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3293#endif
3294
Eric Anholt4f481ed2008-09-10 14:22:49 -07003295 if (args->buffer_count < 1) {
3296 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3297 return -EINVAL;
3298 }
Eric Anholt673a3942008-07-30 12:06:12 -07003299 /* Copy in the exec list from userland */
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003300 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3301 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
Eric Anholt673a3942008-07-30 12:06:12 -07003302 if (exec_list == NULL || object_list == NULL) {
3303 DRM_ERROR("Failed to allocate exec or object list "
3304 "for %d buffers\n",
3305 args->buffer_count);
3306 ret = -ENOMEM;
3307 goto pre_mutex_err;
3308 }
3309 ret = copy_from_user(exec_list,
3310 (struct drm_i915_relocation_entry __user *)
3311 (uintptr_t) args->buffers_ptr,
3312 sizeof(*exec_list) * args->buffer_count);
3313 if (ret != 0) {
3314 DRM_ERROR("copy %d exec entries failed %d\n",
3315 args->buffer_count, ret);
3316 goto pre_mutex_err;
3317 }
3318
Eric Anholt201361a2009-03-11 12:30:04 -07003319 if (args->num_cliprects != 0) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003320 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3321 GFP_KERNEL);
Eric Anholt201361a2009-03-11 12:30:04 -07003322		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}
3324
3325 ret = copy_from_user(cliprects,
3326 (struct drm_clip_rect __user *)
3327 (uintptr_t) args->cliprects_ptr,
3328 sizeof(*cliprects) * args->num_cliprects);
3329 if (ret != 0) {
3330 DRM_ERROR("copy %d cliprects failed: %d\n",
3331 args->num_cliprects, ret);
3332 goto pre_mutex_err;
3333 }
3334 }
3335
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003336 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3337 &relocs);
3338 if (ret != 0)
3339 goto pre_mutex_err;
3340
Eric Anholt673a3942008-07-30 12:06:12 -07003341 mutex_lock(&dev->struct_mutex);
3342
3343 i915_verify_inactive(dev, __FILE__, __LINE__);
3344
3345 if (dev_priv->mm.wedged) {
3346 DRM_ERROR("Execbuf while wedged\n");
3347 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003348 ret = -EIO;
3349 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003350 }
3351
3352 if (dev_priv->mm.suspended) {
3353 DRM_ERROR("Execbuf while VT-switched.\n");
3354 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003355 ret = -EBUSY;
3356 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003357 }
3358
Keith Packardac94a962008-11-20 23:30:27 -08003359 /* Look up object handles */
Eric Anholt673a3942008-07-30 12:06:12 -07003360 for (i = 0; i < args->buffer_count; i++) {
3361 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3362 exec_list[i].handle);
3363 if (object_list[i] == NULL) {
3364 DRM_ERROR("Invalid object handle %d at index %d\n",
3365 exec_list[i].handle, i);
3366 ret = -EBADF;
3367 goto err;
3368 }
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003369
3370 obj_priv = object_list[i]->driver_private;
3371 if (obj_priv->in_execbuffer) {
3372 DRM_ERROR("Object %p appears more than once in object list\n",
3373 object_list[i]);
3374 ret = -EBADF;
3375 goto err;
3376 }
3377 obj_priv->in_execbuffer = true;
Keith Packardac94a962008-11-20 23:30:27 -08003378 }
Eric Anholt673a3942008-07-30 12:06:12 -07003379
Keith Packardac94a962008-11-20 23:30:27 -08003380 /* Pin and relocate */
3381 for (pin_tries = 0; ; pin_tries++) {
3382 ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003383 reloc_index = 0;
3384
Keith Packardac94a962008-11-20 23:30:27 -08003385 for (i = 0; i < args->buffer_count; i++) {
3386 object_list[i]->pending_read_domains = 0;
3387 object_list[i]->pending_write_domain = 0;
3388 ret = i915_gem_object_pin_and_relocate(object_list[i],
3389 file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003390 &exec_list[i],
3391 &relocs[reloc_index]);
Keith Packardac94a962008-11-20 23:30:27 -08003392 if (ret)
3393 break;
3394 pinned = i + 1;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003395 reloc_index += exec_list[i].relocation_count;
Keith Packardac94a962008-11-20 23:30:27 -08003396 }
3397 /* success */
3398 if (ret == 0)
3399 break;
3400
3401 /* error other than GTT full, or we've already tried again */
Chris Wilson2939e1f2009-06-06 09:46:03 +01003402 if (ret != -ENOSPC || pin_tries >= 1) {
Eric Anholtf1acec92008-12-19 14:47:48 -08003403 if (ret != -ERESTARTSYS)
3404 DRM_ERROR("Failed to pin buffers %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003405 goto err;
3406 }
Keith Packardac94a962008-11-20 23:30:27 -08003407
3408 /* unpin all of our buffers */
3409 for (i = 0; i < pinned; i++)
3410 i915_gem_object_unpin(object_list[i]);
Eric Anholtb1177632008-12-10 10:09:41 -08003411 pinned = 0;
Keith Packardac94a962008-11-20 23:30:27 -08003412
3413 /* evict everyone we can from the aperture */
3414 ret = i915_gem_evict_everything(dev);
3415 if (ret)
3416 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003417 }
3418
3419 /* Set the pending read domains for the batch buffer to COMMAND */
3420 batch_obj = object_list[args->buffer_count-1];
Chris Wilson5f26a2c2009-06-06 09:45:58 +01003421 if (batch_obj->pending_write_domain) {
3422 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3423 ret = -EINVAL;
3424 goto err;
3425 }
3426 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
Eric Anholt673a3942008-07-30 12:06:12 -07003427
Chris Wilson83d60792009-06-06 09:45:57 +01003428 /* Sanity check the batch buffer, prior to moving objects */
3429 exec_offset = exec_list[args->buffer_count - 1].offset;
 3430	ret = i915_gem_check_execbuffer(args, exec_offset);
3431 if (ret != 0) {
3432 DRM_ERROR("execbuf with invalid offset/length\n");
3433 goto err;
3434 }
3435
Eric Anholt673a3942008-07-30 12:06:12 -07003436 i915_verify_inactive(dev, __FILE__, __LINE__);
3437
Keith Packard646f0f62008-11-20 23:23:03 -08003438 /* Zero the global flush/invalidate flags. These
3439 * will be modified as new domains are computed
3440 * for each object
3441 */
3442 dev->invalidate_domains = 0;
3443 dev->flush_domains = 0;
3444
Eric Anholt673a3942008-07-30 12:06:12 -07003445 for (i = 0; i < args->buffer_count; i++) {
3446 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003447
Keith Packard646f0f62008-11-20 23:23:03 -08003448 /* Compute new gpu domains and update invalidate/flush */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003449 i915_gem_object_set_to_gpu_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003450 }
3451
3452 i915_verify_inactive(dev, __FILE__, __LINE__);
3453
Keith Packard646f0f62008-11-20 23:23:03 -08003454 if (dev->invalidate_domains | dev->flush_domains) {
3455#if WATCH_EXEC
3456 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3457 __func__,
3458 dev->invalidate_domains,
3459 dev->flush_domains);
3460#endif
3461 i915_gem_flush(dev,
3462 dev->invalidate_domains,
3463 dev->flush_domains);
3464 if (dev->flush_domains)
Eric Anholtb9624422009-06-03 07:27:35 +00003465 (void)i915_add_request(dev, file_priv,
3466 dev->flush_domains);
Keith Packard646f0f62008-11-20 23:23:03 -08003467 }
Eric Anholt673a3942008-07-30 12:06:12 -07003468
Eric Anholtefbeed92009-02-19 14:54:51 -08003469 for (i = 0; i < args->buffer_count; i++) {
3470 struct drm_gem_object *obj = object_list[i];
3471
3472 obj->write_domain = obj->pending_write_domain;
3473 }
3474
Eric Anholt673a3942008-07-30 12:06:12 -07003475 i915_verify_inactive(dev, __FILE__, __LINE__);
3476
3477#if WATCH_COHERENCY
3478 for (i = 0; i < args->buffer_count; i++) {
3479 i915_gem_object_check_coherency(object_list[i],
3480 exec_list[i].handle);
3481 }
3482#endif
3483
Eric Anholt673a3942008-07-30 12:06:12 -07003484#if WATCH_EXEC
Ben Gamari6911a9b2009-04-02 11:24:54 -07003485 i915_gem_dump_object(batch_obj,
Eric Anholt673a3942008-07-30 12:06:12 -07003486 args->batch_len,
3487 __func__,
3488 ~0);
3489#endif
3490
Eric Anholt673a3942008-07-30 12:06:12 -07003491 /* Exec the batchbuffer */
Eric Anholt201361a2009-03-11 12:30:04 -07003492 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003493 if (ret) {
3494 DRM_ERROR("dispatch failed %d\n", ret);
3495 goto err;
3496 }
3497
3498 /*
3499 * Ensure that the commands in the batch buffer are
3500 * finished before the interrupt fires
3501 */
3502 flush_domains = i915_retire_commands(dev);
3503
3504 i915_verify_inactive(dev, __FILE__, __LINE__);
3505
3506 /*
3507 * Get a seqno representing the execution of the current buffer,
3508 * which we can wait on. We would like to mitigate these interrupts,
3509 * likely by only creating seqnos occasionally (so that we have
3510 * *some* interrupts representing completion of buffers that we can
3511 * wait on when trying to clear up gtt space).
3512 */
Eric Anholtb9624422009-06-03 07:27:35 +00003513 seqno = i915_add_request(dev, file_priv, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07003514 BUG_ON(seqno == 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003515 for (i = 0; i < args->buffer_count; i++) {
3516 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003517
Eric Anholtce44b0e2008-11-06 16:00:31 -08003518 i915_gem_object_move_to_active(obj, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07003519#if WATCH_LRU
3520 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3521#endif
3522 }
3523#if WATCH_LRU
3524 i915_dump_lru(dev, __func__);
3525#endif
3526
3527 i915_verify_inactive(dev, __FILE__, __LINE__);
3528
Eric Anholt673a3942008-07-30 12:06:12 -07003529err:
Julia Lawallaad87df2008-12-21 16:28:47 +01003530 for (i = 0; i < pinned; i++)
3531 i915_gem_object_unpin(object_list[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07003532
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003533 for (i = 0; i < args->buffer_count; i++) {
3534 if (object_list[i]) {
3535 obj_priv = object_list[i]->driver_private;
3536 obj_priv->in_execbuffer = false;
3537 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003538 drm_gem_object_unreference(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003539 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003540
Eric Anholt673a3942008-07-30 12:06:12 -07003541 mutex_unlock(&dev->struct_mutex);
3542
Roland Dreiera35f2e22009-02-06 17:48:09 -08003543 if (!ret) {
3544 /* Copy the new buffer offsets back to the user's exec list. */
3545 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3546 (uintptr_t) args->buffers_ptr,
3547 exec_list,
3548 sizeof(*exec_list) * args->buffer_count);
Florian Mickler2bc43b52009-04-06 22:55:41 +02003549 if (ret) {
3550 ret = -EFAULT;
Roland Dreiera35f2e22009-02-06 17:48:09 -08003551 DRM_ERROR("failed to copy %d exec entries "
3552 "back to user (%d)\n",
3553 args->buffer_count, ret);
Florian Mickler2bc43b52009-04-06 22:55:41 +02003554 }
Roland Dreiera35f2e22009-02-06 17:48:09 -08003555 }
3556
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003557 /* Copy the updated relocations out regardless of current error
3558 * state. Failure to update the relocs would mean that the next
3559 * time userland calls execbuf, it would do so with presumed offset
3560 * state that didn't match the actual object state.
3561 */
3562 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3563 relocs);
3564 if (ret2 != 0) {
3565 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3566
3567 if (ret == 0)
3568 ret = ret2;
3569 }
3570
Eric Anholt673a3942008-07-30 12:06:12 -07003571pre_mutex_err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003572 drm_free_large(object_list);
3573 drm_free_large(exec_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07003574 kfree(cliprects);
Eric Anholt673a3942008-07-30 12:06:12 -07003575
3576 return ret;
3577}
3578
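/** Pin an object into the GTT at the requested alignment, binding it first
 * if necessary (and setting up a fence register for pre-965 tiled surfaces),
 * and take it off the inactive list while the pin is held.
 */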
3579int
3580i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3581{
3582 struct drm_device *dev = obj->dev;
3583 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3584 int ret;
3585
3586 i915_verify_inactive(dev, __FILE__, __LINE__);
3587 if (obj_priv->gtt_space == NULL) {
3588 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3589 if (ret != 0) {
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003590 if (ret != -EBUSY && ret != -ERESTARTSYS)
Kyle McMartin0fce81e2009-02-28 15:01:16 -05003591 DRM_ERROR("Failure to bind: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003592 return ret;
3593 }
Chris Wilson22c344e2009-02-11 14:26:45 +00003594 }
3595 /*
3596 * Pre-965 chips need a fence register set up in order to
3597 * properly handle tiled surfaces.
3598 */
3599 if (!IS_I965G(dev) &&
3600 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
3601 obj_priv->tiling_mode != I915_TILING_NONE) {
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003602 ret = i915_gem_object_get_fence_reg(obj);
Chris Wilson22c344e2009-02-11 14:26:45 +00003603 if (ret != 0) {
3604 if (ret != -EBUSY && ret != -ERESTARTSYS)
3605 DRM_ERROR("Failure to install fence: %d\n",
3606 ret);
3607 return ret;
3608 }
Eric Anholt673a3942008-07-30 12:06:12 -07003609 }
3610 obj_priv->pin_count++;
3611
3612 /* If the object is not active and not pending a flush,
3613 * remove it from the inactive list
3614 */
3615 if (obj_priv->pin_count == 1) {
3616 atomic_inc(&dev->pin_count);
3617 atomic_add(obj->size, &dev->pin_memory);
3618 if (!obj_priv->active &&
Chris Wilson21d509e2009-06-06 09:46:02 +01003619 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
Eric Anholt673a3942008-07-30 12:06:12 -07003620 !list_empty(&obj_priv->list))
3621 list_del_init(&obj_priv->list);
3622 }
3623 i915_verify_inactive(dev, __FILE__, __LINE__);
3624
3625 return 0;
3626}
3627
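/** Drop a pin reference; once the pin count reaches zero, an idle object
 * with no pending GPU writes is put back on the inactive list.
 */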
3628void
3629i915_gem_object_unpin(struct drm_gem_object *obj)
3630{
3631 struct drm_device *dev = obj->dev;
3632 drm_i915_private_t *dev_priv = dev->dev_private;
3633 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3634
3635 i915_verify_inactive(dev, __FILE__, __LINE__);
3636 obj_priv->pin_count--;
3637 BUG_ON(obj_priv->pin_count < 0);
3638 BUG_ON(obj_priv->gtt_space == NULL);
3639
3640 /* If the object is no longer pinned, and is
3641 * neither active nor being flushed, then stick it on
3642 * the inactive list
3643 */
3644 if (obj_priv->pin_count == 0) {
3645 if (!obj_priv->active &&
Chris Wilson21d509e2009-06-06 09:46:02 +01003646 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003647 list_move_tail(&obj_priv->list,
3648 &dev_priv->mm.inactive_list);
3649 atomic_dec(&dev->pin_count);
3650 atomic_sub(obj->size, &dev->pin_memory);
3651 }
3652 i915_verify_inactive(dev, __FILE__, __LINE__);
3653}
3654
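/** Pin ioctl: pin an object into the GTT on behalf of user space, recording
 * the owning file so that only that client may unpin it, and return the
 * object's GTT offset.
 */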
3655int
3656i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3657 struct drm_file *file_priv)
3658{
3659 struct drm_i915_gem_pin *args = data;
3660 struct drm_gem_object *obj;
3661 struct drm_i915_gem_object *obj_priv;
3662 int ret;
3663
3664 mutex_lock(&dev->struct_mutex);
3665
3666 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3667 if (obj == NULL) {
3668 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3669 args->handle);
3670 mutex_unlock(&dev->struct_mutex);
3671 return -EBADF;
3672 }
3673 obj_priv = obj->driver_private;
3674
Jesse Barnes79e53942008-11-07 14:24:08 -08003675 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3676 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3677 args->handle);
Chris Wilson96dec612009-02-08 19:08:04 +00003678 drm_gem_object_unreference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003679 mutex_unlock(&dev->struct_mutex);
Jesse Barnes79e53942008-11-07 14:24:08 -08003680 return -EINVAL;
3681 }
3682
3683 obj_priv->user_pin_count++;
3684 obj_priv->pin_filp = file_priv;
3685 if (obj_priv->user_pin_count == 1) {
3686 ret = i915_gem_object_pin(obj, args->alignment);
3687 if (ret != 0) {
3688 drm_gem_object_unreference(obj);
3689 mutex_unlock(&dev->struct_mutex);
3690 return ret;
3691 }
Eric Anholt673a3942008-07-30 12:06:12 -07003692 }
3693
3694 /* XXX - flush the CPU caches for pinned objects
3695 * as the X server doesn't manage domains yet
3696 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003697 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003698 args->offset = obj_priv->gtt_offset;
3699 drm_gem_object_unreference(obj);
3700 mutex_unlock(&dev->struct_mutex);
3701
3702 return 0;
3703}
3704
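/** Unpin ioctl: drop a user-space pin taken with the pin ioctl; only the
 * file that pinned the object is allowed to unpin it.
 */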
3705int
3706i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3707 struct drm_file *file_priv)
3708{
3709 struct drm_i915_gem_pin *args = data;
3710 struct drm_gem_object *obj;
Jesse Barnes79e53942008-11-07 14:24:08 -08003711 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07003712
3713 mutex_lock(&dev->struct_mutex);
3714
3715 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3716 if (obj == NULL) {
3717 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3718 args->handle);
3719 mutex_unlock(&dev->struct_mutex);
3720 return -EBADF;
3721 }
3722
Jesse Barnes79e53942008-11-07 14:24:08 -08003723 obj_priv = obj->driver_private;
3724 if (obj_priv->pin_filp != file_priv) {
 3725		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3726 args->handle);
3727 drm_gem_object_unreference(obj);
3728 mutex_unlock(&dev->struct_mutex);
3729 return -EINVAL;
3730 }
3731 obj_priv->user_pin_count--;
3732 if (obj_priv->user_pin_count == 0) {
3733 obj_priv->pin_filp = NULL;
3734 i915_gem_object_unpin(obj);
3735 }
Eric Anholt673a3942008-07-30 12:06:12 -07003736
3737 drm_gem_object_unreference(obj);
3738 mutex_unlock(&dev->struct_mutex);
3739 return 0;
3740}
3741
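/** Busy ioctl: report whether an object is still in use by the GPU,
 * retiring completed requests first so the answer is up to date.
 */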
3742int
3743i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3744 struct drm_file *file_priv)
3745{
3746 struct drm_i915_gem_busy *args = data;
3747 struct drm_gem_object *obj;
3748 struct drm_i915_gem_object *obj_priv;
3749
Eric Anholt673a3942008-07-30 12:06:12 -07003750 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3751 if (obj == NULL) {
3752 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3753 args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003754 return -EBADF;
3755 }
3756
Chris Wilsonb1ce7862009-06-06 09:46:00 +01003757 mutex_lock(&dev->struct_mutex);
Eric Anholtf21289b2009-02-18 09:44:56 -08003758 /* Update the active list for the hardware's current position.
3759 * Otherwise this only updates on a delayed timer or when irqs are
3760 * actually unmasked, and our working set ends up being larger than
3761 * required.
3762 */
3763 i915_gem_retire_requests(dev);
3764
Eric Anholt673a3942008-07-30 12:06:12 -07003765 obj_priv = obj->driver_private;
Eric Anholtc4de0a52008-12-14 19:05:04 -08003766 /* Don't count being on the flushing list against the object being
3767 * done. Otherwise, a buffer left on the flushing list but not getting
3768 * flushed (because nobody's flushing that domain) won't ever return
3769 * unbusy and get reused by libdrm's bo cache. The other expected
3770 * consumer of this interface, OpenGL's occlusion queries, also specs
3771 * that the objects get unbusy "eventually" without any interference.
3772 */
3773 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003774
3775 drm_gem_object_unreference(obj);
3776 mutex_unlock(&dev->struct_mutex);
3777 return 0;
3778}
3779
3780int
3781i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3782 struct drm_file *file_priv)
3783{
3784 return i915_gem_ring_throttle(dev, file_priv);
3785}
3786
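/** Allocate and initialize the driver-private state for a new GEM object;
 * freshly allocated pages start out in the CPU domain.
 */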
3787int i915_gem_init_object(struct drm_gem_object *obj)
3788{
3789 struct drm_i915_gem_object *obj_priv;
3790
Eric Anholt9a298b22009-03-24 12:23:04 -07003791 obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
Eric Anholt673a3942008-07-30 12:06:12 -07003792 if (obj_priv == NULL)
3793 return -ENOMEM;
3794
3795 /*
3796 * We've just allocated pages from the kernel,
3797 * so they've just been written by the CPU with
3798 * zeros. They'll need to be clflushed before we
3799 * use them with the GPU.
3800 */
3801 obj->write_domain = I915_GEM_DOMAIN_CPU;
3802 obj->read_domains = I915_GEM_DOMAIN_CPU;
3803
Keith Packardba1eb1d2008-10-14 19:55:10 -07003804 obj_priv->agp_type = AGP_USER_MEMORY;
3805
Eric Anholt673a3942008-07-30 12:06:12 -07003806 obj->driver_private = obj_priv;
3807 obj_priv->obj = obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003808 obj_priv->fence_reg = I915_FENCE_REG_NONE;
Eric Anholt673a3942008-07-30 12:06:12 -07003809 INIT_LIST_HEAD(&obj_priv->list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08003810
Eric Anholt673a3942008-07-30 12:06:12 -07003811 return 0;
3812}
3813
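/** Tear down a GEM object: unpin it, detach any physical backing object,
 * unbind it from the GTT and free the driver-private state.
 */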
3814void i915_gem_free_object(struct drm_gem_object *obj)
3815{
Jesse Barnesde151cf2008-11-12 10:03:55 -08003816 struct drm_device *dev = obj->dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003817 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3818
3819 while (obj_priv->pin_count > 0)
3820 i915_gem_object_unpin(obj);
3821
Dave Airlie71acb5e2008-12-30 20:31:46 +10003822 if (obj_priv->phys_obj)
3823 i915_gem_detach_phys_object(dev, obj);
3824
Eric Anholt673a3942008-07-30 12:06:12 -07003825 i915_gem_object_unbind(obj);
3826
Jesse Barnesab00b3e2009-02-11 14:01:46 -08003827 i915_gem_free_mmap_offset(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08003828
Eric Anholt9a298b22009-03-24 12:23:04 -07003829 kfree(obj_priv->page_cpu_valid);
Eric Anholt280b7132009-03-12 16:56:27 -07003830 kfree(obj_priv->bit_17);
Eric Anholt9a298b22009-03-24 12:23:04 -07003831 kfree(obj->driver_private);
Eric Anholt673a3942008-07-30 12:06:12 -07003832}
3833
Eric Anholt673a3942008-07-30 12:06:12 -07003834/** Unbinds all objects that are on the given buffer list. */
3835static int
3836i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
3837{
3838 struct drm_gem_object *obj;
3839 struct drm_i915_gem_object *obj_priv;
3840 int ret;
3841
3842 while (!list_empty(head)) {
3843 obj_priv = list_first_entry(head,
3844 struct drm_i915_gem_object,
3845 list);
3846 obj = obj_priv->obj;
3847
3848 if (obj_priv->pin_count != 0) {
3849 DRM_ERROR("Pinned object in unbind list\n");
3850 mutex_unlock(&dev->struct_mutex);
3851 return -EINVAL;
3852 }
3853
3854 ret = i915_gem_object_unbind(obj);
3855 if (ret != 0) {
3856 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
3857 ret);
3858 mutex_unlock(&dev->struct_mutex);
3859 return ret;
3860 }
3861 }
3862
3863
3864 return 0;
3865}
3866
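/** Flush outstanding GPU work and wait for the hardware to go idle, move
 * everything off the active and flushing lists, evict the inactive list and
 * tear down the ring; called when the GPU must be quiesced (VT switch,
 * last close).
 */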
Jesse Barnes5669fca2009-02-17 15:13:31 -08003867int
Eric Anholt673a3942008-07-30 12:06:12 -07003868i915_gem_idle(struct drm_device *dev)
3869{
3870 drm_i915_private_t *dev_priv = dev->dev_private;
3871 uint32_t seqno, cur_seqno, last_seqno;
3872 int stuck, ret;
3873
Keith Packard6dbe2772008-10-14 21:41:13 -07003874 mutex_lock(&dev->struct_mutex);
3875
3876 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
3877 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003878 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07003879 }
Eric Anholt673a3942008-07-30 12:06:12 -07003880
3881 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3882 * We need to replace this with a semaphore, or something.
3883 */
3884 dev_priv->mm.suspended = 1;
3885
Keith Packard6dbe2772008-10-14 21:41:13 -07003886	/* Cancel the retire work handler and wait for it to finish if running
3887 */
3888 mutex_unlock(&dev->struct_mutex);
3889 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3890 mutex_lock(&dev->struct_mutex);
3891
Eric Anholt673a3942008-07-30 12:06:12 -07003892 i915_kernel_lost_context(dev);
3893
3894 /* Flush the GPU along with all non-CPU write domains
3895 */
Chris Wilson21d509e2009-06-06 09:46:02 +01003896 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
3897 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07003898
3899 if (seqno == 0) {
3900 mutex_unlock(&dev->struct_mutex);
3901 return -ENOMEM;
3902 }
3903
3904 dev_priv->mm.waiting_gem_seqno = seqno;
3905 last_seqno = 0;
3906 stuck = 0;
3907 for (;;) {
3908 cur_seqno = i915_get_gem_seqno(dev);
3909 if (i915_seqno_passed(cur_seqno, seqno))
3910 break;
3911 if (last_seqno == cur_seqno) {
3912 if (stuck++ > 100) {
3913 DRM_ERROR("hardware wedged\n");
3914 dev_priv->mm.wedged = 1;
3915 DRM_WAKEUP(&dev_priv->irq_queue);
3916 break;
3917 }
3918 }
3919 msleep(10);
3920 last_seqno = cur_seqno;
3921 }
3922 dev_priv->mm.waiting_gem_seqno = 0;
3923
3924 i915_gem_retire_requests(dev);
3925
Carl Worth5e118f42009-03-20 11:54:25 -07003926 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt28dfe522008-11-13 15:00:55 -08003927 if (!dev_priv->mm.wedged) {
3928 /* Active and flushing should now be empty as we've
3929 * waited for a sequence higher than any pending execbuffer
3930 */
3931 WARN_ON(!list_empty(&dev_priv->mm.active_list));
3932 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
3933 /* Request should now be empty as we've also waited
3934 * for the last request in the list
3935 */
3936 WARN_ON(!list_empty(&dev_priv->mm.request_list));
3937 }
Eric Anholt673a3942008-07-30 12:06:12 -07003938
Eric Anholt28dfe522008-11-13 15:00:55 -08003939 /* Empty the active and flushing lists to inactive. If there's
3940 * anything left at this point, it means that we're wedged and
3941 * nothing good's going to happen by leaving them there. So strip
3942 * the GPU domains and just stuff them onto inactive.
Eric Anholt673a3942008-07-30 12:06:12 -07003943 */
Eric Anholt28dfe522008-11-13 15:00:55 -08003944 while (!list_empty(&dev_priv->mm.active_list)) {
3945 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07003946
Eric Anholt28dfe522008-11-13 15:00:55 -08003947 obj_priv = list_first_entry(&dev_priv->mm.active_list,
3948 struct drm_i915_gem_object,
3949 list);
3950 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3951 i915_gem_object_move_to_inactive(obj_priv->obj);
3952 }
Carl Worth5e118f42009-03-20 11:54:25 -07003953 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt28dfe522008-11-13 15:00:55 -08003954
3955 while (!list_empty(&dev_priv->mm.flushing_list)) {
3956 struct drm_i915_gem_object *obj_priv;
3957
Eric Anholt151903d2008-12-01 10:23:21 +10003958 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
Eric Anholt28dfe522008-11-13 15:00:55 -08003959 struct drm_i915_gem_object,
3960 list);
3961 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3962 i915_gem_object_move_to_inactive(obj_priv->obj);
3963 }
3964
3965
3966 /* Move all inactive buffers out of the GTT. */
Eric Anholt673a3942008-07-30 12:06:12 -07003967 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
Eric Anholt28dfe522008-11-13 15:00:55 -08003968 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
Keith Packard6dbe2772008-10-14 21:41:13 -07003969 if (ret) {
3970 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003971 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07003972 }
Eric Anholt673a3942008-07-30 12:06:12 -07003973
Keith Packard6dbe2772008-10-14 21:41:13 -07003974 i915_gem_cleanup_ringbuffer(dev);
3975 mutex_unlock(&dev->struct_mutex);
3976
Eric Anholt673a3942008-07-30 12:06:12 -07003977 return 0;
3978}
3979
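/** Allocate and pin a GEM object for the hardware status page on chipsets
 * that read it from graphics memory, and point HWS_PGA at it.
 */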
3980static int
3981i915_gem_init_hws(struct drm_device *dev)
3982{
3983 drm_i915_private_t *dev_priv = dev->dev_private;
3984 struct drm_gem_object *obj;
3985 struct drm_i915_gem_object *obj_priv;
3986 int ret;
3987
3988 /* If we need a physical address for the status page, it's already
3989 * initialized at driver load time.
3990 */
3991 if (!I915_NEED_GFX_HWS(dev))
3992 return 0;
3993
3994 obj = drm_gem_object_alloc(dev, 4096);
3995 if (obj == NULL) {
3996 DRM_ERROR("Failed to allocate status page\n");
3997 return -ENOMEM;
3998 }
3999 obj_priv = obj->driver_private;
Keith Packardba1eb1d2008-10-14 19:55:10 -07004000 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
Eric Anholt673a3942008-07-30 12:06:12 -07004001
4002 ret = i915_gem_object_pin(obj, 4096);
4003 if (ret != 0) {
4004 drm_gem_object_unreference(obj);
4005 return ret;
4006 }
4007
4008 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07004009
Eric Anholt856fa192009-03-19 14:10:50 -07004010 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
Keith Packardba1eb1d2008-10-14 19:55:10 -07004011 if (dev_priv->hw_status_page == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07004012 DRM_ERROR("Failed to map status page.\n");
4013 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
Chris Wilson3eb2ee72009-02-11 14:26:34 +00004014 i915_gem_object_unpin(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004015 drm_gem_object_unreference(obj);
4016 return -EINVAL;
4017 }
4018 dev_priv->hws_obj = obj;
Eric Anholt673a3942008-07-30 12:06:12 -07004019 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4020 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
Keith Packardba1eb1d2008-10-14 19:55:10 -07004021 I915_READ(HWS_PGA); /* posting read */
Eric Anholt673a3942008-07-30 12:06:12 -07004022 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4023
4024 return 0;
4025}
4026
Chris Wilson85a7bb92009-02-11 14:52:44 +00004027static void
4028i915_gem_cleanup_hws(struct drm_device *dev)
4029{
4030 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00004031 struct drm_gem_object *obj;
4032 struct drm_i915_gem_object *obj_priv;
Chris Wilson85a7bb92009-02-11 14:52:44 +00004033
4034 if (dev_priv->hws_obj == NULL)
4035 return;
4036
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00004037 obj = dev_priv->hws_obj;
4038 obj_priv = obj->driver_private;
4039
Eric Anholt856fa192009-03-19 14:10:50 -07004040 kunmap(obj_priv->pages[0]);
Chris Wilson85a7bb92009-02-11 14:52:44 +00004041 i915_gem_object_unpin(obj);
4042 drm_gem_object_unreference(obj);
4043 dev_priv->hws_obj = NULL;
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00004044
Chris Wilson85a7bb92009-02-11 14:52:44 +00004045 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4046 dev_priv->hw_status_page = NULL;
4047
4048 /* Write high address into HWS_PGA when disabling. */
4049 I915_WRITE(HWS_PGA, 0x1ffff000);
4050}
4051
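/** Allocate, pin and map the ring buffer object, program the ring registers
 * and verify that the head pointer resets, working around G45 parts that
 * fail to zero the head on the first write.
 */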
Jesse Barnes79e53942008-11-07 14:24:08 -08004052int
Eric Anholt673a3942008-07-30 12:06:12 -07004053i915_gem_init_ringbuffer(struct drm_device *dev)
4054{
4055 drm_i915_private_t *dev_priv = dev->dev_private;
4056 struct drm_gem_object *obj;
4057 struct drm_i915_gem_object *obj_priv;
Jesse Barnes79e53942008-11-07 14:24:08 -08004058 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
Eric Anholt673a3942008-07-30 12:06:12 -07004059 int ret;
Keith Packard50aa253d2008-10-14 17:20:35 -07004060 u32 head;
Eric Anholt673a3942008-07-30 12:06:12 -07004061
4062 ret = i915_gem_init_hws(dev);
4063 if (ret != 0)
4064 return ret;
4065
4066 obj = drm_gem_object_alloc(dev, 128 * 1024);
4067 if (obj == NULL) {
4068 DRM_ERROR("Failed to allocate ringbuffer\n");
Chris Wilson85a7bb92009-02-11 14:52:44 +00004069 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004070 return -ENOMEM;
4071 }
4072 obj_priv = obj->driver_private;
4073
4074 ret = i915_gem_object_pin(obj, 4096);
4075 if (ret != 0) {
4076 drm_gem_object_unreference(obj);
Chris Wilson85a7bb92009-02-11 14:52:44 +00004077 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004078 return ret;
4079 }
4080
4081 /* Set up the kernel mapping for the ring. */
Jesse Barnes79e53942008-11-07 14:24:08 -08004082 ring->Size = obj->size;
4083 ring->tail_mask = obj->size - 1;
Eric Anholt673a3942008-07-30 12:06:12 -07004084
Jesse Barnes79e53942008-11-07 14:24:08 -08004085 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
4086 ring->map.size = obj->size;
4087 ring->map.type = 0;
4088 ring->map.flags = 0;
4089 ring->map.mtrr = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004090
Jesse Barnes79e53942008-11-07 14:24:08 -08004091 drm_core_ioremap_wc(&ring->map, dev);
4092 if (ring->map.handle == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07004093 DRM_ERROR("Failed to map ringbuffer.\n");
4094 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
Chris Wilson47ed1852009-02-11 14:26:33 +00004095 i915_gem_object_unpin(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004096 drm_gem_object_unreference(obj);
Chris Wilson85a7bb92009-02-11 14:52:44 +00004097 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004098 return -EINVAL;
4099 }
Jesse Barnes79e53942008-11-07 14:24:08 -08004100 ring->ring_obj = obj;
4101 ring->virtual_start = ring->map.handle;
Eric Anholt673a3942008-07-30 12:06:12 -07004102
4103 /* Stop the ring if it's running. */
4104 I915_WRITE(PRB0_CTL, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004105 I915_WRITE(PRB0_TAIL, 0);
Keith Packard50aa253d2008-10-14 17:20:35 -07004106 I915_WRITE(PRB0_HEAD, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004107
4108 /* Initialize the ring. */
4109 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
Keith Packard50aa253d2008-10-14 17:20:35 -07004110 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4111
4112 /* G45 ring initialization fails to reset head to zero */
4113 if (head != 0) {
4114 DRM_ERROR("Ring head not reset to zero "
4115 "ctl %08x head %08x tail %08x start %08x\n",
4116 I915_READ(PRB0_CTL),
4117 I915_READ(PRB0_HEAD),
4118 I915_READ(PRB0_TAIL),
4119 I915_READ(PRB0_START));
4120 I915_WRITE(PRB0_HEAD, 0);
4121
4122 DRM_ERROR("Ring head forced to zero "
4123 "ctl %08x head %08x tail %08x start %08x\n",
4124 I915_READ(PRB0_CTL),
4125 I915_READ(PRB0_HEAD),
4126 I915_READ(PRB0_TAIL),
4127 I915_READ(PRB0_START));
4128 }
4129
Eric Anholt673a3942008-07-30 12:06:12 -07004130 I915_WRITE(PRB0_CTL,
4131 ((obj->size - 4096) & RING_NR_PAGES) |
4132 RING_NO_REPORT |
4133 RING_VALID);
4134
Keith Packard50aa253d2008-10-14 17:20:35 -07004135 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4136
4137 /* If the head is still not zero, the ring is dead */
4138 if (head != 0) {
4139 DRM_ERROR("Ring initialization failed "
4140 "ctl %08x head %08x tail %08x start %08x\n",
4141 I915_READ(PRB0_CTL),
4142 I915_READ(PRB0_HEAD),
4143 I915_READ(PRB0_TAIL),
4144 I915_READ(PRB0_START));
4145 return -EIO;
4146 }
4147
Eric Anholt673a3942008-07-30 12:06:12 -07004148 /* Update our cache of the ring state */
Jesse Barnes79e53942008-11-07 14:24:08 -08004149 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4150 i915_kernel_lost_context(dev);
4151 else {
4152 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4153 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
4154 ring->space = ring->head - (ring->tail + 8);
4155 if (ring->space < 0)
4156 ring->space += ring->Size;
4157 }
Eric Anholt673a3942008-07-30 12:06:12 -07004158
4159 return 0;
4160}
4161
Jesse Barnes79e53942008-11-07 14:24:08 -08004162void
Eric Anholt673a3942008-07-30 12:06:12 -07004163i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4164{
4165 drm_i915_private_t *dev_priv = dev->dev_private;
4166
4167 if (dev_priv->ring.ring_obj == NULL)
4168 return;
4169
4170 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4171
4172 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4173 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4174 dev_priv->ring.ring_obj = NULL;
4175 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4176
Chris Wilson85a7bb92009-02-11 14:52:44 +00004177 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004178}
4179
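/** entervt ioctl: bring GEM back up when the master regains the VT,
 * reinitializing the ring buffer and re-enabling interrupts (UMS only).
 */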
4180int
4181i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4182 struct drm_file *file_priv)
4183{
4184 drm_i915_private_t *dev_priv = dev->dev_private;
4185 int ret;
4186
Jesse Barnes79e53942008-11-07 14:24:08 -08004187 if (drm_core_check_feature(dev, DRIVER_MODESET))
4188 return 0;
4189
Eric Anholt673a3942008-07-30 12:06:12 -07004190 if (dev_priv->mm.wedged) {
4191 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4192 dev_priv->mm.wedged = 0;
4193 }
4194
Eric Anholt673a3942008-07-30 12:06:12 -07004195 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004196 dev_priv->mm.suspended = 0;
4197
4198 ret = i915_gem_init_ringbuffer(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004199 if (ret != 0) {
4200 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004201 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004202 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004203
Carl Worth5e118f42009-03-20 11:54:25 -07004204 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07004205 BUG_ON(!list_empty(&dev_priv->mm.active_list));
Carl Worth5e118f42009-03-20 11:54:25 -07004206 spin_unlock(&dev_priv->mm.active_list_lock);
4207
Eric Anholt673a3942008-07-30 12:06:12 -07004208 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4209 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4210 BUG_ON(!list_empty(&dev_priv->mm.request_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004211 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004212
4213 drm_irq_install(dev);
4214
Eric Anholt673a3942008-07-30 12:06:12 -07004215 return 0;
4216}
4217
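/** leavevt ioctl: idle the GPU and uninstall interrupts before the VT is
 * handed away (UMS only).
 */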
4218int
4219i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4220 struct drm_file *file_priv)
4221{
4222 int ret;
4223
Jesse Barnes79e53942008-11-07 14:24:08 -08004224 if (drm_core_check_feature(dev, DRIVER_MODESET))
4225 return 0;
4226
Eric Anholt673a3942008-07-30 12:06:12 -07004227 ret = i915_gem_idle(dev);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004228 drm_irq_uninstall(dev);
4229
Keith Packard6dbe2772008-10-14 21:41:13 -07004230 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004231}
4232
4233void
4234i915_gem_lastclose(struct drm_device *dev)
4235{
4236 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004237
Eric Anholte806b492009-01-22 09:56:58 -08004238 if (drm_core_check_feature(dev, DRIVER_MODESET))
4239 return;
4240
Keith Packard6dbe2772008-10-14 21:41:13 -07004241 ret = i915_gem_idle(dev);
4242 if (ret)
4243 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004244}
4245
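/** One-time GEM setup at driver load: initialize the memory-management
 * lists and the retire work handler, reserve and clear the fence registers,
 * and detect the bit-6 swizzling mode.
 */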
4246void
4247i915_gem_load(struct drm_device *dev)
4248{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004249 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07004250 drm_i915_private_t *dev_priv = dev->dev_private;
4251
Carl Worth5e118f42009-03-20 11:54:25 -07004252 spin_lock_init(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07004253 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4254 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4255 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4256 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4257 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4258 i915_gem_retire_work_handler);
Eric Anholt673a3942008-07-30 12:06:12 -07004259 dev_priv->mm.next_gem_seqno = 1;
4260
Jesse Barnesde151cf2008-11-12 10:03:55 -08004261 /* Old X drivers will take 0-2 for front, back, depth buffers */
4262 dev_priv->fence_reg_start = 3;
4263
Jesse Barnes0f973f22009-01-26 17:10:45 -08004264 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004265 dev_priv->num_fence_regs = 16;
4266 else
4267 dev_priv->num_fence_regs = 8;
4268
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004269 /* Initialize fence registers to zero */
4270 if (IS_I965G(dev)) {
4271 for (i = 0; i < 16; i++)
4272 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4273 } else {
4274 for (i = 0; i < 8; i++)
4275 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4276 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4277 for (i = 0; i < 8; i++)
4278 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4279 }
4280
Eric Anholt673a3942008-07-30 12:06:12 -07004281 i915_gem_detect_bit_6_swizzle(dev);
4282}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004283
4284/*
4285 * Create a physically contiguous memory object for this object
4286 * e.g. for cursor + overlay regs
4287 */
4288int i915_gem_init_phys_object(struct drm_device *dev,
4289 int id, int size)
4290{
4291 drm_i915_private_t *dev_priv = dev->dev_private;
4292 struct drm_i915_gem_phys_object *phys_obj;
4293 int ret;
4294
4295 if (dev_priv->mm.phys_objs[id - 1] || !size)
4296 return 0;
4297
Eric Anholt9a298b22009-03-24 12:23:04 -07004298 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004299 if (!phys_obj)
4300 return -ENOMEM;
4301
4302 phys_obj->id = id;
4303
4304 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
4305 if (!phys_obj->handle) {
4306 ret = -ENOMEM;
4307 goto kfree_obj;
4308 }
4309#ifdef CONFIG_X86
4310 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4311#endif
4312
4313 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4314
4315 return 0;
4316kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004317 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004318 return ret;
4319}
4320
4321void i915_gem_free_phys_object(struct drm_device *dev, int id)
4322{
4323 drm_i915_private_t *dev_priv = dev->dev_private;
4324 struct drm_i915_gem_phys_object *phys_obj;
4325
4326 if (!dev_priv->mm.phys_objs[id - 1])
4327 return;
4328
4329 phys_obj = dev_priv->mm.phys_objs[id - 1];
4330 if (phys_obj->cur_obj) {
4331 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4332 }
4333
4334#ifdef CONFIG_X86
4335 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4336#endif
4337 drm_pci_free(dev, phys_obj->handle);
4338 kfree(phys_obj);
4339 dev_priv->mm.phys_objs[id - 1] = NULL;
4340}
4341
4342void i915_gem_free_all_phys_object(struct drm_device *dev)
4343{
4344 int i;
4345
Dave Airlie260883c2009-01-22 17:58:49 +10004346 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004347 i915_gem_free_phys_object(dev, i);
4348}
4349
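/** Copy the contents of a physically contiguous backing object back into
 * the object's pages and drop the association.
 */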
4350void i915_gem_detach_phys_object(struct drm_device *dev,
4351 struct drm_gem_object *obj)
4352{
4353 struct drm_i915_gem_object *obj_priv;
4354 int i;
4355 int ret;
4356 int page_count;
4357
4358 obj_priv = obj->driver_private;
4359 if (!obj_priv->phys_obj)
4360 return;
4361
Eric Anholt856fa192009-03-19 14:10:50 -07004362 ret = i915_gem_object_get_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004363 if (ret)
4364 goto out;
4365
4366 page_count = obj->size / PAGE_SIZE;
4367
4368 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07004369 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004370 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4371
4372 memcpy(dst, src, PAGE_SIZE);
4373 kunmap_atomic(dst, KM_USER0);
4374 }
Eric Anholt856fa192009-03-19 14:10:50 -07004375 drm_clflush_pages(obj_priv->pages, page_count);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004376 drm_agp_chipset_flush(dev);
Chris Wilsond78b47b2009-06-17 21:52:49 +01004377
4378 i915_gem_object_put_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004379out:
4380 obj_priv->phys_obj->cur_obj = NULL;
4381 obj_priv->phys_obj = NULL;
4382}
4383
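/** Back an object with a physically contiguous allocation (creating the
 * phys object if needed) and copy the current page contents into it.
 */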
4384int
4385i915_gem_attach_phys_object(struct drm_device *dev,
4386 struct drm_gem_object *obj, int id)
4387{
4388 drm_i915_private_t *dev_priv = dev->dev_private;
4389 struct drm_i915_gem_object *obj_priv;
4390 int ret = 0;
4391 int page_count;
4392 int i;
4393
4394 if (id > I915_MAX_PHYS_OBJECT)
4395 return -EINVAL;
4396
4397 obj_priv = obj->driver_private;
4398
4399 if (obj_priv->phys_obj) {
4400 if (obj_priv->phys_obj->id == id)
4401 return 0;
4402 i915_gem_detach_phys_object(dev, obj);
4403 }
4404
4405
4406 /* create a new object */
4407 if (!dev_priv->mm.phys_objs[id - 1]) {
4408 ret = i915_gem_init_phys_object(dev, id,
4409 obj->size);
4410 if (ret) {
Linus Torvaldsaeb565d2009-01-26 10:01:53 -08004411 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004412 goto out;
4413 }
4414 }
4415
4416 /* bind to the object */
4417 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4418 obj_priv->phys_obj->cur_obj = obj;
4419
Eric Anholt856fa192009-03-19 14:10:50 -07004420 ret = i915_gem_object_get_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004421 if (ret) {
4422 DRM_ERROR("failed to get page list\n");
4423 goto out;
4424 }
4425
4426 page_count = obj->size / PAGE_SIZE;
4427
4428 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07004429 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004430 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4431
4432 memcpy(dst, src, PAGE_SIZE);
4433 kunmap_atomic(src, KM_USER0);
4434 }
4435
Chris Wilsond78b47b2009-06-17 21:52:49 +01004436 i915_gem_object_put_pages(obj);
4437
Dave Airlie71acb5e2008-12-30 20:31:46 +10004438 return 0;
4439out:
4440 return ret;
4441}
4442
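/** pwrite fast path for objects backed by a physically contiguous object:
 * copy straight from user space into the phys object's mapping and flush
 * the chipset write buffers.
 */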
4443static int
4444i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4445 struct drm_i915_gem_pwrite *args,
4446 struct drm_file *file_priv)
4447{
4448 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4449 void *obj_addr;
4450 int ret;
4451 char __user *user_data;
4452
4453 user_data = (char __user *) (uintptr_t) args->data_ptr;
4454 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4455
Dave Airliee08fb4f2009-02-25 14:52:30 +10004456 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004457 ret = copy_from_user(obj_addr, user_data, args->size);
4458 if (ret)
4459 return -EFAULT;
4460
4461 drm_agp_chipset_flush(dev);
4462 return 0;
4463}
Eric Anholtb9624422009-06-03 07:27:35 +00004464
4465void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4466{
4467 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4468
4469 /* Clean up our request list when the client is going away, so that
4470 * later retire_requests won't dereference our soon-to-be-gone
4471 * file_priv.
4472 */
4473 mutex_lock(&dev->struct_mutex);
4474 while (!list_empty(&i915_file_priv->mm.request_list))
4475 list_del_init(i915_file_priv->mm.request_list.next);
4476 mutex_unlock(&dev->struct_mutex);
4477}