blob: e2421869a40cf809f509c57e58a770a7c0e4043a [file] [log] [blame]
Eric Anholt673a3942008-07-30 12:06:12 -07001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32#include <linux/swap.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080033#include <linux/pci.h>
Eric Anholt673a3942008-07-30 12:06:12 -070034
Eric Anholt28dfe522008-11-13 15:00:55 -080035#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
36
Eric Anholte47c68e2008-11-14 13:35:19 -080037static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
38static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
39static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
Eric Anholte47c68e2008-11-14 13:35:19 -080040static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
41 int write);
42static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43 uint64_t offset,
44 uint64_t size);
45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
Eric Anholt673a3942008-07-30 12:06:12 -070046static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -080047static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
48 unsigned alignment);
Jesse Barnes0f973f22009-01-26 17:10:45 -080049static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
Jesse Barnesde151cf2008-11-12 10:03:55 -080050static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
51static int i915_gem_evict_something(struct drm_device *dev);
Dave Airlie71acb5e2008-12-30 20:31:46 +100052static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
53 struct drm_i915_gem_pwrite *args,
54 struct drm_file *file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -070055
Jesse Barnes79e53942008-11-07 14:24:08 -080056int i915_gem_do_init(struct drm_device *dev, unsigned long start,
57 unsigned long end)
58{
59 drm_i915_private_t *dev_priv = dev->dev_private;
60
61 if (start >= end ||
62 (start & (PAGE_SIZE - 1)) != 0 ||
63 (end & (PAGE_SIZE - 1)) != 0) {
64 return -EINVAL;
65 }
66
67 drm_mm_init(&dev_priv->mm.gtt_space, start,
68 end - start);
69
70 dev->gtt_total = (uint32_t) (end - start);
71
72 return 0;
73}
Keith Packard6dbe2772008-10-14 21:41:13 -070074
Eric Anholt673a3942008-07-30 12:06:12 -070075int
76i915_gem_init_ioctl(struct drm_device *dev, void *data,
77 struct drm_file *file_priv)
78{
Eric Anholt673a3942008-07-30 12:06:12 -070079 struct drm_i915_gem_init *args = data;
Jesse Barnes79e53942008-11-07 14:24:08 -080080 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -070081
82 mutex_lock(&dev->struct_mutex);
Jesse Barnes79e53942008-11-07 14:24:08 -080083 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
Eric Anholt673a3942008-07-30 12:06:12 -070084 mutex_unlock(&dev->struct_mutex);
85
Jesse Barnes79e53942008-11-07 14:24:08 -080086 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -070087}
88
Eric Anholt5a125c32008-10-22 21:40:13 -070089int
90i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
91 struct drm_file *file_priv)
92{
Eric Anholt5a125c32008-10-22 21:40:13 -070093 struct drm_i915_gem_get_aperture *args = data;
Eric Anholt5a125c32008-10-22 21:40:13 -070094
95 if (!(dev->driver->driver_features & DRIVER_GEM))
96 return -ENODEV;
97
98 args->aper_size = dev->gtt_total;
Keith Packard2678d9d2008-11-20 22:54:54 -080099 args->aper_available_size = (args->aper_size -
100 atomic_read(&dev->pin_memory));
Eric Anholt5a125c32008-10-22 21:40:13 -0700101
102 return 0;
103}
104
Eric Anholt673a3942008-07-30 12:06:12 -0700105
106/**
107 * Creates a new mm object and returns a handle to it.
108 */
109int
110i915_gem_create_ioctl(struct drm_device *dev, void *data,
111 struct drm_file *file_priv)
112{
113 struct drm_i915_gem_create *args = data;
114 struct drm_gem_object *obj;
115 int handle, ret;
116
117 args->size = roundup(args->size, PAGE_SIZE);
118
119 /* Allocate the new object */
120 obj = drm_gem_object_alloc(dev, args->size);
121 if (obj == NULL)
122 return -ENOMEM;
123
124 ret = drm_gem_handle_create(file_priv, obj, &handle);
125 mutex_lock(&dev->struct_mutex);
126 drm_gem_object_handle_unreference(obj);
127 mutex_unlock(&dev->struct_mutex);
128
129 if (ret)
130 return ret;
131
132 args->handle = handle;
133
134 return 0;
135}
136
Eric Anholt40123c12009-03-09 13:42:30 -0700137static inline int
Eric Anholteb014592009-03-10 11:44:52 -0700138fast_shmem_read(struct page **pages,
139 loff_t page_base, int page_offset,
140 char __user *data,
141 int length)
142{
143 char __iomem *vaddr;
Florian Mickler2bc43b52009-04-06 22:55:41 +0200144 int unwritten;
Eric Anholteb014592009-03-10 11:44:52 -0700145
146 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
147 if (vaddr == NULL)
148 return -ENOMEM;
Florian Mickler2bc43b52009-04-06 22:55:41 +0200149 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
Eric Anholteb014592009-03-10 11:44:52 -0700150 kunmap_atomic(vaddr, KM_USER0);
151
Florian Mickler2bc43b52009-04-06 22:55:41 +0200152 if (unwritten)
153 return -EFAULT;
154
155 return 0;
Eric Anholteb014592009-03-10 11:44:52 -0700156}
157
Eric Anholt280b7132009-03-12 16:56:27 -0700158static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
159{
160 drm_i915_private_t *dev_priv = obj->dev->dev_private;
161 struct drm_i915_gem_object *obj_priv = obj->driver_private;
162
163 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
164 obj_priv->tiling_mode != I915_TILING_NONE;
165}
166
Eric Anholteb014592009-03-10 11:44:52 -0700167static inline int
Eric Anholt40123c12009-03-09 13:42:30 -0700168slow_shmem_copy(struct page *dst_page,
169 int dst_offset,
170 struct page *src_page,
171 int src_offset,
172 int length)
173{
174 char *dst_vaddr, *src_vaddr;
175
176 dst_vaddr = kmap_atomic(dst_page, KM_USER0);
177 if (dst_vaddr == NULL)
178 return -ENOMEM;
179
180 src_vaddr = kmap_atomic(src_page, KM_USER1);
181 if (src_vaddr == NULL) {
182 kunmap_atomic(dst_vaddr, KM_USER0);
183 return -ENOMEM;
184 }
185
186 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
187
188 kunmap_atomic(src_vaddr, KM_USER1);
189 kunmap_atomic(dst_vaddr, KM_USER0);
190
191 return 0;
192}
193
Eric Anholt280b7132009-03-12 16:56:27 -0700194static inline int
195slow_shmem_bit17_copy(struct page *gpu_page,
196 int gpu_offset,
197 struct page *cpu_page,
198 int cpu_offset,
199 int length,
200 int is_read)
201{
202 char *gpu_vaddr, *cpu_vaddr;
203
204 /* Use the unswizzled path if this page isn't affected. */
205 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
206 if (is_read)
207 return slow_shmem_copy(cpu_page, cpu_offset,
208 gpu_page, gpu_offset, length);
209 else
210 return slow_shmem_copy(gpu_page, gpu_offset,
211 cpu_page, cpu_offset, length);
212 }
213
214 gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
215 if (gpu_vaddr == NULL)
216 return -ENOMEM;
217
218 cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
219 if (cpu_vaddr == NULL) {
220 kunmap_atomic(gpu_vaddr, KM_USER0);
221 return -ENOMEM;
222 }
223
224 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
225 * XORing with the other bits (A9 for Y, A9 and A10 for X)
226 */
227 while (length > 0) {
228 int cacheline_end = ALIGN(gpu_offset + 1, 64);
229 int this_length = min(cacheline_end - gpu_offset, length);
230 int swizzled_gpu_offset = gpu_offset ^ 64;
231
232 if (is_read) {
233 memcpy(cpu_vaddr + cpu_offset,
234 gpu_vaddr + swizzled_gpu_offset,
235 this_length);
236 } else {
237 memcpy(gpu_vaddr + swizzled_gpu_offset,
238 cpu_vaddr + cpu_offset,
239 this_length);
240 }
241 cpu_offset += this_length;
242 gpu_offset += this_length;
243 length -= this_length;
244 }
245
246 kunmap_atomic(cpu_vaddr, KM_USER1);
247 kunmap_atomic(gpu_vaddr, KM_USER0);
248
249 return 0;
250}
251
Eric Anholt673a3942008-07-30 12:06:12 -0700252/**
Eric Anholteb014592009-03-10 11:44:52 -0700253 * This is the fast shmem pread path, which attempts to copy_from_user directly
254 * from the backing pages of the object to the user's address space. On a
255 * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
256 */
257static int
258i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
259 struct drm_i915_gem_pread *args,
260 struct drm_file *file_priv)
261{
262 struct drm_i915_gem_object *obj_priv = obj->driver_private;
263 ssize_t remain;
264 loff_t offset, page_base;
265 char __user *user_data;
266 int page_offset, page_length;
267 int ret;
268
269 user_data = (char __user *) (uintptr_t) args->data_ptr;
270 remain = args->size;
271
272 mutex_lock(&dev->struct_mutex);
273
274 ret = i915_gem_object_get_pages(obj);
275 if (ret != 0)
276 goto fail_unlock;
277
278 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
279 args->size);
280 if (ret != 0)
281 goto fail_put_pages;
282
283 obj_priv = obj->driver_private;
284 offset = args->offset;
285
286 while (remain > 0) {
287 /* Operation in this page
288 *
289 * page_base = page offset within aperture
290 * page_offset = offset within page
291 * page_length = bytes to copy for this page
292 */
293 page_base = (offset & ~(PAGE_SIZE-1));
294 page_offset = offset & (PAGE_SIZE-1);
295 page_length = remain;
296 if ((page_offset + remain) > PAGE_SIZE)
297 page_length = PAGE_SIZE - page_offset;
298
299 ret = fast_shmem_read(obj_priv->pages,
300 page_base, page_offset,
301 user_data, page_length);
302 if (ret)
303 goto fail_put_pages;
304
305 remain -= page_length;
306 user_data += page_length;
307 offset += page_length;
308 }
309
310fail_put_pages:
311 i915_gem_object_put_pages(obj);
312fail_unlock:
313 mutex_unlock(&dev->struct_mutex);
314
315 return ret;
316}
317
318/**
319 * This is the fallback shmem pread path, which allocates temporary storage
320 * in kernel space to copy_to_user into outside of the struct_mutex, so we
321 * can copy out of the object's backing pages while holding the struct mutex
322 * and not take page faults.
323 */
324static int
325i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
326 struct drm_i915_gem_pread *args,
327 struct drm_file *file_priv)
328{
329 struct drm_i915_gem_object *obj_priv = obj->driver_private;
330 struct mm_struct *mm = current->mm;
331 struct page **user_pages;
332 ssize_t remain;
333 loff_t offset, pinned_pages, i;
334 loff_t first_data_page, last_data_page, num_pages;
335 int shmem_page_index, shmem_page_offset;
336 int data_page_index, data_page_offset;
337 int page_length;
338 int ret;
339 uint64_t data_ptr = args->data_ptr;
Eric Anholt280b7132009-03-12 16:56:27 -0700340 int do_bit17_swizzling;
Eric Anholteb014592009-03-10 11:44:52 -0700341
342 remain = args->size;
343
344 /* Pin the user pages containing the data. We can't fault while
345 * holding the struct mutex, yet we want to hold it while
346 * dereferencing the user data.
347 */
348 first_data_page = data_ptr / PAGE_SIZE;
349 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
350 num_pages = last_data_page - first_data_page + 1;
351
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700352 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
Eric Anholteb014592009-03-10 11:44:52 -0700353 if (user_pages == NULL)
354 return -ENOMEM;
355
356 down_read(&mm->mmap_sem);
357 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
Eric Anholte5e9ecd2009-04-07 16:01:22 -0700358 num_pages, 1, 0, user_pages, NULL);
Eric Anholteb014592009-03-10 11:44:52 -0700359 up_read(&mm->mmap_sem);
360 if (pinned_pages < num_pages) {
361 ret = -EFAULT;
362 goto fail_put_user_pages;
363 }
364
Eric Anholt280b7132009-03-12 16:56:27 -0700365 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
366
Eric Anholteb014592009-03-10 11:44:52 -0700367 mutex_lock(&dev->struct_mutex);
368
369 ret = i915_gem_object_get_pages(obj);
370 if (ret != 0)
371 goto fail_unlock;
372
373 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
374 args->size);
375 if (ret != 0)
376 goto fail_put_pages;
377
378 obj_priv = obj->driver_private;
379 offset = args->offset;
380
381 while (remain > 0) {
382 /* Operation in this page
383 *
384 * shmem_page_index = page number within shmem file
385 * shmem_page_offset = offset within page in shmem file
386 * data_page_index = page number in get_user_pages return
387 * data_page_offset = offset with data_page_index page.
388 * page_length = bytes to copy for this page
389 */
390 shmem_page_index = offset / PAGE_SIZE;
391 shmem_page_offset = offset & ~PAGE_MASK;
392 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
393 data_page_offset = data_ptr & ~PAGE_MASK;
394
395 page_length = remain;
396 if ((shmem_page_offset + page_length) > PAGE_SIZE)
397 page_length = PAGE_SIZE - shmem_page_offset;
398 if ((data_page_offset + page_length) > PAGE_SIZE)
399 page_length = PAGE_SIZE - data_page_offset;
400
Eric Anholt280b7132009-03-12 16:56:27 -0700401 if (do_bit17_swizzling) {
402 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
403 shmem_page_offset,
404 user_pages[data_page_index],
405 data_page_offset,
406 page_length,
407 1);
408 } else {
409 ret = slow_shmem_copy(user_pages[data_page_index],
410 data_page_offset,
411 obj_priv->pages[shmem_page_index],
412 shmem_page_offset,
413 page_length);
414 }
Eric Anholteb014592009-03-10 11:44:52 -0700415 if (ret)
416 goto fail_put_pages;
417
418 remain -= page_length;
419 data_ptr += page_length;
420 offset += page_length;
421 }
422
423fail_put_pages:
424 i915_gem_object_put_pages(obj);
425fail_unlock:
426 mutex_unlock(&dev->struct_mutex);
427fail_put_user_pages:
428 for (i = 0; i < pinned_pages; i++) {
429 SetPageDirty(user_pages[i]);
430 page_cache_release(user_pages[i]);
431 }
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700432 drm_free_large(user_pages);
Eric Anholteb014592009-03-10 11:44:52 -0700433
434 return ret;
435}
436
Eric Anholt673a3942008-07-30 12:06:12 -0700437/**
438 * Reads data from the object referenced by handle.
439 *
440 * On error, the contents of *data are undefined.
441 */
442int
443i915_gem_pread_ioctl(struct drm_device *dev, void *data,
444 struct drm_file *file_priv)
445{
446 struct drm_i915_gem_pread *args = data;
447 struct drm_gem_object *obj;
448 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -0700449 int ret;
450
451 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
452 if (obj == NULL)
453 return -EBADF;
454 obj_priv = obj->driver_private;
455
456 /* Bounds check source.
457 *
458 * XXX: This could use review for overflow issues...
459 */
460 if (args->offset > obj->size || args->size > obj->size ||
461 args->offset + args->size > obj->size) {
462 drm_gem_object_unreference(obj);
463 return -EINVAL;
464 }
465
Eric Anholt280b7132009-03-12 16:56:27 -0700466 if (i915_gem_object_needs_bit17_swizzle(obj)) {
Eric Anholteb014592009-03-10 11:44:52 -0700467 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
Eric Anholt280b7132009-03-12 16:56:27 -0700468 } else {
469 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
470 if (ret != 0)
471 ret = i915_gem_shmem_pread_slow(dev, obj, args,
472 file_priv);
473 }
Eric Anholt673a3942008-07-30 12:06:12 -0700474
475 drm_gem_object_unreference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700476
Eric Anholteb014592009-03-10 11:44:52 -0700477 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700478}
479
Keith Packard0839ccb2008-10-30 19:38:48 -0700480/* This is the fast write path which cannot handle
481 * page faults in the source data
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700482 */
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700483
Keith Packard0839ccb2008-10-30 19:38:48 -0700484static inline int
485fast_user_write(struct io_mapping *mapping,
486 loff_t page_base, int page_offset,
487 char __user *user_data,
488 int length)
489{
490 char *vaddr_atomic;
491 unsigned long unwritten;
492
493 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
494 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
495 user_data, length);
496 io_mapping_unmap_atomic(vaddr_atomic);
497 if (unwritten)
498 return -EFAULT;
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700499 return 0;
Keith Packard0839ccb2008-10-30 19:38:48 -0700500}
501
502/* Here's the write path which can sleep for
503 * page faults
504 */
505
506static inline int
Eric Anholt3de09aa2009-03-09 09:42:23 -0700507slow_kernel_write(struct io_mapping *mapping,
508 loff_t gtt_base, int gtt_offset,
509 struct page *user_page, int user_offset,
510 int length)
Keith Packard0839ccb2008-10-30 19:38:48 -0700511{
Eric Anholt3de09aa2009-03-09 09:42:23 -0700512 char *src_vaddr, *dst_vaddr;
Keith Packard0839ccb2008-10-30 19:38:48 -0700513 unsigned long unwritten;
514
Eric Anholt3de09aa2009-03-09 09:42:23 -0700515 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
516 src_vaddr = kmap_atomic(user_page, KM_USER1);
517 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
518 src_vaddr + user_offset,
519 length);
520 kunmap_atomic(src_vaddr, KM_USER1);
521 io_mapping_unmap_atomic(dst_vaddr);
Keith Packard0839ccb2008-10-30 19:38:48 -0700522 if (unwritten)
523 return -EFAULT;
524 return 0;
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700525}
526
Eric Anholt40123c12009-03-09 13:42:30 -0700527static inline int
528fast_shmem_write(struct page **pages,
529 loff_t page_base, int page_offset,
530 char __user *data,
531 int length)
532{
533 char __iomem *vaddr;
Dave Airlied0088772009-03-28 20:29:48 -0400534 unsigned long unwritten;
Eric Anholt40123c12009-03-09 13:42:30 -0700535
536 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
537 if (vaddr == NULL)
538 return -ENOMEM;
Dave Airlied0088772009-03-28 20:29:48 -0400539 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
Eric Anholt40123c12009-03-09 13:42:30 -0700540 kunmap_atomic(vaddr, KM_USER0);
541
Dave Airlied0088772009-03-28 20:29:48 -0400542 if (unwritten)
543 return -EFAULT;
Eric Anholt40123c12009-03-09 13:42:30 -0700544 return 0;
545}
546
Eric Anholt3de09aa2009-03-09 09:42:23 -0700547/**
548 * This is the fast pwrite path, where we copy the data directly from the
549 * user into the GTT, uncached.
550 */
Eric Anholt673a3942008-07-30 12:06:12 -0700551static int
Eric Anholt3de09aa2009-03-09 09:42:23 -0700552i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
553 struct drm_i915_gem_pwrite *args,
554 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700555{
556 struct drm_i915_gem_object *obj_priv = obj->driver_private;
Keith Packard0839ccb2008-10-30 19:38:48 -0700557 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -0700558 ssize_t remain;
Keith Packard0839ccb2008-10-30 19:38:48 -0700559 loff_t offset, page_base;
Eric Anholt673a3942008-07-30 12:06:12 -0700560 char __user *user_data;
Keith Packard0839ccb2008-10-30 19:38:48 -0700561 int page_offset, page_length;
562 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700563
564 user_data = (char __user *) (uintptr_t) args->data_ptr;
565 remain = args->size;
566 if (!access_ok(VERIFY_READ, user_data, remain))
567 return -EFAULT;
568
569
570 mutex_lock(&dev->struct_mutex);
571 ret = i915_gem_object_pin(obj, 0);
572 if (ret) {
573 mutex_unlock(&dev->struct_mutex);
574 return ret;
575 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800576 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
Eric Anholt673a3942008-07-30 12:06:12 -0700577 if (ret)
578 goto fail;
579
580 obj_priv = obj->driver_private;
581 offset = obj_priv->gtt_offset + args->offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700582
583 while (remain > 0) {
584 /* Operation in this page
585 *
Keith Packard0839ccb2008-10-30 19:38:48 -0700586 * page_base = page offset within aperture
587 * page_offset = offset within page
588 * page_length = bytes to copy for this page
Eric Anholt673a3942008-07-30 12:06:12 -0700589 */
Keith Packard0839ccb2008-10-30 19:38:48 -0700590 page_base = (offset & ~(PAGE_SIZE-1));
591 page_offset = offset & (PAGE_SIZE-1);
592 page_length = remain;
593 if ((page_offset + remain) > PAGE_SIZE)
594 page_length = PAGE_SIZE - page_offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700595
Keith Packard0839ccb2008-10-30 19:38:48 -0700596 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
597 page_offset, user_data, page_length);
Eric Anholt673a3942008-07-30 12:06:12 -0700598
Keith Packard0839ccb2008-10-30 19:38:48 -0700599 /* If we get a fault while copying data, then (presumably) our
Eric Anholt3de09aa2009-03-09 09:42:23 -0700600 * source page isn't available. Return the error and we'll
601 * retry in the slow path.
Keith Packard0839ccb2008-10-30 19:38:48 -0700602 */
Eric Anholt3de09aa2009-03-09 09:42:23 -0700603 if (ret)
604 goto fail;
Eric Anholt673a3942008-07-30 12:06:12 -0700605
Keith Packard0839ccb2008-10-30 19:38:48 -0700606 remain -= page_length;
607 user_data += page_length;
608 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700609 }
Eric Anholt673a3942008-07-30 12:06:12 -0700610
611fail:
612 i915_gem_object_unpin(obj);
613 mutex_unlock(&dev->struct_mutex);
614
615 return ret;
616}
617
Eric Anholt3de09aa2009-03-09 09:42:23 -0700618/**
619 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
620 * the memory and maps it using kmap_atomic for copying.
621 *
622 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
623 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
624 */
Eric Anholt3043c602008-10-02 12:24:47 -0700625static int
Eric Anholt3de09aa2009-03-09 09:42:23 -0700626i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
627 struct drm_i915_gem_pwrite *args,
628 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700629{
Eric Anholt3de09aa2009-03-09 09:42:23 -0700630 struct drm_i915_gem_object *obj_priv = obj->driver_private;
631 drm_i915_private_t *dev_priv = dev->dev_private;
632 ssize_t remain;
633 loff_t gtt_page_base, offset;
634 loff_t first_data_page, last_data_page, num_pages;
635 loff_t pinned_pages, i;
636 struct page **user_pages;
637 struct mm_struct *mm = current->mm;
638 int gtt_page_offset, data_page_offset, data_page_index, page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700639 int ret;
Eric Anholt3de09aa2009-03-09 09:42:23 -0700640 uint64_t data_ptr = args->data_ptr;
641
642 remain = args->size;
643
644 /* Pin the user pages containing the data. We can't fault while
645 * holding the struct mutex, and all of the pwrite implementations
646 * want to hold it while dereferencing the user data.
647 */
648 first_data_page = data_ptr / PAGE_SIZE;
649 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
650 num_pages = last_data_page - first_data_page + 1;
651
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700652 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
Eric Anholt3de09aa2009-03-09 09:42:23 -0700653 if (user_pages == NULL)
654 return -ENOMEM;
655
656 down_read(&mm->mmap_sem);
657 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
658 num_pages, 0, 0, user_pages, NULL);
659 up_read(&mm->mmap_sem);
660 if (pinned_pages < num_pages) {
661 ret = -EFAULT;
662 goto out_unpin_pages;
663 }
664
665 mutex_lock(&dev->struct_mutex);
666 ret = i915_gem_object_pin(obj, 0);
667 if (ret)
668 goto out_unlock;
669
670 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
671 if (ret)
672 goto out_unpin_object;
673
674 obj_priv = obj->driver_private;
675 offset = obj_priv->gtt_offset + args->offset;
676
677 while (remain > 0) {
678 /* Operation in this page
679 *
680 * gtt_page_base = page offset within aperture
681 * gtt_page_offset = offset within page in aperture
682 * data_page_index = page number in get_user_pages return
683 * data_page_offset = offset with data_page_index page.
684 * page_length = bytes to copy for this page
685 */
686 gtt_page_base = offset & PAGE_MASK;
687 gtt_page_offset = offset & ~PAGE_MASK;
688 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
689 data_page_offset = data_ptr & ~PAGE_MASK;
690
691 page_length = remain;
692 if ((gtt_page_offset + page_length) > PAGE_SIZE)
693 page_length = PAGE_SIZE - gtt_page_offset;
694 if ((data_page_offset + page_length) > PAGE_SIZE)
695 page_length = PAGE_SIZE - data_page_offset;
696
697 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
698 gtt_page_base, gtt_page_offset,
699 user_pages[data_page_index],
700 data_page_offset,
701 page_length);
702
703 /* If we get a fault while copying data, then (presumably) our
704 * source page isn't available. Return the error and we'll
705 * retry in the slow path.
706 */
707 if (ret)
708 goto out_unpin_object;
709
710 remain -= page_length;
711 offset += page_length;
712 data_ptr += page_length;
713 }
714
715out_unpin_object:
716 i915_gem_object_unpin(obj);
717out_unlock:
718 mutex_unlock(&dev->struct_mutex);
719out_unpin_pages:
720 for (i = 0; i < pinned_pages; i++)
721 page_cache_release(user_pages[i]);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700722 drm_free_large(user_pages);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700723
724 return ret;
725}
726
Eric Anholt40123c12009-03-09 13:42:30 -0700727/**
728 * This is the fast shmem pwrite path, which attempts to directly
729 * copy_from_user into the kmapped pages backing the object.
730 */
Eric Anholt673a3942008-07-30 12:06:12 -0700731static int
Eric Anholt40123c12009-03-09 13:42:30 -0700732i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
733 struct drm_i915_gem_pwrite *args,
734 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700735{
Eric Anholt40123c12009-03-09 13:42:30 -0700736 struct drm_i915_gem_object *obj_priv = obj->driver_private;
737 ssize_t remain;
738 loff_t offset, page_base;
739 char __user *user_data;
740 int page_offset, page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700741 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -0700742
743 user_data = (char __user *) (uintptr_t) args->data_ptr;
744 remain = args->size;
Eric Anholt673a3942008-07-30 12:06:12 -0700745
746 mutex_lock(&dev->struct_mutex);
747
Eric Anholt40123c12009-03-09 13:42:30 -0700748 ret = i915_gem_object_get_pages(obj);
749 if (ret != 0)
750 goto fail_unlock;
751
Eric Anholte47c68e2008-11-14 13:35:19 -0800752 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Eric Anholt40123c12009-03-09 13:42:30 -0700753 if (ret != 0)
754 goto fail_put_pages;
Eric Anholt673a3942008-07-30 12:06:12 -0700755
Eric Anholt40123c12009-03-09 13:42:30 -0700756 obj_priv = obj->driver_private;
Eric Anholt673a3942008-07-30 12:06:12 -0700757 offset = args->offset;
Eric Anholt40123c12009-03-09 13:42:30 -0700758 obj_priv->dirty = 1;
Eric Anholt673a3942008-07-30 12:06:12 -0700759
Eric Anholt40123c12009-03-09 13:42:30 -0700760 while (remain > 0) {
761 /* Operation in this page
762 *
763 * page_base = page offset within aperture
764 * page_offset = offset within page
765 * page_length = bytes to copy for this page
766 */
767 page_base = (offset & ~(PAGE_SIZE-1));
768 page_offset = offset & (PAGE_SIZE-1);
769 page_length = remain;
770 if ((page_offset + remain) > PAGE_SIZE)
771 page_length = PAGE_SIZE - page_offset;
772
773 ret = fast_shmem_write(obj_priv->pages,
774 page_base, page_offset,
775 user_data, page_length);
776 if (ret)
777 goto fail_put_pages;
778
779 remain -= page_length;
780 user_data += page_length;
781 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700782 }
783
Eric Anholt40123c12009-03-09 13:42:30 -0700784fail_put_pages:
785 i915_gem_object_put_pages(obj);
786fail_unlock:
Eric Anholt673a3942008-07-30 12:06:12 -0700787 mutex_unlock(&dev->struct_mutex);
788
Eric Anholt40123c12009-03-09 13:42:30 -0700789 return ret;
790}
791
792/**
793 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
794 * the memory and maps it using kmap_atomic for copying.
795 *
796 * This avoids taking mmap_sem for faulting on the user's address while the
797 * struct_mutex is held.
798 */
799static int
800i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
801 struct drm_i915_gem_pwrite *args,
802 struct drm_file *file_priv)
803{
804 struct drm_i915_gem_object *obj_priv = obj->driver_private;
805 struct mm_struct *mm = current->mm;
806 struct page **user_pages;
807 ssize_t remain;
808 loff_t offset, pinned_pages, i;
809 loff_t first_data_page, last_data_page, num_pages;
810 int shmem_page_index, shmem_page_offset;
811 int data_page_index, data_page_offset;
812 int page_length;
813 int ret;
814 uint64_t data_ptr = args->data_ptr;
Eric Anholt280b7132009-03-12 16:56:27 -0700815 int do_bit17_swizzling;
Eric Anholt40123c12009-03-09 13:42:30 -0700816
817 remain = args->size;
818
819 /* Pin the user pages containing the data. We can't fault while
820 * holding the struct mutex, and all of the pwrite implementations
821 * want to hold it while dereferencing the user data.
822 */
823 first_data_page = data_ptr / PAGE_SIZE;
824 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
825 num_pages = last_data_page - first_data_page + 1;
826
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700827 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
Eric Anholt40123c12009-03-09 13:42:30 -0700828 if (user_pages == NULL)
829 return -ENOMEM;
830
831 down_read(&mm->mmap_sem);
832 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
833 num_pages, 0, 0, user_pages, NULL);
834 up_read(&mm->mmap_sem);
835 if (pinned_pages < num_pages) {
836 ret = -EFAULT;
837 goto fail_put_user_pages;
838 }
839
Eric Anholt280b7132009-03-12 16:56:27 -0700840 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
841
Eric Anholt40123c12009-03-09 13:42:30 -0700842 mutex_lock(&dev->struct_mutex);
843
844 ret = i915_gem_object_get_pages(obj);
845 if (ret != 0)
846 goto fail_unlock;
847
848 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
849 if (ret != 0)
850 goto fail_put_pages;
851
852 obj_priv = obj->driver_private;
853 offset = args->offset;
854 obj_priv->dirty = 1;
855
856 while (remain > 0) {
857 /* Operation in this page
858 *
859 * shmem_page_index = page number within shmem file
860 * shmem_page_offset = offset within page in shmem file
861 * data_page_index = page number in get_user_pages return
862 * data_page_offset = offset with data_page_index page.
863 * page_length = bytes to copy for this page
864 */
865 shmem_page_index = offset / PAGE_SIZE;
866 shmem_page_offset = offset & ~PAGE_MASK;
867 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
868 data_page_offset = data_ptr & ~PAGE_MASK;
869
870 page_length = remain;
871 if ((shmem_page_offset + page_length) > PAGE_SIZE)
872 page_length = PAGE_SIZE - shmem_page_offset;
873 if ((data_page_offset + page_length) > PAGE_SIZE)
874 page_length = PAGE_SIZE - data_page_offset;
875
Eric Anholt280b7132009-03-12 16:56:27 -0700876 if (do_bit17_swizzling) {
877 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
878 shmem_page_offset,
879 user_pages[data_page_index],
880 data_page_offset,
881 page_length,
882 0);
883 } else {
884 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
885 shmem_page_offset,
886 user_pages[data_page_index],
887 data_page_offset,
888 page_length);
889 }
Eric Anholt40123c12009-03-09 13:42:30 -0700890 if (ret)
891 goto fail_put_pages;
892
893 remain -= page_length;
894 data_ptr += page_length;
895 offset += page_length;
896 }
897
898fail_put_pages:
899 i915_gem_object_put_pages(obj);
900fail_unlock:
901 mutex_unlock(&dev->struct_mutex);
902fail_put_user_pages:
903 for (i = 0; i < pinned_pages; i++)
904 page_cache_release(user_pages[i]);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700905 drm_free_large(user_pages);
Eric Anholt40123c12009-03-09 13:42:30 -0700906
907 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700908}
909
910/**
911 * Writes data to the object referenced by handle.
912 *
913 * On error, the contents of the buffer that were to be modified are undefined.
914 */
915int
916i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
917 struct drm_file *file_priv)
918{
919 struct drm_i915_gem_pwrite *args = data;
920 struct drm_gem_object *obj;
921 struct drm_i915_gem_object *obj_priv;
922 int ret = 0;
923
924 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
925 if (obj == NULL)
926 return -EBADF;
927 obj_priv = obj->driver_private;
928
929 /* Bounds check destination.
930 *
931 * XXX: This could use review for overflow issues...
932 */
933 if (args->offset > obj->size || args->size > obj->size ||
934 args->offset + args->size > obj->size) {
935 drm_gem_object_unreference(obj);
936 return -EINVAL;
937 }
938
939 /* We can only do the GTT pwrite on untiled buffers, as otherwise
940 * it would end up going through the fenced access, and we'll get
941 * different detiling behavior between reading and writing.
942 * pread/pwrite currently are reading and writing from the CPU
943 * perspective, requiring manual detiling by the client.
944 */
Dave Airlie71acb5e2008-12-30 20:31:46 +1000945 if (obj_priv->phys_obj)
946 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
947 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
Eric Anholt3de09aa2009-03-09 09:42:23 -0700948 dev->gtt_total != 0) {
949 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
950 if (ret == -EFAULT) {
951 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
952 file_priv);
953 }
Eric Anholt280b7132009-03-12 16:56:27 -0700954 } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
955 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
Eric Anholt40123c12009-03-09 13:42:30 -0700956 } else {
957 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
958 if (ret == -EFAULT) {
959 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
960 file_priv);
961 }
962 }
Eric Anholt673a3942008-07-30 12:06:12 -0700963
964#if WATCH_PWRITE
965 if (ret)
966 DRM_INFO("pwrite failed %d\n", ret);
967#endif
968
969 drm_gem_object_unreference(obj);
970
971 return ret;
972}
973
974/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800975 * Called when user space prepares to use an object with the CPU, either
976 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -0700977 */
978int
979i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
980 struct drm_file *file_priv)
981{
982 struct drm_i915_gem_set_domain *args = data;
983 struct drm_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800984 uint32_t read_domains = args->read_domains;
985 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -0700986 int ret;
987
988 if (!(dev->driver->driver_features & DRIVER_GEM))
989 return -ENODEV;
990
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800991 /* Only handle setting domains to types used by the CPU. */
992 if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
993 return -EINVAL;
994
995 if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
996 return -EINVAL;
997
998 /* Having something in the write domain implies it's in the read
999 * domain, and only that read domain. Enforce that in the request.
1000 */
1001 if (write_domain != 0 && read_domains != write_domain)
1002 return -EINVAL;
1003
Eric Anholt673a3942008-07-30 12:06:12 -07001004 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1005 if (obj == NULL)
1006 return -EBADF;
1007
1008 mutex_lock(&dev->struct_mutex);
1009#if WATCH_BUF
1010 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001011 obj, obj->size, read_domains, write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07001012#endif
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001013 if (read_domains & I915_GEM_DOMAIN_GTT) {
1014 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001015
1016 /* Silently promote "you're not bound, there was nothing to do"
1017 * to success, since the client was just asking us to
1018 * make sure everything was done.
1019 */
1020 if (ret == -EINVAL)
1021 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001022 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001023 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001024 }
1025
Eric Anholt673a3942008-07-30 12:06:12 -07001026 drm_gem_object_unreference(obj);
1027 mutex_unlock(&dev->struct_mutex);
1028 return ret;
1029}
1030
1031/**
1032 * Called when user space has done writes to this buffer
1033 */
1034int
1035i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1036 struct drm_file *file_priv)
1037{
1038 struct drm_i915_gem_sw_finish *args = data;
1039 struct drm_gem_object *obj;
1040 struct drm_i915_gem_object *obj_priv;
1041 int ret = 0;
1042
1043 if (!(dev->driver->driver_features & DRIVER_GEM))
1044 return -ENODEV;
1045
1046 mutex_lock(&dev->struct_mutex);
1047 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1048 if (obj == NULL) {
1049 mutex_unlock(&dev->struct_mutex);
1050 return -EBADF;
1051 }
1052
1053#if WATCH_BUF
1054 DRM_INFO("%s: sw_finish %d (%p %d)\n",
1055 __func__, args->handle, obj, obj->size);
1056#endif
1057 obj_priv = obj->driver_private;
1058
1059 /* Pinned buffers may be scanout, so flush the cache */
Eric Anholte47c68e2008-11-14 13:35:19 -08001060 if (obj_priv->pin_count)
1061 i915_gem_object_flush_cpu_write_domain(obj);
1062
Eric Anholt673a3942008-07-30 12:06:12 -07001063 drm_gem_object_unreference(obj);
1064 mutex_unlock(&dev->struct_mutex);
1065 return ret;
1066}
1067
1068/**
1069 * Maps the contents of an object, returning the address it is mapped
1070 * into.
1071 *
1072 * While the mapping holds a reference on the contents of the object, it doesn't
1073 * imply a ref on the object itself.
1074 */
1075int
1076i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1077 struct drm_file *file_priv)
1078{
1079 struct drm_i915_gem_mmap *args = data;
1080 struct drm_gem_object *obj;
1081 loff_t offset;
1082 unsigned long addr;
1083
1084 if (!(dev->driver->driver_features & DRIVER_GEM))
1085 return -ENODEV;
1086
1087 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1088 if (obj == NULL)
1089 return -EBADF;
1090
1091 offset = args->offset;
1092
1093 down_write(&current->mm->mmap_sem);
1094 addr = do_mmap(obj->filp, 0, args->size,
1095 PROT_READ | PROT_WRITE, MAP_SHARED,
1096 args->offset);
1097 up_write(&current->mm->mmap_sem);
1098 mutex_lock(&dev->struct_mutex);
1099 drm_gem_object_unreference(obj);
1100 mutex_unlock(&dev->struct_mutex);
1101 if (IS_ERR((void *)addr))
1102 return addr;
1103
1104 args->addr_ptr = (uint64_t) addr;
1105
1106 return 0;
1107}
1108
Jesse Barnesde151cf2008-11-12 10:03:55 -08001109/**
1110 * i915_gem_fault - fault a page into the GTT
1111 * vma: VMA in question
1112 * vmf: fault info
1113 *
1114 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1115 * from userspace. The fault handler takes care of binding the object to
1116 * the GTT (if needed), allocating and programming a fence register (again,
1117 * only if needed based on whether the old reg is still valid or the object
1118 * is tiled) and inserting a new PTE into the faulting process.
1119 *
1120 * Note that the faulting process may involve evicting existing objects
1121 * from the GTT and/or fence registers to make room. So performance may
1122 * suffer if the GTT working set is large or there are few fence registers
1123 * left.
1124 */
1125int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1126{
1127 struct drm_gem_object *obj = vma->vm_private_data;
1128 struct drm_device *dev = obj->dev;
1129 struct drm_i915_private *dev_priv = dev->dev_private;
1130 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1131 pgoff_t page_offset;
1132 unsigned long pfn;
1133 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001134 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001135
1136 /* We don't use vmf->pgoff since that has the fake offset */
1137 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1138 PAGE_SHIFT;
1139
1140 /* Now bind it into the GTT if needed */
1141 mutex_lock(&dev->struct_mutex);
1142 if (!obj_priv->gtt_space) {
1143 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1144 if (ret) {
1145 mutex_unlock(&dev->struct_mutex);
1146 return VM_FAULT_SIGBUS;
1147 }
Jesse Barnes14b60392009-05-20 16:47:08 -04001148 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001149 }
1150
1151 /* Need a new fence register? */
1152 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
Eric Anholtd9ddcb92009-01-27 10:33:49 -08001153 obj_priv->tiling_mode != I915_TILING_NONE) {
Jesse Barnes0f973f22009-01-26 17:10:45 -08001154 ret = i915_gem_object_get_fence_reg(obj, write);
Chris Wilson7d8d58b2009-02-04 14:15:10 +00001155 if (ret) {
1156 mutex_unlock(&dev->struct_mutex);
Eric Anholtd9ddcb92009-01-27 10:33:49 -08001157 return VM_FAULT_SIGBUS;
Chris Wilson7d8d58b2009-02-04 14:15:10 +00001158 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08001159 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001160
1161 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1162 page_offset;
1163
1164 /* Finally, remap it using the new GTT offset */
1165 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1166
1167 mutex_unlock(&dev->struct_mutex);
1168
1169 switch (ret) {
1170 case -ENOMEM:
1171 case -EAGAIN:
1172 return VM_FAULT_OOM;
1173 case -EFAULT:
Jesse Barnes959b8872009-03-20 14:16:33 -07001174 case -EINVAL:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001175 return VM_FAULT_SIGBUS;
1176 default:
1177 return VM_FAULT_NOPAGE;
1178 }
1179}
1180
1181/**
1182 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1183 * @obj: obj in question
1184 *
1185 * GEM memory mapping works by handing back to userspace a fake mmap offset
1186 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1187 * up the object based on the offset and sets up the various memory mapping
1188 * structures.
1189 *
1190 * This routine allocates and attaches a fake offset for @obj.
1191 */
1192static int
1193i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1194{
1195 struct drm_device *dev = obj->dev;
1196 struct drm_gem_mm *mm = dev->mm_private;
1197 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1198 struct drm_map_list *list;
Benjamin Herrenschmidtf77d3902009-02-02 16:55:46 +11001199 struct drm_local_map *map;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001200 int ret = 0;
1201
1202 /* Set the object up for mmap'ing */
1203 list = &obj->map_list;
1204 list->map = drm_calloc(1, sizeof(struct drm_map_list),
1205 DRM_MEM_DRIVER);
1206 if (!list->map)
1207 return -ENOMEM;
1208
1209 map = list->map;
1210 map->type = _DRM_GEM;
1211 map->size = obj->size;
1212 map->handle = obj;
1213
1214 /* Get a DRM GEM mmap offset allocated... */
1215 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1216 obj->size / PAGE_SIZE, 0, 0);
1217 if (!list->file_offset_node) {
1218 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1219 ret = -ENOMEM;
1220 goto out_free_list;
1221 }
1222
1223 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1224 obj->size / PAGE_SIZE, 0);
1225 if (!list->file_offset_node) {
1226 ret = -ENOMEM;
1227 goto out_free_list;
1228 }
1229
1230 list->hash.key = list->file_offset_node->start;
1231 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1232 DRM_ERROR("failed to add to map hash\n");
1233 goto out_free_mm;
1234 }
1235
1236 /* By now we should be all set, any drm_mmap request on the offset
1237 * below will get to our mmap & fault handler */
1238 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1239
1240 return 0;
1241
1242out_free_mm:
1243 drm_mm_put_block(list->file_offset_node);
1244out_free_list:
1245 drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
1246
1247 return ret;
1248}
1249
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001250static void
1251i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1252{
1253 struct drm_device *dev = obj->dev;
1254 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1255 struct drm_gem_mm *mm = dev->mm_private;
1256 struct drm_map_list *list;
1257
1258 list = &obj->map_list;
1259 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1260
1261 if (list->file_offset_node) {
1262 drm_mm_put_block(list->file_offset_node);
1263 list->file_offset_node = NULL;
1264 }
1265
1266 if (list->map) {
1267 drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
1268 list->map = NULL;
1269 }
1270
1271 obj_priv->mmap_offset = 0;
1272}
1273
Jesse Barnesde151cf2008-11-12 10:03:55 -08001274/**
1275 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1276 * @obj: object to check
1277 *
1278 * Return the required GTT alignment for an object, taking into account
1279 * potential fence register mapping if needed.
1280 */
1281static uint32_t
1282i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1283{
1284 struct drm_device *dev = obj->dev;
1285 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1286 int start, i;
1287
1288 /*
1289 * Minimum alignment is 4k (GTT page size), but might be greater
1290 * if a fence register is needed for the object.
1291 */
1292 if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
1293 return 4096;
1294
1295 /*
1296 * Previous chips need to be aligned to the size of the smallest
1297 * fence register that can contain the object.
1298 */
1299 if (IS_I9XX(dev))
1300 start = 1024*1024;
1301 else
1302 start = 512*1024;
1303
1304 for (i = start; i < obj->size; i <<= 1)
1305 ;
1306
1307 return i;
1308}
1309
1310/**
1311 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1312 * @dev: DRM device
1313 * @data: GTT mapping ioctl data
1314 * @file_priv: GEM object info
1315 *
1316 * Simply returns the fake offset to userspace so it can mmap it.
1317 * The mmap call will end up in drm_gem_mmap(), which will set things
1318 * up so we can get faults in the handler above.
1319 *
1320 * The fault handler will take care of binding the object into the GTT
1321 * (since it may have been evicted to make room for something), allocating
1322 * a fence register, and mapping the appropriate aperture address into
1323 * userspace.
1324 */
1325int
1326i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1327 struct drm_file *file_priv)
1328{
1329 struct drm_i915_gem_mmap_gtt *args = data;
1330 struct drm_i915_private *dev_priv = dev->dev_private;
1331 struct drm_gem_object *obj;
1332 struct drm_i915_gem_object *obj_priv;
1333 int ret;
1334
1335 if (!(dev->driver->driver_features & DRIVER_GEM))
1336 return -ENODEV;
1337
1338 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1339 if (obj == NULL)
1340 return -EBADF;
1341
1342 mutex_lock(&dev->struct_mutex);
1343
1344 obj_priv = obj->driver_private;
1345
1346 if (!obj_priv->mmap_offset) {
1347 ret = i915_gem_create_mmap_offset(obj);
Chris Wilson13af1062009-02-11 14:26:31 +00001348 if (ret) {
1349 drm_gem_object_unreference(obj);
1350 mutex_unlock(&dev->struct_mutex);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001351 return ret;
Chris Wilson13af1062009-02-11 14:26:31 +00001352 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001353 }
1354
1355 args->offset = obj_priv->mmap_offset;
1356
1357 obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
1358
1359 /* Make sure the alignment is correct for fence regs etc */
1360 if (obj_priv->agp_mem &&
1361 (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
1362 drm_gem_object_unreference(obj);
1363 mutex_unlock(&dev->struct_mutex);
1364 return -EINVAL;
1365 }
1366
1367 /*
1368 * Pull it into the GTT so that we have a page list (makes the
1369 * initial fault faster and any subsequent flushing possible).
1370 */
1371 if (!obj_priv->agp_mem) {
1372 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1373 if (ret) {
1374 drm_gem_object_unreference(obj);
1375 mutex_unlock(&dev->struct_mutex);
1376 return ret;
1377 }
Jesse Barnes14b60392009-05-20 16:47:08 -04001378 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001379 }
1380
1381 drm_gem_object_unreference(obj);
1382 mutex_unlock(&dev->struct_mutex);
1383
1384 return 0;
1385}
1386
Ben Gamari6911a9b2009-04-02 11:24:54 -07001387void
Eric Anholt856fa192009-03-19 14:10:50 -07001388i915_gem_object_put_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001389{
1390 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1391 int page_count = obj->size / PAGE_SIZE;
1392 int i;
1393
Eric Anholt856fa192009-03-19 14:10:50 -07001394 BUG_ON(obj_priv->pages_refcount == 0);
1395
1396 if (--obj_priv->pages_refcount != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07001397 return;
1398
Eric Anholt280b7132009-03-12 16:56:27 -07001399 if (obj_priv->tiling_mode != I915_TILING_NONE)
1400 i915_gem_object_save_bit_17_swizzle(obj);
1401
Eric Anholt673a3942008-07-30 12:06:12 -07001402 for (i = 0; i < page_count; i++)
Eric Anholt856fa192009-03-19 14:10:50 -07001403 if (obj_priv->pages[i] != NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07001404 if (obj_priv->dirty)
Eric Anholt856fa192009-03-19 14:10:50 -07001405 set_page_dirty(obj_priv->pages[i]);
1406 mark_page_accessed(obj_priv->pages[i]);
1407 page_cache_release(obj_priv->pages[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07001408 }
1409 obj_priv->dirty = 0;
1410
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07001411 drm_free_large(obj_priv->pages);
Eric Anholt856fa192009-03-19 14:10:50 -07001412 obj_priv->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001413}
1414
1415static void
Eric Anholtce44b0e2008-11-06 16:00:31 -08001416i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001417{
1418 struct drm_device *dev = obj->dev;
1419 drm_i915_private_t *dev_priv = dev->dev_private;
1420 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1421
1422 /* Add a reference if we're newly entering the active list. */
1423 if (!obj_priv->active) {
1424 drm_gem_object_reference(obj);
1425 obj_priv->active = 1;
1426 }
1427 /* Move from whatever list we were on to the tail of execution. */
Carl Worth5e118f42009-03-20 11:54:25 -07001428 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001429 list_move_tail(&obj_priv->list,
1430 &dev_priv->mm.active_list);
Carl Worth5e118f42009-03-20 11:54:25 -07001431 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001432 obj_priv->last_rendering_seqno = seqno;
Eric Anholt673a3942008-07-30 12:06:12 -07001433}
1434
Eric Anholtce44b0e2008-11-06 16:00:31 -08001435static void
1436i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1437{
1438 struct drm_device *dev = obj->dev;
1439 drm_i915_private_t *dev_priv = dev->dev_private;
1440 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1441
1442 BUG_ON(!obj_priv->active);
1443 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1444 obj_priv->last_rendering_seqno = 0;
1445}
Eric Anholt673a3942008-07-30 12:06:12 -07001446
1447static void
1448i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1449{
1450 struct drm_device *dev = obj->dev;
1451 drm_i915_private_t *dev_priv = dev->dev_private;
1452 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1453
1454 i915_verify_inactive(dev, __FILE__, __LINE__);
1455 if (obj_priv->pin_count != 0)
1456 list_del_init(&obj_priv->list);
1457 else
1458 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1459
Eric Anholtce44b0e2008-11-06 16:00:31 -08001460 obj_priv->last_rendering_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001461 if (obj_priv->active) {
1462 obj_priv->active = 0;
1463 drm_gem_object_unreference(obj);
1464 }
1465 i915_verify_inactive(dev, __FILE__, __LINE__);
1466}
1467
1468/**
1469 * Creates a new sequence number, emitting a write of it to the status page
1470 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1471 *
1472 * Must be called with struct_lock held.
1473 *
1474 * Returned sequence numbers are nonzero on success.
1475 */
1476static uint32_t
1477i915_add_request(struct drm_device *dev, uint32_t flush_domains)
1478{
1479 drm_i915_private_t *dev_priv = dev->dev_private;
1480 struct drm_i915_gem_request *request;
1481 uint32_t seqno;
1482 int was_empty;
1483 RING_LOCALS;
1484
1485 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
1486 if (request == NULL)
1487 return 0;
1488
1489 /* Grab the seqno we're going to make this request be, and bump the
1490 * next (skipping 0 so it can be the reserved no-seqno value).
1491 */
1492 seqno = dev_priv->mm.next_gem_seqno;
1493 dev_priv->mm.next_gem_seqno++;
1494 if (dev_priv->mm.next_gem_seqno == 0)
1495 dev_priv->mm.next_gem_seqno++;
1496
1497 BEGIN_LP_RING(4);
1498 OUT_RING(MI_STORE_DWORD_INDEX);
1499 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1500 OUT_RING(seqno);
1501
1502 OUT_RING(MI_USER_INTERRUPT);
1503 ADVANCE_LP_RING();
1504
1505 DRM_DEBUG("%d\n", seqno);
1506
1507 request->seqno = seqno;
1508 request->emitted_jiffies = jiffies;
Eric Anholt673a3942008-07-30 12:06:12 -07001509 was_empty = list_empty(&dev_priv->mm.request_list);
1510 list_add_tail(&request->list, &dev_priv->mm.request_list);
1511
Eric Anholtce44b0e2008-11-06 16:00:31 -08001512 /* Associate any objects on the flushing list matching the write
1513 * domain we're flushing with our flush.
1514 */
1515 if (flush_domains != 0) {
1516 struct drm_i915_gem_object *obj_priv, *next;
1517
1518 list_for_each_entry_safe(obj_priv, next,
1519 &dev_priv->mm.flushing_list, list) {
1520 struct drm_gem_object *obj = obj_priv->obj;
1521
1522 if ((obj->write_domain & flush_domains) ==
1523 obj->write_domain) {
1524 obj->write_domain = 0;
1525 i915_gem_object_move_to_active(obj, seqno);
1526 }
1527 }
1528
1529 }
1530
Keith Packard6dbe2772008-10-14 21:41:13 -07001531 if (was_empty && !dev_priv->mm.suspended)
Eric Anholt673a3942008-07-30 12:06:12 -07001532 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
1533 return seqno;
1534}
1535
1536/**
1537 * Command execution barrier
1538 *
1539 * Ensures that all commands in the ring are finished
1540 * before signalling the CPU
1541 */
Eric Anholt3043c602008-10-02 12:24:47 -07001542static uint32_t
Eric Anholt673a3942008-07-30 12:06:12 -07001543i915_retire_commands(struct drm_device *dev)
1544{
1545 drm_i915_private_t *dev_priv = dev->dev_private;
1546 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1547 uint32_t flush_domains = 0;
1548 RING_LOCALS;
1549
1550 /* The sampler always gets flushed on i965 (sigh) */
1551 if (IS_I965G(dev))
1552 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1553 BEGIN_LP_RING(2);
1554 OUT_RING(cmd);
1555 OUT_RING(0); /* noop */
1556 ADVANCE_LP_RING();
1557 return flush_domains;
1558}
1559
1560/**
1561 * Moves buffers associated only with the given active seqno from the active
1562 * to inactive list, potentially freeing them.
1563 */
1564static void
1565i915_gem_retire_request(struct drm_device *dev,
1566 struct drm_i915_gem_request *request)
1567{
1568 drm_i915_private_t *dev_priv = dev->dev_private;
1569
1570 /* Move any buffers on the active list that are no longer referenced
1571 * by the ringbuffer to the flushing/inactive lists as appropriate.
1572 */
Carl Worth5e118f42009-03-20 11:54:25 -07001573 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001574 while (!list_empty(&dev_priv->mm.active_list)) {
1575 struct drm_gem_object *obj;
1576 struct drm_i915_gem_object *obj_priv;
1577
1578 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1579 struct drm_i915_gem_object,
1580 list);
1581 obj = obj_priv->obj;
1582
1583 /* If the seqno being retired doesn't match the oldest in the
1584 * list, then the oldest in the list must still be newer than
1585 * this seqno.
1586 */
1587 if (obj_priv->last_rendering_seqno != request->seqno)
Carl Worth5e118f42009-03-20 11:54:25 -07001588 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001589
Eric Anholt673a3942008-07-30 12:06:12 -07001590#if WATCH_LRU
1591 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1592 __func__, request->seqno, obj);
1593#endif
1594
Eric Anholtce44b0e2008-11-06 16:00:31 -08001595 if (obj->write_domain != 0)
1596 i915_gem_object_move_to_flushing(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001597 else {
1598 /* Take a reference on the object so it won't be
1599 * freed while the spinlock is held. The list
1600 * protection for this spinlock is safe when breaking
1601 * the lock like this since the next thing we do
1602 * is just get the head of the list again.
1603 */
1604 drm_gem_object_reference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001605 i915_gem_object_move_to_inactive(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001606 spin_unlock(&dev_priv->mm.active_list_lock);
1607 drm_gem_object_unreference(obj);
1608 spin_lock(&dev_priv->mm.active_list_lock);
1609 }
Eric Anholt673a3942008-07-30 12:06:12 -07001610 }
Carl Worth5e118f42009-03-20 11:54:25 -07001611out:
1612 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001613}
1614
1615/**
 1616 * Returns true if seq1 is later than or equal to seq2.
1617 */
1618static int
1619i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1620{
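	/* Relies on two's-complement wraparound: the comparison stays
	 * correct across 32-bit seqno wraparound as long as the two values
	 * are within 2^31 of each other.  For example, seq1 = 2 and
	 * seq2 = 0xfffffffe give (int32_t)4 >= 0, so seq1 is treated as
	 * the newer of the two.
	 */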
1621 return (int32_t)(seq1 - seq2) >= 0;
1622}
1623
1624uint32_t
1625i915_get_gem_seqno(struct drm_device *dev)
1626{
1627 drm_i915_private_t *dev_priv = dev->dev_private;
1628
1629 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1630}
1631
1632/**
1633 * This function clears the request list as sequence numbers are passed.
1634 */
1635void
1636i915_gem_retire_requests(struct drm_device *dev)
1637{
1638 drm_i915_private_t *dev_priv = dev->dev_private;
1639 uint32_t seqno;
1640
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001641 if (!dev_priv->hw_status_page)
1642 return;
1643
Eric Anholt673a3942008-07-30 12:06:12 -07001644 seqno = i915_get_gem_seqno(dev);
1645
1646 while (!list_empty(&dev_priv->mm.request_list)) {
1647 struct drm_i915_gem_request *request;
1648 uint32_t retiring_seqno;
1649
1650 request = list_first_entry(&dev_priv->mm.request_list,
1651 struct drm_i915_gem_request,
1652 list);
1653 retiring_seqno = request->seqno;
1654
1655 if (i915_seqno_passed(seqno, retiring_seqno) ||
1656 dev_priv->mm.wedged) {
1657 i915_gem_retire_request(dev, request);
1658
1659 list_del(&request->list);
1660 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
1661 } else
1662 break;
1663 }
1664}
1665
1666void
1667i915_gem_retire_work_handler(struct work_struct *work)
1668{
1669 drm_i915_private_t *dev_priv;
1670 struct drm_device *dev;
1671
1672 dev_priv = container_of(work, drm_i915_private_t,
1673 mm.retire_work.work);
1674 dev = dev_priv->dev;
1675
1676 mutex_lock(&dev->struct_mutex);
1677 i915_gem_retire_requests(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07001678 if (!dev_priv->mm.suspended &&
1679 !list_empty(&dev_priv->mm.request_list))
Eric Anholt673a3942008-07-30 12:06:12 -07001680 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
1681 mutex_unlock(&dev->struct_mutex);
1682}
1683
1684/**
1685 * Waits for a sequence number to be signaled, and cleans up the
1686 * request and object lists appropriately for that event.
1687 */
Eric Anholt3043c602008-10-02 12:24:47 -07001688static int
Eric Anholt673a3942008-07-30 12:06:12 -07001689i915_wait_request(struct drm_device *dev, uint32_t seqno)
1690{
1691 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001692 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001693 int ret = 0;
1694
1695 BUG_ON(seqno == 0);
1696
1697 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001698 ier = I915_READ(IER);
1699 if (!ier) {
1700 DRM_ERROR("something (likely vbetool) disabled "
1701 "interrupts, re-enabling\n");
1702 i915_driver_irq_preinstall(dev);
1703 i915_driver_irq_postinstall(dev);
1704 }
1705
Eric Anholt673a3942008-07-30 12:06:12 -07001706 dev_priv->mm.waiting_gem_seqno = seqno;
1707 i915_user_irq_get(dev);
1708 ret = wait_event_interruptible(dev_priv->irq_queue,
1709 i915_seqno_passed(i915_get_gem_seqno(dev),
1710 seqno) ||
1711 dev_priv->mm.wedged);
1712 i915_user_irq_put(dev);
1713 dev_priv->mm.waiting_gem_seqno = 0;
1714 }
1715 if (dev_priv->mm.wedged)
1716 ret = -EIO;
1717
1718 if (ret && ret != -ERESTARTSYS)
1719 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1720 __func__, ret, seqno, i915_get_gem_seqno(dev));
1721
1722 /* Directly dispatch request retiring. While we have the work queue
1723 * to handle this, the waiter on a request often wants an associated
1724 * buffer to have made it to the inactive list, and we would need
1725 * a separate wait queue to handle that.
1726 */
1727 if (ret == 0)
1728 i915_gem_retire_requests(dev);
1729
1730 return ret;
1731}
1732
1733static void
1734i915_gem_flush(struct drm_device *dev,
1735 uint32_t invalidate_domains,
1736 uint32_t flush_domains)
1737{
1738 drm_i915_private_t *dev_priv = dev->dev_private;
1739 uint32_t cmd;
1740 RING_LOCALS;
1741
1742#if WATCH_EXEC
1743 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1744 invalidate_domains, flush_domains);
1745#endif
1746
1747 if (flush_domains & I915_GEM_DOMAIN_CPU)
1748 drm_agp_chipset_flush(dev);
1749
1750 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
1751 I915_GEM_DOMAIN_GTT)) {
1752 /*
1753 * read/write caches:
1754 *
1755 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1756 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1757 * also flushed at 2d versus 3d pipeline switches.
1758 *
1759 * read-only caches:
1760 *
1761 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1762 * MI_READ_FLUSH is set, and is always flushed on 965.
1763 *
1764 * I915_GEM_DOMAIN_COMMAND may not exist?
1765 *
1766 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1767 * invalidated when MI_EXE_FLUSH is set.
1768 *
1769 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1770 * invalidated with every MI_FLUSH.
1771 *
1772 * TLBs:
1773 *
1774 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
 1775	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
1776 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1777 * are flushed at any MI_FLUSH.
1778 */
1779
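		/* Start with a flush that leaves the render cache alone
		 * (MI_NO_WRITE_FLUSH set) and strengthen it below according
		 * to the domains being invalidated or flushed.
		 */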
1780 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1781 if ((invalidate_domains|flush_domains) &
1782 I915_GEM_DOMAIN_RENDER)
1783 cmd &= ~MI_NO_WRITE_FLUSH;
1784 if (!IS_I965G(dev)) {
1785 /*
1786 * On the 965, the sampler cache always gets flushed
1787 * and this bit is reserved.
1788 */
1789 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1790 cmd |= MI_READ_FLUSH;
1791 }
1792 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1793 cmd |= MI_EXE_FLUSH;
1794
1795#if WATCH_EXEC
1796 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1797#endif
1798 BEGIN_LP_RING(2);
1799 OUT_RING(cmd);
1800 OUT_RING(0); /* noop */
1801 ADVANCE_LP_RING();
1802 }
1803}
1804
1805/**
1806 * Ensures that all rendering to the object has completed and the object is
1807 * safe to unbind from the GTT or access from the CPU.
1808 */
1809static int
1810i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1811{
1812 struct drm_device *dev = obj->dev;
1813 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1814 int ret;
1815
Eric Anholte47c68e2008-11-14 13:35:19 -08001816 /* This function only exists to support waiting for existing rendering,
1817 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07001818 */
Eric Anholte47c68e2008-11-14 13:35:19 -08001819 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07001820
1821 /* If there is rendering queued on the buffer being evicted, wait for
1822 * it.
1823 */
1824 if (obj_priv->active) {
1825#if WATCH_BUF
1826 DRM_INFO("%s: object %p wait for seqno %08x\n",
1827 __func__, obj, obj_priv->last_rendering_seqno);
1828#endif
1829 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1830 if (ret != 0)
1831 return ret;
1832 }
1833
1834 return 0;
1835}
1836
1837/**
1838 * Unbinds an object from the GTT aperture.
1839 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08001840int
Eric Anholt673a3942008-07-30 12:06:12 -07001841i915_gem_object_unbind(struct drm_gem_object *obj)
1842{
1843 struct drm_device *dev = obj->dev;
1844 struct drm_i915_gem_object *obj_priv = obj->driver_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001845 loff_t offset;
Eric Anholt673a3942008-07-30 12:06:12 -07001846 int ret = 0;
1847
1848#if WATCH_BUF
1849 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1850 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1851#endif
1852 if (obj_priv->gtt_space == NULL)
1853 return 0;
1854
1855 if (obj_priv->pin_count != 0) {
1856 DRM_ERROR("Attempting to unbind pinned buffer\n");
1857 return -EINVAL;
1858 }
1859
Eric Anholt673a3942008-07-30 12:06:12 -07001860 /* Move the object to the CPU domain to ensure that
1861 * any possible CPU writes while it's not in the GTT
1862 * are flushed when we go to remap it. This will
1863 * also ensure that all pending GPU writes are finished
1864 * before we unbind.
1865 */
Eric Anholte47c68e2008-11-14 13:35:19 -08001866 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07001867 if (ret) {
Eric Anholte47c68e2008-11-14 13:35:19 -08001868 if (ret != -ERESTARTSYS)
1869 DRM_ERROR("set_domain failed: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07001870 return ret;
1871 }
1872
1873 if (obj_priv->agp_mem != NULL) {
1874 drm_unbind_agp(obj_priv->agp_mem);
1875 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1876 obj_priv->agp_mem = NULL;
1877 }
1878
1879 BUG_ON(obj_priv->active);
1880
Jesse Barnesde151cf2008-11-12 10:03:55 -08001881 /* blow away mappings if mapped through GTT */
1882 offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
Jesse Barnes79e53942008-11-07 14:24:08 -08001883 if (dev->dev_mapping)
1884 unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001885
1886 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1887 i915_gem_clear_fence_reg(obj);
1888
Eric Anholt856fa192009-03-19 14:10:50 -07001889 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001890
1891 if (obj_priv->gtt_space) {
1892 atomic_dec(&dev->gtt_count);
1893 atomic_sub(obj->size, &dev->gtt_memory);
1894
1895 drm_mm_put_block(obj_priv->gtt_space);
1896 obj_priv->gtt_space = NULL;
1897 }
1898
1899 /* Remove ourselves from the LRU list if present. */
1900 if (!list_empty(&obj_priv->list))
1901 list_del_init(&obj_priv->list);
1902
1903 return 0;
1904}
1905
1906static int
1907i915_gem_evict_something(struct drm_device *dev)
1908{
1909 drm_i915_private_t *dev_priv = dev->dev_private;
1910 struct drm_gem_object *obj;
1911 struct drm_i915_gem_object *obj_priv;
1912 int ret = 0;
1913
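	/* Eviction strategy, in order of preference: take an inactive
	 * buffer immediately, otherwise wait on the oldest outstanding
	 * request in the hope that retiring it frees something, otherwise
	 * emit a flush so buffers on the flushing list can retire.  If
	 * none of those applies, the GTT is simply full.
	 */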
1914 for (;;) {
1915 /* If there's an inactive buffer available now, grab it
1916 * and be done.
1917 */
1918 if (!list_empty(&dev_priv->mm.inactive_list)) {
1919 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
1920 struct drm_i915_gem_object,
1921 list);
1922 obj = obj_priv->obj;
1923 BUG_ON(obj_priv->pin_count != 0);
1924#if WATCH_LRU
1925 DRM_INFO("%s: evicting %p\n", __func__, obj);
1926#endif
1927 BUG_ON(obj_priv->active);
1928
1929 /* Wait on the rendering and unbind the buffer. */
1930 ret = i915_gem_object_unbind(obj);
1931 break;
1932 }
1933
1934 /* If we didn't get anything, but the ring is still processing
1935 * things, wait for one of those things to finish and hopefully
1936 * leave us a buffer to evict.
1937 */
1938 if (!list_empty(&dev_priv->mm.request_list)) {
1939 struct drm_i915_gem_request *request;
1940
1941 request = list_first_entry(&dev_priv->mm.request_list,
1942 struct drm_i915_gem_request,
1943 list);
1944
1945 ret = i915_wait_request(dev, request->seqno);
1946 if (ret)
1947 break;
1948
1949 /* if waiting caused an object to become inactive,
1950 * then loop around and wait for it. Otherwise, we
1951 * assume that waiting freed and unbound something,
1952 * so there should now be some space in the GTT
1953 */
1954 if (!list_empty(&dev_priv->mm.inactive_list))
1955 continue;
1956 break;
1957 }
1958
1959 /* If we didn't have anything on the request list but there
1960 * are buffers awaiting a flush, emit one and try again.
1961 * When we wait on it, those buffers waiting for that flush
1962 * will get moved to inactive.
1963 */
1964 if (!list_empty(&dev_priv->mm.flushing_list)) {
1965 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1966 struct drm_i915_gem_object,
1967 list);
1968 obj = obj_priv->obj;
1969
1970 i915_gem_flush(dev,
1971 obj->write_domain,
1972 obj->write_domain);
1973 i915_add_request(dev, obj->write_domain);
1974
1975 obj = NULL;
1976 continue;
1977 }
1978
1979 DRM_ERROR("inactive empty %d request empty %d "
1980 "flushing empty %d\n",
1981 list_empty(&dev_priv->mm.inactive_list),
1982 list_empty(&dev_priv->mm.request_list),
1983 list_empty(&dev_priv->mm.flushing_list));
1984 /* If we didn't do any of the above, there's nothing to be done
1985 * and we just can't fit it in.
1986 */
1987 return -ENOMEM;
1988 }
1989 return ret;
1990}
1991
1992static int
Keith Packardac94a962008-11-20 23:30:27 -08001993i915_gem_evict_everything(struct drm_device *dev)
1994{
1995 int ret;
1996
1997 for (;;) {
1998 ret = i915_gem_evict_something(dev);
1999 if (ret != 0)
2000 break;
2001 }
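	/* i915_gem_evict_something() returns -ENOMEM once there is nothing
	 * left to evict, which is exactly the "everything evicted" state
	 * this function is after, so treat that as success.
	 */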
Owain Ainsworth15c35332008-12-06 20:42:20 -08002002 if (ret == -ENOMEM)
2003 return 0;
Keith Packardac94a962008-11-20 23:30:27 -08002004 return ret;
2005}
2006
Ben Gamari6911a9b2009-04-02 11:24:54 -07002007int
Eric Anholt856fa192009-03-19 14:10:50 -07002008i915_gem_object_get_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002009{
2010 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2011 int page_count, i;
2012 struct address_space *mapping;
2013 struct inode *inode;
2014 struct page *page;
2015 int ret;
2016
Eric Anholt856fa192009-03-19 14:10:50 -07002017 if (obj_priv->pages_refcount++ != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002018 return 0;
2019
2020 /* Get the list of pages out of our struct file. They'll be pinned
2021 * at this point until we release them.
2022 */
2023 page_count = obj->size / PAGE_SIZE;
Eric Anholt856fa192009-03-19 14:10:50 -07002024 BUG_ON(obj_priv->pages != NULL);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07002025 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
Eric Anholt856fa192009-03-19 14:10:50 -07002026 if (obj_priv->pages == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07002027		DRM_ERROR("Failed to allocate page list\n");
Eric Anholt856fa192009-03-19 14:10:50 -07002028 obj_priv->pages_refcount--;
Eric Anholt673a3942008-07-30 12:06:12 -07002029 return -ENOMEM;
2030 }
2031
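	/* The object is backed by a shmem file; read_mapping_page() brings
	 * each backing page into the page cache (reading it in if needed)
	 * and returns it with a reference held, so the pages stay resident
	 * until i915_gem_object_put_pages() releases them.
	 */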
2032 inode = obj->filp->f_path.dentry->d_inode;
2033 mapping = inode->i_mapping;
2034 for (i = 0; i < page_count; i++) {
2035 page = read_mapping_page(mapping, i, NULL);
2036 if (IS_ERR(page)) {
2037 ret = PTR_ERR(page);
2038 DRM_ERROR("read_mapping_page failed: %d\n", ret);
Eric Anholt856fa192009-03-19 14:10:50 -07002039 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002040 return ret;
2041 }
Eric Anholt856fa192009-03-19 14:10:50 -07002042 obj_priv->pages[i] = page;
Eric Anholt673a3942008-07-30 12:06:12 -07002043 }
Eric Anholt280b7132009-03-12 16:56:27 -07002044
2045 if (obj_priv->tiling_mode != I915_TILING_NONE)
2046 i915_gem_object_do_bit_17_swizzle(obj);
2047
Eric Anholt673a3942008-07-30 12:06:12 -07002048 return 0;
2049}
2050
Jesse Barnesde151cf2008-11-12 10:03:55 -08002051static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2052{
2053 struct drm_gem_object *obj = reg->obj;
2054 struct drm_device *dev = obj->dev;
2055 drm_i915_private_t *dev_priv = dev->dev_private;
2056 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2057 int regnum = obj_priv->fence_reg;
2058 uint64_t val;
2059
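	/* The 965 fence register packs the address of the object's last
	 * page into the upper 32 bits, the start address and control bits
	 * into the lower 32 bits, and encodes the pitch in units of 128
	 * bytes (stored minus one).
	 */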
2060 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2061 0xfffff000) << 32;
2062 val |= obj_priv->gtt_offset & 0xfffff000;
2063 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2064 if (obj_priv->tiling_mode == I915_TILING_Y)
2065 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2066 val |= I965_FENCE_REG_VALID;
2067
2068 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2069}
2070
2071static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2072{
2073 struct drm_gem_object *obj = reg->obj;
2074 struct drm_device *dev = obj->dev;
2075 drm_i915_private_t *dev_priv = dev->dev_private;
2076 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2077 int regnum = obj_priv->fence_reg;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002078 int tile_width;
Eric Anholtdc529a42009-03-10 22:34:49 -07002079 uint32_t fence_reg, val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002080 uint32_t pitch_val;
2081
2082 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2083 (obj_priv->gtt_offset & (obj->size - 1))) {
Linus Torvaldsf06da262009-02-09 08:57:29 -08002084 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002085 __func__, obj_priv->gtt_offset, obj->size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002086 return;
2087 }
2088
Jesse Barnes0f973f22009-01-26 17:10:45 -08002089 if (obj_priv->tiling_mode == I915_TILING_Y &&
2090 HAS_128_BYTE_Y_TILING(dev))
2091 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002092 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002093 tile_width = 512;
2094
2095 /* Note: pitch better be a power of two tile widths */
2096 pitch_val = obj_priv->stride / tile_width;
2097 pitch_val = ffs(pitch_val) - 1;
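	/* The register encodes log2 of the pitch in tile widths: e.g. a
	 * 2048 byte stride with 512 byte X tiles is 4 tile widths, which
	 * is stored as pitch_val = 2.
	 */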
Jesse Barnesde151cf2008-11-12 10:03:55 -08002098
2099 val = obj_priv->gtt_offset;
2100 if (obj_priv->tiling_mode == I915_TILING_Y)
2101 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2102 val |= I915_FENCE_SIZE_BITS(obj->size);
2103 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2104 val |= I830_FENCE_REG_VALID;
2105
Eric Anholtdc529a42009-03-10 22:34:49 -07002106 if (regnum < 8)
2107 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2108 else
2109 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2110 I915_WRITE(fence_reg, val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002111}
2112
2113static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2114{
2115 struct drm_gem_object *obj = reg->obj;
2116 struct drm_device *dev = obj->dev;
2117 drm_i915_private_t *dev_priv = dev->dev_private;
2118 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2119 int regnum = obj_priv->fence_reg;
2120 uint32_t val;
2121 uint32_t pitch_val;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002122 uint32_t fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002123
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002124 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
Jesse Barnesde151cf2008-11-12 10:03:55 -08002125 (obj_priv->gtt_offset & (obj->size - 1))) {
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002126 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002127 __func__, obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002128 return;
2129 }
2130
Eric Anholte76a16d2009-05-26 17:44:56 -07002131 pitch_val = obj_priv->stride / 128;
2132 pitch_val = ffs(pitch_val) - 1;
2133 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2134
Jesse Barnesde151cf2008-11-12 10:03:55 -08002135 val = obj_priv->gtt_offset;
2136 if (obj_priv->tiling_mode == I915_TILING_Y)
2137 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002138 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2139 WARN_ON(fence_size_bits & ~0x00000f00);
2140 val |= fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002141 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2142 val |= I830_FENCE_REG_VALID;
2143
2144 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2145
2146}
2147
2148/**
2149 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2150 * @obj: object to map through a fence reg
Jesse Barnes0f973f22009-01-26 17:10:45 -08002151 * @write: object is about to be written
Jesse Barnesde151cf2008-11-12 10:03:55 -08002152 *
2153 * When mapping objects through the GTT, userspace wants to be able to write
2154 * to them without having to worry about swizzling if the object is tiled.
2155 *
2156 * This function walks the fence regs looking for a free one for @obj,
2157 * stealing one if it can't find any.
2158 *
2159 * It then sets up the reg based on the object's properties: address, pitch
2160 * and tiling format.
2161 */
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002162static int
Jesse Barnes0f973f22009-01-26 17:10:45 -08002163i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002164{
2165 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002166 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002167 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2168 struct drm_i915_fence_reg *reg = NULL;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002169 struct drm_i915_gem_object *old_obj_priv = NULL;
2170 int i, ret, avail;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002171
2172 switch (obj_priv->tiling_mode) {
2173 case I915_TILING_NONE:
2174 WARN(1, "allocating a fence for non-tiled object?\n");
2175 break;
2176 case I915_TILING_X:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002177 if (!obj_priv->stride)
2178 return -EINVAL;
2179 WARN((obj_priv->stride & (512 - 1)),
2180 "object 0x%08x is X tiled but has non-512B pitch\n",
2181 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002182 break;
2183 case I915_TILING_Y:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002184 if (!obj_priv->stride)
2185 return -EINVAL;
2186 WARN((obj_priv->stride & (128 - 1)),
2187 "object 0x%08x is Y tiled but has non-128B pitch\n",
2188 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002189 break;
2190 }
2191
2192 /* First try to find a free reg */
Chris Wilson9b2412f2009-02-11 14:26:44 +00002193try_again:
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002194 avail = 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002195 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2196 reg = &dev_priv->fence_regs[i];
2197 if (!reg->obj)
2198 break;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002199
2200 old_obj_priv = reg->obj->driver_private;
2201 if (!old_obj_priv->pin_count)
2202 avail++;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002203 }
2204
2205 /* None available, try to steal one or wait for a user to finish */
2206 if (i == dev_priv->num_fence_regs) {
Chris Wilsond7619c42009-02-11 14:26:47 +00002207 uint32_t seqno = dev_priv->mm.next_gem_seqno;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002208 loff_t offset;
2209
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002210 if (avail == 0)
2211 return -ENOMEM;
2212
Jesse Barnesde151cf2008-11-12 10:03:55 -08002213 for (i = dev_priv->fence_reg_start;
2214 i < dev_priv->num_fence_regs; i++) {
Chris Wilsond7619c42009-02-11 14:26:47 +00002215 uint32_t this_seqno;
2216
Jesse Barnesde151cf2008-11-12 10:03:55 -08002217 reg = &dev_priv->fence_regs[i];
2218 old_obj_priv = reg->obj->driver_private;
Chris Wilsond7619c42009-02-11 14:26:47 +00002219
2220 if (old_obj_priv->pin_count)
2221 continue;
2222
2223 /* i915 uses fences for GPU access to tiled buffers */
2224 if (IS_I965G(dev) || !old_obj_priv->active)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002225 break;
Chris Wilsond7619c42009-02-11 14:26:47 +00002226
2227 /* find the seqno of the first available fence */
2228 this_seqno = old_obj_priv->last_rendering_seqno;
2229 if (this_seqno != 0 &&
2230 reg->obj->write_domain == 0 &&
2231 i915_seqno_passed(seqno, this_seqno))
2232 seqno = this_seqno;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002233 }
2234
2235 /*
2236 * Now things get ugly... we have to wait for one of the
2237 * objects to finish before trying again.
2238 */
2239 if (i == dev_priv->num_fence_regs) {
Chris Wilsond7619c42009-02-11 14:26:47 +00002240 if (seqno == dev_priv->mm.next_gem_seqno) {
2241 i915_gem_flush(dev,
2242 I915_GEM_GPU_DOMAINS,
2243 I915_GEM_GPU_DOMAINS);
2244 seqno = i915_add_request(dev,
2245 I915_GEM_GPU_DOMAINS);
2246 if (seqno == 0)
2247 return -ENOMEM;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002248 }
Chris Wilsond7619c42009-02-11 14:26:47 +00002249
2250 ret = i915_wait_request(dev, seqno);
2251 if (ret)
2252 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002253 goto try_again;
2254 }
2255
Chris Wilsond7619c42009-02-11 14:26:47 +00002256 BUG_ON(old_obj_priv->active ||
2257 (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
2258
Jesse Barnesde151cf2008-11-12 10:03:55 -08002259 /*
2260 * Zap this virtual mapping so we can set up a fence again
2261 * for this object next time we need it.
2262 */
2263 offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
Jesse Barnes79e53942008-11-07 14:24:08 -08002264 if (dev->dev_mapping)
2265 unmap_mapping_range(dev->dev_mapping, offset,
2266 reg->obj->size, 1);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002267 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2268 }
2269
2270 obj_priv->fence_reg = i;
2271 reg->obj = obj;
2272
2273 if (IS_I965G(dev))
2274 i965_write_fence_reg(reg);
2275 else if (IS_I9XX(dev))
2276 i915_write_fence_reg(reg);
2277 else
2278 i830_write_fence_reg(reg);
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002279
2280 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002281}
2282
2283/**
2284 * i915_gem_clear_fence_reg - clear out fence register info
2285 * @obj: object to clear
2286 *
2287 * Zeroes out the fence register itself and clears out the associated
2288 * data structures in dev_priv and obj_priv.
2289 */
2290static void
2291i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2292{
2293 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002294 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002295 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2296
2297 if (IS_I965G(dev))
2298 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
Eric Anholtdc529a42009-03-10 22:34:49 -07002299 else {
2300 uint32_t fence_reg;
2301
2302 if (obj_priv->fence_reg < 8)
2303 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2304 else
2305 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2306 8) * 4;
2307
2308 I915_WRITE(fence_reg, 0);
2309 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002310
2311 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2312 obj_priv->fence_reg = I915_FENCE_REG_NONE;
2313}
2314
Eric Anholt673a3942008-07-30 12:06:12 -07002315/**
2316 * Finds free space in the GTT aperture and binds the object there.
2317 */
2318static int
2319i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2320{
2321 struct drm_device *dev = obj->dev;
2322 drm_i915_private_t *dev_priv = dev->dev_private;
2323 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2324 struct drm_mm_node *free_space;
2325 int page_count, ret;
2326
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08002327 if (dev_priv->mm.suspended)
2328 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002329 if (alignment == 0)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002330 alignment = i915_gem_get_gtt_alignment(obj);
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002331 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002332 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2333 return -EINVAL;
2334 }
2335
2336 search_free:
2337 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2338 obj->size, alignment, 0);
2339 if (free_space != NULL) {
2340 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2341 alignment);
2342 if (obj_priv->gtt_space != NULL) {
2343 obj_priv->gtt_space->private = obj;
2344 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2345 }
2346 }
2347 if (obj_priv->gtt_space == NULL) {
Carl Worth5e118f42009-03-20 11:54:25 -07002348 bool lists_empty;
2349
Eric Anholt673a3942008-07-30 12:06:12 -07002350 /* If the gtt is empty and we're still having trouble
2351 * fitting our object in, we're out of memory.
2352 */
2353#if WATCH_LRU
2354 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2355#endif
Carl Worth5e118f42009-03-20 11:54:25 -07002356 spin_lock(&dev_priv->mm.active_list_lock);
2357 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2358 list_empty(&dev_priv->mm.flushing_list) &&
2359 list_empty(&dev_priv->mm.active_list));
2360 spin_unlock(&dev_priv->mm.active_list_lock);
2361 if (lists_empty) {
Eric Anholt673a3942008-07-30 12:06:12 -07002362 DRM_ERROR("GTT full, but LRU list empty\n");
2363 return -ENOMEM;
2364 }
2365
2366 ret = i915_gem_evict_something(dev);
2367 if (ret != 0) {
Keith Packardac94a962008-11-20 23:30:27 -08002368 if (ret != -ERESTARTSYS)
2369 DRM_ERROR("Failed to evict a buffer %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07002370 return ret;
2371 }
2372 goto search_free;
2373 }
2374
2375#if WATCH_BUF
2376 DRM_INFO("Binding object of size %d at 0x%08x\n",
2377 obj->size, obj_priv->gtt_offset);
2378#endif
Eric Anholt856fa192009-03-19 14:10:50 -07002379 ret = i915_gem_object_get_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002380 if (ret) {
2381 drm_mm_put_block(obj_priv->gtt_space);
2382 obj_priv->gtt_space = NULL;
2383 return ret;
2384 }
2385
2386 page_count = obj->size / PAGE_SIZE;
2387 /* Create an AGP memory structure pointing at our pages, and bind it
2388 * into the GTT.
2389 */
2390 obj_priv->agp_mem = drm_agp_bind_pages(dev,
Eric Anholt856fa192009-03-19 14:10:50 -07002391 obj_priv->pages,
Eric Anholt673a3942008-07-30 12:06:12 -07002392 page_count,
Keith Packardba1eb1d2008-10-14 19:55:10 -07002393 obj_priv->gtt_offset,
2394 obj_priv->agp_type);
Eric Anholt673a3942008-07-30 12:06:12 -07002395 if (obj_priv->agp_mem == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002396 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002397 drm_mm_put_block(obj_priv->gtt_space);
2398 obj_priv->gtt_space = NULL;
2399 return -ENOMEM;
2400 }
2401 atomic_inc(&dev->gtt_count);
2402 atomic_add(obj->size, &dev->gtt_memory);
2403
2404 /* Assert that the object is not currently in any GPU domain. As it
2405 * wasn't in the GTT, there shouldn't be any way it could have been in
2406 * a GPU cache
2407 */
2408 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2409 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2410
2411 return 0;
2412}
2413
2414void
2415i915_gem_clflush_object(struct drm_gem_object *obj)
2416{
2417 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2418
2419 /* If we don't have a page list set up, then we're not pinned
2420 * to GPU, and we can ignore the cache flush because it'll happen
2421 * again at bind time.
2422 */
Eric Anholt856fa192009-03-19 14:10:50 -07002423 if (obj_priv->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002424 return;
2425
Eric Anholtcfa16a02009-05-26 18:46:16 -07002426 /* XXX: The 865 in particular appears to be weird in how it handles
2427 * cache flushing. We haven't figured it out, but the
2428 * clflush+agp_chipset_flush doesn't appear to successfully get the
 2429	 * data visible to the GPU, while wbinvd + agp_chipset_flush does.
2430 */
2431 if (IS_I865G(obj->dev)) {
2432 wbinvd();
2433 return;
2434 }
2435
Eric Anholt856fa192009-03-19 14:10:50 -07002436 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002437}
2438
Eric Anholte47c68e2008-11-14 13:35:19 -08002439/** Flushes any GPU write domain for the object if it's dirty. */
2440static void
2441i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2442{
2443 struct drm_device *dev = obj->dev;
2444 uint32_t seqno;
2445
2446 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2447 return;
2448
2449 /* Queue the GPU write cache flushing we need. */
2450 i915_gem_flush(dev, 0, obj->write_domain);
2451 seqno = i915_add_request(dev, obj->write_domain);
2452 obj->write_domain = 0;
2453 i915_gem_object_move_to_active(obj, seqno);
2454}
2455
2456/** Flushes the GTT write domain for the object if it's dirty. */
2457static void
2458i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2459{
2460 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2461 return;
2462
2463 /* No actual flushing is required for the GTT write domain. Writes
2464 * to it immediately go to main memory as far as we know, so there's
2465 * no chipset flush. It also doesn't land in render cache.
2466 */
2467 obj->write_domain = 0;
2468}
2469
2470/** Flushes the CPU write domain for the object if it's dirty. */
2471static void
2472i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2473{
2474 struct drm_device *dev = obj->dev;
2475
2476 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2477 return;
2478
2479 i915_gem_clflush_object(obj);
2480 drm_agp_chipset_flush(dev);
2481 obj->write_domain = 0;
2482}
2483
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002484/**
2485 * Moves a single object to the GTT read, and possibly write domain.
2486 *
2487 * This function returns when the move is complete, including waiting on
2488 * flushes to occur.
2489 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002490int
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002491i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2492{
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002493 struct drm_i915_gem_object *obj_priv = obj->driver_private;
Eric Anholte47c68e2008-11-14 13:35:19 -08002494 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002495
Eric Anholt02354392008-11-26 13:58:13 -08002496 /* Not valid to be called on unbound objects. */
2497 if (obj_priv->gtt_space == NULL)
2498 return -EINVAL;
2499
Eric Anholte47c68e2008-11-14 13:35:19 -08002500 i915_gem_object_flush_gpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002501 /* Wait on any GPU rendering and flushing to occur. */
Eric Anholte47c68e2008-11-14 13:35:19 -08002502 ret = i915_gem_object_wait_rendering(obj);
2503 if (ret != 0)
2504 return ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002505
2506 /* If we're writing through the GTT domain, then CPU and GPU caches
2507 * will need to be invalidated at next use.
2508 */
2509 if (write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002510 obj->read_domains &= I915_GEM_DOMAIN_GTT;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002511
Eric Anholte47c68e2008-11-14 13:35:19 -08002512 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002513
2514 /* It should now be out of any other write domains, and we can update
2515 * the domain values for our changes.
2516 */
2517 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2518 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002519 if (write) {
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002520 obj->write_domain = I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002521 obj_priv->dirty = 1;
2522 }
2523
2524 return 0;
2525}
2526
2527/**
2528 * Moves a single object to the CPU read, and possibly write domain.
2529 *
2530 * This function returns when the move is complete, including waiting on
2531 * flushes to occur.
2532 */
2533static int
2534i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2535{
Eric Anholte47c68e2008-11-14 13:35:19 -08002536 int ret;
2537
2538 i915_gem_object_flush_gpu_write_domain(obj);
2539 /* Wait on any GPU rendering and flushing to occur. */
2540 ret = i915_gem_object_wait_rendering(obj);
2541 if (ret != 0)
2542 return ret;
2543
2544 i915_gem_object_flush_gtt_write_domain(obj);
2545
2546 /* If we have a partially-valid cache of the object in the CPU,
2547 * finish invalidating it and free the per-page flags.
2548 */
2549 i915_gem_object_set_to_full_cpu_read_domain(obj);
2550
2551 /* Flush the CPU cache if it's still invalid. */
2552 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2553 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002554
2555 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2556 }
2557
2558 /* It should now be out of any other write domains, and we can update
2559 * the domain values for our changes.
2560 */
2561 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2562
2563 /* If we're writing through the CPU, then the GPU read domains will
2564 * need to be invalidated at next use.
2565 */
2566 if (write) {
2567 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2568 obj->write_domain = I915_GEM_DOMAIN_CPU;
2569 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002570
2571 return 0;
2572}
2573
Eric Anholt673a3942008-07-30 12:06:12 -07002574/*
2575 * Set the next domain for the specified object. This
 2576 * may not actually perform the necessary flushing/invalidating though,
2577 * as that may want to be batched with other set_domain operations
2578 *
2579 * This is (we hope) the only really tricky part of gem. The goal
2580 * is fairly simple -- track which caches hold bits of the object
2581 * and make sure they remain coherent. A few concrete examples may
2582 * help to explain how it works. For shorthand, we use the notation
 2583 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2584 * a pair of read and write domain masks.
2585 *
2586 * Case 1: the batch buffer
2587 *
2588 * 1. Allocated
2589 * 2. Written by CPU
2590 * 3. Mapped to GTT
2591 * 4. Read by GPU
2592 * 5. Unmapped from GTT
2593 * 6. Freed
2594 *
2595 * Let's take these a step at a time
2596 *
2597 * 1. Allocated
2598 * Pages allocated from the kernel may still have
2599 * cache contents, so we set them to (CPU, CPU) always.
2600 * 2. Written by CPU (using pwrite)
2601 * The pwrite function calls set_domain (CPU, CPU) and
2602 * this function does nothing (as nothing changes)
 2603 *	3. Mapped to GTT
2604 * This function asserts that the object is not
2605 * currently in any GPU-based read or write domains
2606 * 4. Read by GPU
2607 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2608 * As write_domain is zero, this function adds in the
2609 * current read domains (CPU+COMMAND, 0).
2610 * flush_domains is set to CPU.
2611 * invalidate_domains is set to COMMAND
2612 * clflush is run to get data out of the CPU caches
2613 * then i915_dev_set_domain calls i915_gem_flush to
2614 * emit an MI_FLUSH and drm_agp_chipset_flush
2615 * 5. Unmapped from GTT
2616 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2617 * flush_domains and invalidate_domains end up both zero
2618 * so no flushing/invalidating happens
2619 * 6. Freed
2620 * yay, done
2621 *
2622 * Case 2: The shared render buffer
2623 *
2624 * 1. Allocated
2625 * 2. Mapped to GTT
2626 * 3. Read/written by GPU
2627 * 4. set_domain to (CPU,CPU)
2628 * 5. Read/written by CPU
2629 * 6. Read/written by GPU
2630 *
2631 * 1. Allocated
2632 * Same as last example, (CPU, CPU)
2633 * 2. Mapped to GTT
2634 * Nothing changes (assertions find that it is not in the GPU)
2635 * 3. Read/written by GPU
2636 * execbuffer calls set_domain (RENDER, RENDER)
2637 * flush_domains gets CPU
2638 * invalidate_domains gets GPU
2639 * clflush (obj)
2640 * MI_FLUSH and drm_agp_chipset_flush
2641 * 4. set_domain (CPU, CPU)
2642 * flush_domains gets GPU
2643 * invalidate_domains gets CPU
2644 * wait_rendering (obj) to make sure all drawing is complete.
2645 * This will include an MI_FLUSH to get the data from GPU
2646 * to memory
2647 * clflush (obj) to invalidate the CPU cache
2648 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2649 * 5. Read/written by CPU
2650 * cache lines are loaded and dirtied
 2651 *	6. Read/written by GPU
2652 * Same as last GPU access
2653 *
2654 * Case 3: The constant buffer
2655 *
2656 * 1. Allocated
2657 * 2. Written by CPU
2658 * 3. Read by GPU
2659 * 4. Updated (written) by CPU again
2660 * 5. Read by GPU
2661 *
2662 * 1. Allocated
2663 * (CPU, CPU)
2664 * 2. Written by CPU
2665 * (CPU, CPU)
2666 * 3. Read by GPU
2667 * (CPU+RENDER, 0)
2668 * flush_domains = CPU
2669 * invalidate_domains = RENDER
2670 * clflush (obj)
2671 * MI_FLUSH
2672 * drm_agp_chipset_flush
2673 * 4. Updated (written) by CPU again
2674 * (CPU, CPU)
2675 * flush_domains = 0 (no previous write domain)
2676 * invalidate_domains = 0 (no new read domains)
2677 * 5. Read by GPU
2678 * (CPU+RENDER, 0)
2679 * flush_domains = CPU
2680 * invalidate_domains = RENDER
2681 * clflush (obj)
2682 * MI_FLUSH
2683 * drm_agp_chipset_flush
2684 */
Keith Packardc0d90822008-11-20 23:11:08 -08002685static void
Eric Anholt8b0e3782009-02-19 14:40:50 -08002686i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002687{
2688 struct drm_device *dev = obj->dev;
2689 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2690 uint32_t invalidate_domains = 0;
2691 uint32_t flush_domains = 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002692
Eric Anholt8b0e3782009-02-19 14:40:50 -08002693 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2694 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
Eric Anholt673a3942008-07-30 12:06:12 -07002695
2696#if WATCH_BUF
2697 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2698 __func__, obj,
Eric Anholt8b0e3782009-02-19 14:40:50 -08002699 obj->read_domains, obj->pending_read_domains,
2700 obj->write_domain, obj->pending_write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07002701#endif
2702 /*
2703 * If the object isn't moving to a new write domain,
2704 * let the object stay in multiple read domains
2705 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08002706 if (obj->pending_write_domain == 0)
2707 obj->pending_read_domains |= obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07002708 else
2709 obj_priv->dirty = 1;
2710
2711 /*
2712 * Flush the current write domain if
2713 * the new read domains don't match. Invalidate
2714 * any read domains which differ from the old
2715 * write domain
2716 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08002717 if (obj->write_domain &&
2718 obj->write_domain != obj->pending_read_domains) {
Eric Anholt673a3942008-07-30 12:06:12 -07002719 flush_domains |= obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08002720 invalidate_domains |=
2721 obj->pending_read_domains & ~obj->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07002722 }
2723 /*
2724 * Invalidate any read caches which may have
2725 * stale data. That is, any new read domains.
2726 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08002727 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07002728 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2729#if WATCH_BUF
2730 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2731 __func__, flush_domains, invalidate_domains);
2732#endif
Eric Anholt673a3942008-07-30 12:06:12 -07002733 i915_gem_clflush_object(obj);
2734 }
2735
Eric Anholtefbeed92009-02-19 14:54:51 -08002736 /* The actual obj->write_domain will be updated with
2737 * pending_write_domain after we emit the accumulated flush for all
2738 * of our domain changes in execbuffers (which clears objects'
2739 * write_domains). So if we have a current write domain that we
2740 * aren't changing, set pending_write_domain to that.
2741 */
2742 if (flush_domains == 0 && obj->pending_write_domain == 0)
2743 obj->pending_write_domain = obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08002744 obj->read_domains = obj->pending_read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07002745
2746 dev->invalidate_domains |= invalidate_domains;
2747 dev->flush_domains |= flush_domains;
2748#if WATCH_BUF
2749 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2750 __func__,
2751 obj->read_domains, obj->write_domain,
2752 dev->invalidate_domains, dev->flush_domains);
2753#endif
Eric Anholt673a3942008-07-30 12:06:12 -07002754}
2755
2756/**
Eric Anholte47c68e2008-11-14 13:35:19 -08002757 * Moves the object from a partially CPU read to a full one.
Eric Anholt673a3942008-07-30 12:06:12 -07002758 *
Eric Anholte47c68e2008-11-14 13:35:19 -08002759 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2760 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
2761 */
2762static void
2763i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2764{
Eric Anholte47c68e2008-11-14 13:35:19 -08002765 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2766
2767 if (!obj_priv->page_cpu_valid)
2768 return;
2769
2770 /* If we're partially in the CPU read domain, finish moving it in.
2771 */
2772 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
2773 int i;
2774
2775 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2776 if (obj_priv->page_cpu_valid[i])
2777 continue;
Eric Anholt856fa192009-03-19 14:10:50 -07002778 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08002779 }
Eric Anholte47c68e2008-11-14 13:35:19 -08002780 }
2781
2782 /* Free the page_cpu_valid mappings which are now stale, whether
2783 * or not we've got I915_GEM_DOMAIN_CPU.
2784 */
2785 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
2786 DRM_MEM_DRIVER);
2787 obj_priv->page_cpu_valid = NULL;
2788}
2789
2790/**
2791 * Set the CPU read domain on a range of the object.
2792 *
2793 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2794 * not entirely valid. The page_cpu_valid member of the object flags which
2795 * pages have been flushed, and will be respected by
2796 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2797 * of the whole object.
2798 *
2799 * This function returns when the move is complete, including waiting on
2800 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07002801 */
2802static int
Eric Anholte47c68e2008-11-14 13:35:19 -08002803i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2804 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07002805{
2806 struct drm_i915_gem_object *obj_priv = obj->driver_private;
Eric Anholte47c68e2008-11-14 13:35:19 -08002807 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002808
Eric Anholte47c68e2008-11-14 13:35:19 -08002809 if (offset == 0 && size == obj->size)
2810 return i915_gem_object_set_to_cpu_domain(obj, 0);
2811
2812 i915_gem_object_flush_gpu_write_domain(obj);
2813 /* Wait on any GPU rendering and flushing to occur. */
2814 ret = i915_gem_object_wait_rendering(obj);
2815 if (ret != 0)
2816 return ret;
2817 i915_gem_object_flush_gtt_write_domain(obj);
2818
2819 /* If we're already fully in the CPU read domain, we're done. */
2820 if (obj_priv->page_cpu_valid == NULL &&
2821 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002822 return 0;
2823
Eric Anholte47c68e2008-11-14 13:35:19 -08002824 /* Otherwise, create/clear the per-page CPU read domain flag if we're
2825 * newly adding I915_GEM_DOMAIN_CPU
2826 */
Eric Anholt673a3942008-07-30 12:06:12 -07002827 if (obj_priv->page_cpu_valid == NULL) {
2828 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
2829 DRM_MEM_DRIVER);
Eric Anholte47c68e2008-11-14 13:35:19 -08002830 if (obj_priv->page_cpu_valid == NULL)
2831 return -ENOMEM;
2832 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
2833 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002834
2835 /* Flush the cache on any pages that are still invalid from the CPU's
2836 * perspective.
2837 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002838 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
2839 i++) {
Eric Anholt673a3942008-07-30 12:06:12 -07002840 if (obj_priv->page_cpu_valid[i])
2841 continue;
2842
Eric Anholt856fa192009-03-19 14:10:50 -07002843 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07002844
2845 obj_priv->page_cpu_valid[i] = 1;
2846 }
2847
Eric Anholte47c68e2008-11-14 13:35:19 -08002848 /* It should now be out of any other write domains, and we can update
2849 * the domain values for our changes.
2850 */
2851 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2852
2853 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2854
Eric Anholt673a3942008-07-30 12:06:12 -07002855 return 0;
2856}
2857
2858/**
Eric Anholt673a3942008-07-30 12:06:12 -07002859 * Pin an object to the GTT and evaluate the relocations landing in it.
2860 */
2861static int
2862i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2863 struct drm_file *file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002864 struct drm_i915_gem_exec_object *entry,
2865 struct drm_i915_gem_relocation_entry *relocs)
Eric Anholt673a3942008-07-30 12:06:12 -07002866{
2867 struct drm_device *dev = obj->dev;
Keith Packard0839ccb2008-10-30 19:38:48 -07002868 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002869 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2870 int i, ret;
Keith Packard0839ccb2008-10-30 19:38:48 -07002871 void __iomem *reloc_page;
Eric Anholt673a3942008-07-30 12:06:12 -07002872
2873 /* Choose the GTT offset for our buffer and put it there. */
2874 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2875 if (ret)
2876 return ret;
2877
2878 entry->offset = obj_priv->gtt_offset;
2879
Eric Anholt673a3942008-07-30 12:06:12 -07002880 /* Apply the relocations, using the GTT aperture to avoid cache
2881 * flushing requirements.
2882 */
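	/* Each relocation names a target buffer and an offset within this
	 * object where the target's real GTT offset (plus reloc->delta)
	 * must be written, since userspace could only guess at
	 * presumed_offset when it built the batch.
	 */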
2883 for (i = 0; i < entry->relocation_count; i++) {
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002884		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
Eric Anholt673a3942008-07-30 12:06:12 -07002885 struct drm_gem_object *target_obj;
2886 struct drm_i915_gem_object *target_obj_priv;
Eric Anholt3043c602008-10-02 12:24:47 -07002887 uint32_t reloc_val, reloc_offset;
2888 uint32_t __iomem *reloc_entry;
Eric Anholt673a3942008-07-30 12:06:12 -07002889
Eric Anholt673a3942008-07-30 12:06:12 -07002890 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002891 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07002892 if (target_obj == NULL) {
2893 i915_gem_object_unpin(obj);
2894 return -EBADF;
2895 }
2896 target_obj_priv = target_obj->driver_private;
2897
2898 /* The target buffer should have appeared before us in the
2899 * exec_object list, so it should have a GTT space bound by now.
2900 */
2901 if (target_obj_priv->gtt_space == NULL) {
2902 DRM_ERROR("No GTT space found for object %d\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002903 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07002904 drm_gem_object_unreference(target_obj);
2905 i915_gem_object_unpin(obj);
2906 return -EINVAL;
2907 }
2908
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002909 if (reloc->offset > obj->size - 4) {
Eric Anholt673a3942008-07-30 12:06:12 -07002910 DRM_ERROR("Relocation beyond object bounds: "
2911 "obj %p target %d offset %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002912 obj, reloc->target_handle,
2913 (int) reloc->offset, (int) obj->size);
Eric Anholt673a3942008-07-30 12:06:12 -07002914 drm_gem_object_unreference(target_obj);
2915 i915_gem_object_unpin(obj);
2916 return -EINVAL;
2917 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002918 if (reloc->offset & 3) {
Eric Anholt673a3942008-07-30 12:06:12 -07002919 DRM_ERROR("Relocation not 4-byte aligned: "
2920 "obj %p target %d offset %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002921 obj, reloc->target_handle,
2922 (int) reloc->offset);
Eric Anholt673a3942008-07-30 12:06:12 -07002923 drm_gem_object_unreference(target_obj);
2924 i915_gem_object_unpin(obj);
2925 return -EINVAL;
2926 }
2927
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002928 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
2929 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
Eric Anholte47c68e2008-11-14 13:35:19 -08002930 DRM_ERROR("reloc with read/write CPU domains: "
2931 "obj %p target %d offset %d "
2932 "read %08x write %08x",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002933 obj, reloc->target_handle,
2934 (int) reloc->offset,
2935 reloc->read_domains,
2936 reloc->write_domain);
Chris Wilson491152b2009-02-11 14:26:32 +00002937 drm_gem_object_unreference(target_obj);
2938 i915_gem_object_unpin(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002939 return -EINVAL;
2940 }
2941
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002942 if (reloc->write_domain && target_obj->pending_write_domain &&
2943 reloc->write_domain != target_obj->pending_write_domain) {
Eric Anholt673a3942008-07-30 12:06:12 -07002944 DRM_ERROR("Write domain conflict: "
2945 "obj %p target %d offset %d "
2946 "new %08x old %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002947 obj, reloc->target_handle,
2948 (int) reloc->offset,
2949 reloc->write_domain,
Eric Anholt673a3942008-07-30 12:06:12 -07002950 target_obj->pending_write_domain);
2951 drm_gem_object_unreference(target_obj);
2952 i915_gem_object_unpin(obj);
2953 return -EINVAL;
2954 }
2955
2956#if WATCH_RELOC
2957 DRM_INFO("%s: obj %p offset %08x target %d "
2958 "read %08x write %08x gtt %08x "
2959 "presumed %08x delta %08x\n",
2960 __func__,
2961 obj,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002962 (int) reloc->offset,
2963 (int) reloc->target_handle,
2964 (int) reloc->read_domains,
2965 (int) reloc->write_domain,
Eric Anholt673a3942008-07-30 12:06:12 -07002966 (int) target_obj_priv->gtt_offset,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002967 (int) reloc->presumed_offset,
2968 reloc->delta);
Eric Anholt673a3942008-07-30 12:06:12 -07002969#endif
2970
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002971 target_obj->pending_read_domains |= reloc->read_domains;
2972 target_obj->pending_write_domain |= reloc->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07002973
2974 /* If the relocation already has the right value in it, no
2975 * more work needs to be done.
2976 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002977 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
Eric Anholt673a3942008-07-30 12:06:12 -07002978 drm_gem_object_unreference(target_obj);
2979 continue;
2980 }
2981
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002982 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
2983 if (ret != 0) {
2984 drm_gem_object_unreference(target_obj);
2985 i915_gem_object_unpin(obj);
2986 return -EINVAL;
Eric Anholt673a3942008-07-30 12:06:12 -07002987 }
2988
2989 /* Map the page containing the relocation we're going to
2990 * perform.
2991 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002992 reloc_offset = obj_priv->gtt_offset + reloc->offset;
Keith Packard0839ccb2008-10-30 19:38:48 -07002993 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
2994 (reloc_offset &
2995 ~(PAGE_SIZE - 1)));
Eric Anholt3043c602008-10-02 12:24:47 -07002996 reloc_entry = (uint32_t __iomem *)(reloc_page +
Keith Packard0839ccb2008-10-30 19:38:48 -07002997 (reloc_offset & (PAGE_SIZE - 1)));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002998 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
Eric Anholt673a3942008-07-30 12:06:12 -07002999
3000#if WATCH_BUF
3001 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003002 obj, (unsigned int) reloc->offset,
Eric Anholt673a3942008-07-30 12:06:12 -07003003 readl(reloc_entry), reloc_val);
3004#endif
3005 writel(reloc_val, reloc_entry);
Keith Packard0839ccb2008-10-30 19:38:48 -07003006 io_mapping_unmap_atomic(reloc_page);
Eric Anholt673a3942008-07-30 12:06:12 -07003007
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003008 /* The updated presumed offset for this entry will be
3009 * copied back out to the user.
Eric Anholt673a3942008-07-30 12:06:12 -07003010 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003011 reloc->presumed_offset = target_obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003012
3013 drm_gem_object_unreference(target_obj);
3014 }
3015
Eric Anholt673a3942008-07-30 12:06:12 -07003016#if WATCH_BUF
3017 if (0)
3018 i915_gem_dump_object(obj, 128, __func__, ~0);
3019#endif
3020 return 0;
3021}
3022
3023/** Dispatch a batchbuffer to the ring
3024 */
3025static int
3026i915_dispatch_gem_execbuffer(struct drm_device *dev,
3027 struct drm_i915_gem_execbuffer *exec,
Eric Anholt201361a2009-03-11 12:30:04 -07003028 struct drm_clip_rect *cliprects,
Eric Anholt673a3942008-07-30 12:06:12 -07003029 uint64_t exec_offset)
3030{
3031 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003032 int nbox = exec->num_cliprects;
3033 int i = 0, count;
3034 uint32_t exec_start, exec_len;
3035 RING_LOCALS;
3036
3037 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3038 exec_len = (uint32_t) exec->batch_len;
3039
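	/* The batch start offset and length must both be 8-byte aligned. */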
3040 if ((exec_start | exec_len) & 0x7) {
3041		DRM_ERROR("batch start/length not 8-byte aligned\n");
3042 return -EINVAL;
3043 }
3044
3045 if (!exec_start)
3046 return -EINVAL;
3047
3048 count = nbox ? nbox : 1;
3049
3050 for (i = 0; i < count; i++) {
3051 if (i < nbox) {
Eric Anholt201361a2009-03-11 12:30:04 -07003052 int ret = i915_emit_box(dev, cliprects, i,
Eric Anholt673a3942008-07-30 12:06:12 -07003053 exec->DR1, exec->DR4);
3054 if (ret)
3055 return ret;
3056 }
3057
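		/* 830/845 use the older MI_BATCH_BUFFER command, which takes
		 * both start and end addresses; later chips use
		 * MI_BATCH_BUFFER_START with only the start address.
		 */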
3058 if (IS_I830(dev) || IS_845G(dev)) {
3059 BEGIN_LP_RING(4);
3060 OUT_RING(MI_BATCH_BUFFER);
3061 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3062 OUT_RING(exec_start + exec_len - 4);
3063 OUT_RING(0);
3064 ADVANCE_LP_RING();
3065 } else {
3066 BEGIN_LP_RING(2);
3067 if (IS_I965G(dev)) {
3068 OUT_RING(MI_BATCH_BUFFER_START |
3069 (2 << 6) |
3070 MI_BATCH_NON_SECURE_I965);
3071 OUT_RING(exec_start);
3072 } else {
3073 OUT_RING(MI_BATCH_BUFFER_START |
3074 (2 << 6));
3075 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3076 }
3077 ADVANCE_LP_RING();
3078 }
3079 }
3080
3081 /* XXX breadcrumb */
3082 return 0;
3083}
3084
3085/* Throttle our rendering by waiting until the ring has completed our requests
3086 * emitted over 20 msec ago.
3087 *
3088 * This should get us reasonable parallelism between CPU and GPU but also
3089 * relatively low latency when blocking on a particular request to finish.
3090 */
3091static int
3092i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3093{
3094 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3095 int ret = 0;
3096 uint32_t seqno;
3097
3098 mutex_lock(&dev->struct_mutex);
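	/* Wait on the request that was outstanding at the previous throttle
	 * call, and remember the newest request for the next call.
	 */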
3099 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
3100 i915_file_priv->mm.last_gem_throttle_seqno =
3101 i915_file_priv->mm.last_gem_seqno;
3102 if (seqno)
3103 ret = i915_wait_request(dev, seqno);
3104 mutex_unlock(&dev->struct_mutex);
3105 return ret;
3106}
3107
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003108static int
3109i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3110 uint32_t buffer_count,
3111 struct drm_i915_gem_relocation_entry **relocs)
3112{
3113 uint32_t reloc_count = 0, reloc_index = 0, i;
3114 int ret;
3115
3116 *relocs = NULL;
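	/* Total up the per-buffer relocation counts, checking that the
	 * running sum does not overflow.
	 */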
3117 for (i = 0; i < buffer_count; i++) {
3118 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3119 return -EINVAL;
3120 reloc_count += exec_list[i].relocation_count;
3121 }
3122
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003123 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003124 if (*relocs == NULL)
3125 return -ENOMEM;
3126
3127 for (i = 0; i < buffer_count; i++) {
3128 struct drm_i915_gem_relocation_entry __user *user_relocs;
3129
3130 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3131
3132 ret = copy_from_user(&(*relocs)[reloc_index],
3133 user_relocs,
3134 exec_list[i].relocation_count *
3135 sizeof(**relocs));
3136 if (ret != 0) {
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003137 drm_free_large(*relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003138 *relocs = NULL;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003139 return -EFAULT;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003140 }
3141
3142 reloc_index += exec_list[i].relocation_count;
3143 }
3144
Florian Mickler2bc43b52009-04-06 22:55:41 +02003145 return 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003146}
3147
3148static int
3149i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3150 uint32_t buffer_count,
3151 struct drm_i915_gem_relocation_entry *relocs)
3152{
3153 uint32_t reloc_count = 0, i;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003154 int ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003155
3156 for (i = 0; i < buffer_count; i++) {
3157 struct drm_i915_gem_relocation_entry __user *user_relocs;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003158 int unwritten;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003159
3160 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3161
Florian Mickler2bc43b52009-04-06 22:55:41 +02003162 unwritten = copy_to_user(user_relocs,
3163 &relocs[reloc_count],
3164 exec_list[i].relocation_count *
3165 sizeof(*relocs));
3166
3167 if (unwritten) {
3168 ret = -EFAULT;
3169 goto err;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003170 }
3171
3172 reloc_count += exec_list[i].relocation_count;
3173 }
3174
Florian Mickler2bc43b52009-04-06 22:55:41 +02003175err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003176 drm_free_large(relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003177
3178 return ret;
3179}
3180
Eric Anholt673a3942008-07-30 12:06:12 -07003181int
3182i915_gem_execbuffer(struct drm_device *dev, void *data,
3183 struct drm_file *file_priv)
3184{
3185 drm_i915_private_t *dev_priv = dev->dev_private;
3186 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3187 struct drm_i915_gem_execbuffer *args = data;
3188 struct drm_i915_gem_exec_object *exec_list = NULL;
3189 struct drm_gem_object **object_list = NULL;
3190 struct drm_gem_object *batch_obj;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003191 struct drm_i915_gem_object *obj_priv;
Eric Anholt201361a2009-03-11 12:30:04 -07003192 struct drm_clip_rect *cliprects = NULL;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003193 struct drm_i915_gem_relocation_entry *relocs;
3194 int ret, ret2, i, pinned = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003195 uint64_t exec_offset;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003196 uint32_t seqno, flush_domains, reloc_index;
Keith Packardac94a962008-11-20 23:30:27 -08003197 int pin_tries;
Eric Anholt673a3942008-07-30 12:06:12 -07003198
3199#if WATCH_EXEC
3200 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3201 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3202#endif
3203
Eric Anholt4f481ed2008-09-10 14:22:49 -07003204 if (args->buffer_count < 1) {
3205 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3206 return -EINVAL;
3207 }
Eric Anholt673a3942008-07-30 12:06:12 -07003208 /* Copy in the exec list from userland */
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003209 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3210 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
Eric Anholt673a3942008-07-30 12:06:12 -07003211 if (exec_list == NULL || object_list == NULL) {
3212 DRM_ERROR("Failed to allocate exec or object list "
3213 "for %d buffers\n",
3214 args->buffer_count);
3215 ret = -ENOMEM;
3216 goto pre_mutex_err;
3217 }
3218 ret = copy_from_user(exec_list,
3219 (struct drm_i915_relocation_entry __user *)
3220 (uintptr_t) args->buffers_ptr,
3221 sizeof(*exec_list) * args->buffer_count);
3222 if (ret != 0) {
3223 DRM_ERROR("copy %d exec entries failed %d\n",
3224 args->buffer_count, ret);
		ret = -EFAULT;
3225		goto pre_mutex_err;
3226 }
3227
Eric Anholt201361a2009-03-11 12:30:04 -07003228 if (args->num_cliprects != 0) {
3229 cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
3230 DRM_MEM_DRIVER);
3231		if (cliprects == NULL) {
			ret = -ENOMEM;
3232			goto pre_mutex_err;
		}
3233
3234 ret = copy_from_user(cliprects,
3235 (struct drm_clip_rect __user *)
3236 (uintptr_t) args->cliprects_ptr,
3237 sizeof(*cliprects) * args->num_cliprects);
3238 if (ret != 0) {
3239 DRM_ERROR("copy %d cliprects failed: %d\n",
3240 args->num_cliprects, ret);
			ret = -EFAULT;
3241			goto pre_mutex_err;
3242 }
3243 }
3244
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003245 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3246 &relocs);
3247 if (ret != 0)
3248 goto pre_mutex_err;
3249
Eric Anholt673a3942008-07-30 12:06:12 -07003250 mutex_lock(&dev->struct_mutex);
3251
3252 i915_verify_inactive(dev, __FILE__, __LINE__);
3253
3254 if (dev_priv->mm.wedged) {
3255 DRM_ERROR("Execbuf while wedged\n");
3256 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003257 ret = -EIO;
3258 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003259 }
3260
3261 if (dev_priv->mm.suspended) {
3262 DRM_ERROR("Execbuf while VT-switched.\n");
3263 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003264 ret = -EBUSY;
3265 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003266 }
3267
Keith Packardac94a962008-11-20 23:30:27 -08003268 /* Look up object handles */
Eric Anholt673a3942008-07-30 12:06:12 -07003269 for (i = 0; i < args->buffer_count; i++) {
3270 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3271 exec_list[i].handle);
3272 if (object_list[i] == NULL) {
3273 DRM_ERROR("Invalid object handle %d at index %d\n",
3274 exec_list[i].handle, i);
3275 ret = -EBADF;
3276 goto err;
3277 }
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003278
3279 obj_priv = object_list[i]->driver_private;
3280 if (obj_priv->in_execbuffer) {
3281 DRM_ERROR("Object %p appears more than once in object list\n",
3282 object_list[i]);
3283 ret = -EBADF;
3284 goto err;
3285 }
3286 obj_priv->in_execbuffer = true;
Keith Packardac94a962008-11-20 23:30:27 -08003287 }
Eric Anholt673a3942008-07-30 12:06:12 -07003288
Keith Packardac94a962008-11-20 23:30:27 -08003289 /* Pin and relocate */
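	/* First pass: pin and relocate every buffer at its current placement.
	 * If the aperture is too full (-ENOMEM), unpin what we pinned, evict
	 * everything, and retry exactly once.
	 */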
3290 for (pin_tries = 0; ; pin_tries++) {
3291 ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003292 reloc_index = 0;
3293
Keith Packardac94a962008-11-20 23:30:27 -08003294 for (i = 0; i < args->buffer_count; i++) {
3295 object_list[i]->pending_read_domains = 0;
3296 object_list[i]->pending_write_domain = 0;
3297 ret = i915_gem_object_pin_and_relocate(object_list[i],
3298 file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003299 &exec_list[i],
3300 &relocs[reloc_index]);
Keith Packardac94a962008-11-20 23:30:27 -08003301 if (ret)
3302 break;
3303 pinned = i + 1;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003304 reloc_index += exec_list[i].relocation_count;
Keith Packardac94a962008-11-20 23:30:27 -08003305 }
3306 /* success */
3307 if (ret == 0)
3308 break;
3309
3310 /* error other than GTT full, or we've already tried again */
3311 if (ret != -ENOMEM || pin_tries >= 1) {
Eric Anholtf1acec92008-12-19 14:47:48 -08003312 if (ret != -ERESTARTSYS)
3313 DRM_ERROR("Failed to pin buffers %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003314 goto err;
3315 }
Keith Packardac94a962008-11-20 23:30:27 -08003316
3317 /* unpin all of our buffers */
3318 for (i = 0; i < pinned; i++)
3319 i915_gem_object_unpin(object_list[i]);
Eric Anholtb1177632008-12-10 10:09:41 -08003320 pinned = 0;
Keith Packardac94a962008-11-20 23:30:27 -08003321
3322 /* evict everyone we can from the aperture */
3323 ret = i915_gem_evict_everything(dev);
3324 if (ret)
3325 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003326 }
3327
3328 /* Set the pending read domains for the batch buffer to COMMAND */
3329 batch_obj = object_list[args->buffer_count-1];
3330 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
3331 batch_obj->pending_write_domain = 0;
3332
3333 i915_verify_inactive(dev, __FILE__, __LINE__);
3334
Keith Packard646f0f62008-11-20 23:23:03 -08003335 /* Zero the global flush/invalidate flags. These
3336 * will be modified as new domains are computed
3337 * for each object
3338 */
3339 dev->invalidate_domains = 0;
3340 dev->flush_domains = 0;
3341
Eric Anholt673a3942008-07-30 12:06:12 -07003342 for (i = 0; i < args->buffer_count; i++) {
3343 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003344
Keith Packard646f0f62008-11-20 23:23:03 -08003345 /* Compute new gpu domains and update invalidate/flush */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003346 i915_gem_object_set_to_gpu_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003347 }
3348
3349 i915_verify_inactive(dev, __FILE__, __LINE__);
3350
Keith Packard646f0f62008-11-20 23:23:03 -08003351 if (dev->invalidate_domains | dev->flush_domains) {
3352#if WATCH_EXEC
3353 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3354 __func__,
3355 dev->invalidate_domains,
3356 dev->flush_domains);
3357#endif
3358 i915_gem_flush(dev,
3359 dev->invalidate_domains,
3360 dev->flush_domains);
3361 if (dev->flush_domains)
3362 (void)i915_add_request(dev, dev->flush_domains);
3363 }
Eric Anholt673a3942008-07-30 12:06:12 -07003364
Eric Anholtefbeed92009-02-19 14:54:51 -08003365 for (i = 0; i < args->buffer_count; i++) {
3366 struct drm_gem_object *obj = object_list[i];
3367
3368 obj->write_domain = obj->pending_write_domain;
3369 }
3370
Eric Anholt673a3942008-07-30 12:06:12 -07003371 i915_verify_inactive(dev, __FILE__, __LINE__);
3372
3373#if WATCH_COHERENCY
3374 for (i = 0; i < args->buffer_count; i++) {
3375 i915_gem_object_check_coherency(object_list[i],
3376 exec_list[i].handle);
3377 }
3378#endif
3379
3380 exec_offset = exec_list[args->buffer_count - 1].offset;
3381
3382#if WATCH_EXEC
Ben Gamari6911a9b2009-04-02 11:24:54 -07003383 i915_gem_dump_object(batch_obj,
Eric Anholt673a3942008-07-30 12:06:12 -07003384 args->batch_len,
3385 __func__,
3386 ~0);
3387#endif
3388
Eric Anholt673a3942008-07-30 12:06:12 -07003389 /* Exec the batchbuffer */
Eric Anholt201361a2009-03-11 12:30:04 -07003390 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003391 if (ret) {
3392 DRM_ERROR("dispatch failed %d\n", ret);
3393 goto err;
3394 }
3395
3396 /*
3397 * Ensure that the commands in the batch buffer are
3398 * finished before the interrupt fires
3399 */
3400 flush_domains = i915_retire_commands(dev);
3401
3402 i915_verify_inactive(dev, __FILE__, __LINE__);
3403
3404 /*
3405 * Get a seqno representing the execution of the current buffer,
3406 * which we can wait on. We would like to mitigate these interrupts,
3407 * likely by only creating seqnos occasionally (so that we have
3408 * *some* interrupts representing completion of buffers that we can
3409 * wait on when trying to clear up gtt space).
3410 */
3411 seqno = i915_add_request(dev, flush_domains);
3412 BUG_ON(seqno == 0);
3413 i915_file_priv->mm.last_gem_seqno = seqno;
3414 for (i = 0; i < args->buffer_count; i++) {
3415 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003416
Eric Anholtce44b0e2008-11-06 16:00:31 -08003417 i915_gem_object_move_to_active(obj, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07003418#if WATCH_LRU
3419 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3420#endif
3421 }
3422#if WATCH_LRU
3423 i915_dump_lru(dev, __func__);
3424#endif
3425
3426 i915_verify_inactive(dev, __FILE__, __LINE__);
3427
Eric Anholt673a3942008-07-30 12:06:12 -07003428err:
Julia Lawallaad87df2008-12-21 16:28:47 +01003429 for (i = 0; i < pinned; i++)
3430 i915_gem_object_unpin(object_list[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07003431
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003432 for (i = 0; i < args->buffer_count; i++) {
3433 if (object_list[i]) {
3434 obj_priv = object_list[i]->driver_private;
3435 obj_priv->in_execbuffer = false;
3436 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003437 drm_gem_object_unreference(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003438 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003439
Eric Anholt673a3942008-07-30 12:06:12 -07003440 mutex_unlock(&dev->struct_mutex);
3441
Roland Dreiera35f2e22009-02-06 17:48:09 -08003442 if (!ret) {
3443 /* Copy the new buffer offsets back to the user's exec list. */
3444 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3445 (uintptr_t) args->buffers_ptr,
3446 exec_list,
3447 sizeof(*exec_list) * args->buffer_count);
Florian Mickler2bc43b52009-04-06 22:55:41 +02003448 if (ret) {
3449 ret = -EFAULT;
Roland Dreiera35f2e22009-02-06 17:48:09 -08003450 DRM_ERROR("failed to copy %d exec entries "
3451 "back to user (%d)\n",
3452 args->buffer_count, ret);
Florian Mickler2bc43b52009-04-06 22:55:41 +02003453 }
Roland Dreiera35f2e22009-02-06 17:48:09 -08003454 }
3455
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003456 /* Copy the updated relocations out regardless of current error
3457 * state. Failure to update the relocs would mean that the next
3458 * time userland calls execbuf, it would do so with presumed offset
3459 * state that didn't match the actual object state.
3460 */
3461 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3462 relocs);
3463 if (ret2 != 0) {
3464 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3465
3466 if (ret == 0)
3467 ret = ret2;
3468 }
3469
Eric Anholt673a3942008-07-30 12:06:12 -07003470pre_mutex_err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003471 drm_free_large(object_list);
3472 drm_free_large(exec_list);
Eric Anholt201361a2009-03-11 12:30:04 -07003473 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
3474 DRM_MEM_DRIVER);
Eric Anholt673a3942008-07-30 12:06:12 -07003475
3476 return ret;
3477}
3478
3479int
3480i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3481{
3482 struct drm_device *dev = obj->dev;
3483 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3484 int ret;
3485
3486 i915_verify_inactive(dev, __FILE__, __LINE__);
3487 if (obj_priv->gtt_space == NULL) {
3488 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3489 if (ret != 0) {
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003490 if (ret != -EBUSY && ret != -ERESTARTSYS)
Kyle McMartin0fce81e2009-02-28 15:01:16 -05003491 DRM_ERROR("Failure to bind: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003492 return ret;
3493 }
Chris Wilson22c344e2009-02-11 14:26:45 +00003494 }
3495 /*
3496 * Pre-965 chips need a fence register set up in order to
3497 * properly handle tiled surfaces.
3498 */
3499 if (!IS_I965G(dev) &&
3500 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
3501 obj_priv->tiling_mode != I915_TILING_NONE) {
3502 ret = i915_gem_object_get_fence_reg(obj, true);
3503 if (ret != 0) {
3504 if (ret != -EBUSY && ret != -ERESTARTSYS)
3505 DRM_ERROR("Failure to install fence: %d\n",
3506 ret);
3507 return ret;
3508 }
Eric Anholt673a3942008-07-30 12:06:12 -07003509 }
3510 obj_priv->pin_count++;
3511
3512 /* If the object is not active and not pending a flush,
3513 * remove it from the inactive list
3514 */
3515 if (obj_priv->pin_count == 1) {
3516 atomic_inc(&dev->pin_count);
3517 atomic_add(obj->size, &dev->pin_memory);
3518 if (!obj_priv->active &&
3519 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
3520 I915_GEM_DOMAIN_GTT)) == 0 &&
3521 !list_empty(&obj_priv->list))
3522 list_del_init(&obj_priv->list);
3523 }
3524 i915_verify_inactive(dev, __FILE__, __LINE__);
3525
3526 return 0;
3527}
3528
3529void
3530i915_gem_object_unpin(struct drm_gem_object *obj)
3531{
3532 struct drm_device *dev = obj->dev;
3533 drm_i915_private_t *dev_priv = dev->dev_private;
3534 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3535
3536 i915_verify_inactive(dev, __FILE__, __LINE__);
3537 obj_priv->pin_count--;
3538 BUG_ON(obj_priv->pin_count < 0);
3539 BUG_ON(obj_priv->gtt_space == NULL);
3540
3541 /* If the object is no longer pinned, and is
3542 * neither active nor being flushed, then stick it on
3543 * the inactive list
3544 */
3545 if (obj_priv->pin_count == 0) {
3546 if (!obj_priv->active &&
3547 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
3548 I915_GEM_DOMAIN_GTT)) == 0)
3549 list_move_tail(&obj_priv->list,
3550 &dev_priv->mm.inactive_list);
3551 atomic_dec(&dev->pin_count);
3552 atomic_sub(obj->size, &dev->pin_memory);
3553 }
3554 i915_verify_inactive(dev, __FILE__, __LINE__);
3555}
3556
3557int
3558i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3559 struct drm_file *file_priv)
3560{
3561 struct drm_i915_gem_pin *args = data;
3562 struct drm_gem_object *obj;
3563 struct drm_i915_gem_object *obj_priv;
3564 int ret;
3565
3566 mutex_lock(&dev->struct_mutex);
3567
3568 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3569 if (obj == NULL) {
3570 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3571 args->handle);
3572 mutex_unlock(&dev->struct_mutex);
3573 return -EBADF;
3574 }
3575 obj_priv = obj->driver_private;
3576
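	/* Only one client may pin a given object through this ioctl;
	 * the first pin records the owning file below.
	 */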
Jesse Barnes79e53942008-11-07 14:24:08 -08003577 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3578 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3579 args->handle);
Chris Wilson96dec612009-02-08 19:08:04 +00003580 drm_gem_object_unreference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003581 mutex_unlock(&dev->struct_mutex);
Jesse Barnes79e53942008-11-07 14:24:08 -08003582 return -EINVAL;
3583 }
3584
3585 obj_priv->user_pin_count++;
3586 obj_priv->pin_filp = file_priv;
3587 if (obj_priv->user_pin_count == 1) {
3588 ret = i915_gem_object_pin(obj, args->alignment);
3589 if (ret != 0) {
3590 drm_gem_object_unreference(obj);
3591 mutex_unlock(&dev->struct_mutex);
3592 return ret;
3593 }
Eric Anholt673a3942008-07-30 12:06:12 -07003594 }
3595
3596 /* XXX - flush the CPU caches for pinned objects
3597 * as the X server doesn't manage domains yet
3598 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003599 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003600 args->offset = obj_priv->gtt_offset;
3601 drm_gem_object_unreference(obj);
3602 mutex_unlock(&dev->struct_mutex);
3603
3604 return 0;
3605}
3606
3607int
3608i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3609 struct drm_file *file_priv)
3610{
3611 struct drm_i915_gem_pin *args = data;
3612 struct drm_gem_object *obj;
Jesse Barnes79e53942008-11-07 14:24:08 -08003613 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07003614
3615 mutex_lock(&dev->struct_mutex);
3616
3617 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3618 if (obj == NULL) {
3619 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3620 args->handle);
3621 mutex_unlock(&dev->struct_mutex);
3622 return -EBADF;
3623 }
3624
Jesse Barnes79e53942008-11-07 14:24:08 -08003625 obj_priv = obj->driver_private;
3626 if (obj_priv->pin_filp != file_priv) {
3627		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3628 args->handle);
3629 drm_gem_object_unreference(obj);
3630 mutex_unlock(&dev->struct_mutex);
3631 return -EINVAL;
3632 }
3633 obj_priv->user_pin_count--;
3634 if (obj_priv->user_pin_count == 0) {
3635 obj_priv->pin_filp = NULL;
3636 i915_gem_object_unpin(obj);
3637 }
Eric Anholt673a3942008-07-30 12:06:12 -07003638
3639 drm_gem_object_unreference(obj);
3640 mutex_unlock(&dev->struct_mutex);
3641 return 0;
3642}
3643
3644int
3645i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3646 struct drm_file *file_priv)
3647{
3648 struct drm_i915_gem_busy *args = data;
3649 struct drm_gem_object *obj;
3650 struct drm_i915_gem_object *obj_priv;
3651
3652 mutex_lock(&dev->struct_mutex);
3653 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3654 if (obj == NULL) {
3655 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3656 args->handle);
3657 mutex_unlock(&dev->struct_mutex);
3658 return -EBADF;
3659 }
3660
Eric Anholtf21289b2009-02-18 09:44:56 -08003661 /* Update the active list for the hardware's current position.
3662 * Otherwise this only updates on a delayed timer or when irqs are
3663 * actually unmasked, and our working set ends up being larger than
3664 * required.
3665 */
3666 i915_gem_retire_requests(dev);
3667
Eric Anholt673a3942008-07-30 12:06:12 -07003668 obj_priv = obj->driver_private;
Eric Anholtc4de0a52008-12-14 19:05:04 -08003669 /* Don't count being on the flushing list against the object being
3670 * done. Otherwise, a buffer left on the flushing list but not getting
3671 * flushed (because nobody's flushing that domain) won't ever return
3672 * unbusy and get reused by libdrm's bo cache. The other expected
3673 * consumer of this interface, OpenGL's occlusion queries, also specs
3674 * that the objects get unbusy "eventually" without any interference.
3675 */
3676 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003677
3678 drm_gem_object_unreference(obj);
3679 mutex_unlock(&dev->struct_mutex);
3680 return 0;
3681}
3682
3683int
3684i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3685 struct drm_file *file_priv)
3686{
3687 return i915_gem_ring_throttle(dev, file_priv);
3688}
3689
3690int i915_gem_init_object(struct drm_gem_object *obj)
3691{
3692 struct drm_i915_gem_object *obj_priv;
3693
3694 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
3695 if (obj_priv == NULL)
3696 return -ENOMEM;
3697
3698 /*
3699 * We've just allocated pages from the kernel,
3700 * so they've just been written by the CPU with
3701 * zeros. They'll need to be clflushed before we
3702 * use them with the GPU.
3703 */
3704 obj->write_domain = I915_GEM_DOMAIN_CPU;
3705 obj->read_domains = I915_GEM_DOMAIN_CPU;
3706
Keith Packardba1eb1d2008-10-14 19:55:10 -07003707 obj_priv->agp_type = AGP_USER_MEMORY;
3708
Eric Anholt673a3942008-07-30 12:06:12 -07003709 obj->driver_private = obj_priv;
3710 obj_priv->obj = obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003711 obj_priv->fence_reg = I915_FENCE_REG_NONE;
Eric Anholt673a3942008-07-30 12:06:12 -07003712 INIT_LIST_HEAD(&obj_priv->list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08003713
Eric Anholt673a3942008-07-30 12:06:12 -07003714 return 0;
3715}
3716
3717void i915_gem_free_object(struct drm_gem_object *obj)
3718{
Jesse Barnesde151cf2008-11-12 10:03:55 -08003719 struct drm_device *dev = obj->dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003720 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3721
3722 while (obj_priv->pin_count > 0)
3723 i915_gem_object_unpin(obj);
3724
Dave Airlie71acb5e2008-12-30 20:31:46 +10003725 if (obj_priv->phys_obj)
3726 i915_gem_detach_phys_object(dev, obj);
3727
Eric Anholt673a3942008-07-30 12:06:12 -07003728 i915_gem_object_unbind(obj);
3729
Jesse Barnesab00b3e2009-02-11 14:01:46 -08003730 i915_gem_free_mmap_offset(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08003731
Eric Anholt673a3942008-07-30 12:06:12 -07003732 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
Eric Anholt280b7132009-03-12 16:56:27 -07003733 kfree(obj_priv->bit_17);
Eric Anholt673a3942008-07-30 12:06:12 -07003734 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
3735}
3736
Eric Anholt673a3942008-07-30 12:06:12 -07003737/** Unbinds all objects that are on the given buffer list. */
3738static int
3739i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
3740{
3741 struct drm_gem_object *obj;
3742 struct drm_i915_gem_object *obj_priv;
3743 int ret;
3744
3745 while (!list_empty(head)) {
3746 obj_priv = list_first_entry(head,
3747 struct drm_i915_gem_object,
3748 list);
3749 obj = obj_priv->obj;
3750
3751 if (obj_priv->pin_count != 0) {
3752 DRM_ERROR("Pinned object in unbind list\n");
3753 mutex_unlock(&dev->struct_mutex);
3754 return -EINVAL;
3755 }
3756
3757 ret = i915_gem_object_unbind(obj);
3758 if (ret != 0) {
3759 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
3760 ret);
3761 mutex_unlock(&dev->struct_mutex);
3762 return ret;
3763 }
3764 }
3765
3766
3767 return 0;
3768}
3769
Jesse Barnes5669fca2009-02-17 15:13:31 -08003770int
Eric Anholt673a3942008-07-30 12:06:12 -07003771i915_gem_idle(struct drm_device *dev)
3772{
3773 drm_i915_private_t *dev_priv = dev->dev_private;
3774 uint32_t seqno, cur_seqno, last_seqno;
3775 int stuck, ret;
3776
Keith Packard6dbe2772008-10-14 21:41:13 -07003777 mutex_lock(&dev->struct_mutex);
3778
3779 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
3780 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003781 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07003782 }
Eric Anholt673a3942008-07-30 12:06:12 -07003783
3784 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3785 * We need to replace this with a semaphore, or something.
3786 */
3787 dev_priv->mm.suspended = 1;
3788
Keith Packard6dbe2772008-10-14 21:41:13 -07003789	/* Cancel the retire work handler and wait for it to finish if it is running
3790 */
3791 mutex_unlock(&dev->struct_mutex);
3792 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3793 mutex_lock(&dev->struct_mutex);
3794
Eric Anholt673a3942008-07-30 12:06:12 -07003795 i915_kernel_lost_context(dev);
3796
3797 /* Flush the GPU along with all non-CPU write domains
3798 */
3799 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
3800 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
Jesse Barnesde151cf2008-11-12 10:03:55 -08003801 seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
Eric Anholt673a3942008-07-30 12:06:12 -07003802
3803 if (seqno == 0) {
3804 mutex_unlock(&dev->struct_mutex);
3805 return -ENOMEM;
3806 }
3807
3808 dev_priv->mm.waiting_gem_seqno = seqno;
3809 last_seqno = 0;
3810 stuck = 0;
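	/* Poll for the final seqno; if the ring makes no progress for about
	 * a second (100 polls at 10 ms), declare the hardware wedged.
	 */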
3811 for (;;) {
3812 cur_seqno = i915_get_gem_seqno(dev);
3813 if (i915_seqno_passed(cur_seqno, seqno))
3814 break;
3815 if (last_seqno == cur_seqno) {
3816 if (stuck++ > 100) {
3817 DRM_ERROR("hardware wedged\n");
3818 dev_priv->mm.wedged = 1;
3819 DRM_WAKEUP(&dev_priv->irq_queue);
3820 break;
3821 }
3822 }
3823 msleep(10);
3824 last_seqno = cur_seqno;
3825 }
3826 dev_priv->mm.waiting_gem_seqno = 0;
3827
3828 i915_gem_retire_requests(dev);
3829
Carl Worth5e118f42009-03-20 11:54:25 -07003830 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt28dfe522008-11-13 15:00:55 -08003831 if (!dev_priv->mm.wedged) {
3832 /* Active and flushing should now be empty as we've
3833 * waited for a sequence higher than any pending execbuffer
3834 */
3835 WARN_ON(!list_empty(&dev_priv->mm.active_list));
3836 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
3837 /* Request should now be empty as we've also waited
3838 * for the last request in the list
3839 */
3840 WARN_ON(!list_empty(&dev_priv->mm.request_list));
3841 }
Eric Anholt673a3942008-07-30 12:06:12 -07003842
Eric Anholt28dfe522008-11-13 15:00:55 -08003843 /* Empty the active and flushing lists to inactive. If there's
3844 * anything left at this point, it means that we're wedged and
3845 * nothing good's going to happen by leaving them there. So strip
3846 * the GPU domains and just stuff them onto inactive.
Eric Anholt673a3942008-07-30 12:06:12 -07003847 */
Eric Anholt28dfe522008-11-13 15:00:55 -08003848 while (!list_empty(&dev_priv->mm.active_list)) {
3849 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07003850
Eric Anholt28dfe522008-11-13 15:00:55 -08003851 obj_priv = list_first_entry(&dev_priv->mm.active_list,
3852 struct drm_i915_gem_object,
3853 list);
3854 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3855 i915_gem_object_move_to_inactive(obj_priv->obj);
3856 }
Carl Worth5e118f42009-03-20 11:54:25 -07003857 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt28dfe522008-11-13 15:00:55 -08003858
3859 while (!list_empty(&dev_priv->mm.flushing_list)) {
3860 struct drm_i915_gem_object *obj_priv;
3861
Eric Anholt151903d2008-12-01 10:23:21 +10003862 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
Eric Anholt28dfe522008-11-13 15:00:55 -08003863 struct drm_i915_gem_object,
3864 list);
3865 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3866 i915_gem_object_move_to_inactive(obj_priv->obj);
3867 }
3868
3869
3870 /* Move all inactive buffers out of the GTT. */
Eric Anholt673a3942008-07-30 12:06:12 -07003871 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
Eric Anholt28dfe522008-11-13 15:00:55 -08003872 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
Keith Packard6dbe2772008-10-14 21:41:13 -07003873 if (ret) {
3874 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003875 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07003876 }
Eric Anholt673a3942008-07-30 12:06:12 -07003877
Keith Packard6dbe2772008-10-14 21:41:13 -07003878 i915_gem_cleanup_ringbuffer(dev);
3879 mutex_unlock(&dev->struct_mutex);
3880
Eric Anholt673a3942008-07-30 12:06:12 -07003881 return 0;
3882}
3883
3884static int
3885i915_gem_init_hws(struct drm_device *dev)
3886{
3887 drm_i915_private_t *dev_priv = dev->dev_private;
3888 struct drm_gem_object *obj;
3889 struct drm_i915_gem_object *obj_priv;
3890 int ret;
3891
3892 /* If we need a physical address for the status page, it's already
3893 * initialized at driver load time.
3894 */
3895 if (!I915_NEED_GFX_HWS(dev))
3896 return 0;
3897
3898 obj = drm_gem_object_alloc(dev, 4096);
3899 if (obj == NULL) {
3900 DRM_ERROR("Failed to allocate status page\n");
3901 return -ENOMEM;
3902 }
3903 obj_priv = obj->driver_private;
Keith Packardba1eb1d2008-10-14 19:55:10 -07003904 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
Eric Anholt673a3942008-07-30 12:06:12 -07003905
3906 ret = i915_gem_object_pin(obj, 4096);
3907 if (ret != 0) {
3908 drm_gem_object_unreference(obj);
3909 return ret;
3910 }
3911
3912 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003913
Eric Anholt856fa192009-03-19 14:10:50 -07003914 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
Keith Packardba1eb1d2008-10-14 19:55:10 -07003915 if (dev_priv->hw_status_page == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07003916 DRM_ERROR("Failed to map status page.\n");
3917 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
Chris Wilson3eb2ee72009-02-11 14:26:34 +00003918 i915_gem_object_unpin(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003919 drm_gem_object_unreference(obj);
3920 return -EINVAL;
3921 }
3922 dev_priv->hws_obj = obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003923 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
3924 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
Keith Packardba1eb1d2008-10-14 19:55:10 -07003925 I915_READ(HWS_PGA); /* posting read */
Eric Anholt673a3942008-07-30 12:06:12 -07003926 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
3927
3928 return 0;
3929}
3930
Chris Wilson85a7bb92009-02-11 14:52:44 +00003931static void
3932i915_gem_cleanup_hws(struct drm_device *dev)
3933{
3934 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00003935 struct drm_gem_object *obj;
3936 struct drm_i915_gem_object *obj_priv;
Chris Wilson85a7bb92009-02-11 14:52:44 +00003937
3938 if (dev_priv->hws_obj == NULL)
3939 return;
3940
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00003941 obj = dev_priv->hws_obj;
3942 obj_priv = obj->driver_private;
3943
Eric Anholt856fa192009-03-19 14:10:50 -07003944 kunmap(obj_priv->pages[0]);
Chris Wilson85a7bb92009-02-11 14:52:44 +00003945 i915_gem_object_unpin(obj);
3946 drm_gem_object_unreference(obj);
3947 dev_priv->hws_obj = NULL;
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00003948
Chris Wilson85a7bb92009-02-11 14:52:44 +00003949 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3950 dev_priv->hw_status_page = NULL;
3951
3952 /* Write high address into HWS_PGA when disabling. */
3953 I915_WRITE(HWS_PGA, 0x1ffff000);
3954}
3955
Jesse Barnes79e53942008-11-07 14:24:08 -08003956int
Eric Anholt673a3942008-07-30 12:06:12 -07003957i915_gem_init_ringbuffer(struct drm_device *dev)
3958{
3959 drm_i915_private_t *dev_priv = dev->dev_private;
3960 struct drm_gem_object *obj;
3961 struct drm_i915_gem_object *obj_priv;
Jesse Barnes79e53942008-11-07 14:24:08 -08003962 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
Eric Anholt673a3942008-07-30 12:06:12 -07003963 int ret;
Keith Packard50aa2532008-10-14 17:20:35 -07003964 u32 head;
Eric Anholt673a3942008-07-30 12:06:12 -07003965
3966 ret = i915_gem_init_hws(dev);
3967 if (ret != 0)
3968 return ret;
3969
3970 obj = drm_gem_object_alloc(dev, 128 * 1024);
3971 if (obj == NULL) {
3972 DRM_ERROR("Failed to allocate ringbuffer\n");
Chris Wilson85a7bb92009-02-11 14:52:44 +00003973 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003974 return -ENOMEM;
3975 }
3976 obj_priv = obj->driver_private;
3977
3978 ret = i915_gem_object_pin(obj, 4096);
3979 if (ret != 0) {
3980 drm_gem_object_unreference(obj);
Chris Wilson85a7bb92009-02-11 14:52:44 +00003981 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003982 return ret;
3983 }
3984
3985 /* Set up the kernel mapping for the ring. */
Jesse Barnes79e53942008-11-07 14:24:08 -08003986 ring->Size = obj->size;
3987 ring->tail_mask = obj->size - 1;
Eric Anholt673a3942008-07-30 12:06:12 -07003988
Jesse Barnes79e53942008-11-07 14:24:08 -08003989 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
3990 ring->map.size = obj->size;
3991 ring->map.type = 0;
3992 ring->map.flags = 0;
3993 ring->map.mtrr = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003994
Jesse Barnes79e53942008-11-07 14:24:08 -08003995 drm_core_ioremap_wc(&ring->map, dev);
3996 if (ring->map.handle == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07003997 DRM_ERROR("Failed to map ringbuffer.\n");
3998 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
Chris Wilson47ed1852009-02-11 14:26:33 +00003999 i915_gem_object_unpin(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004000 drm_gem_object_unreference(obj);
Chris Wilson85a7bb92009-02-11 14:52:44 +00004001 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004002 return -EINVAL;
4003 }
Jesse Barnes79e53942008-11-07 14:24:08 -08004004 ring->ring_obj = obj;
4005 ring->virtual_start = ring->map.handle;
Eric Anholt673a3942008-07-30 12:06:12 -07004006
4007 /* Stop the ring if it's running. */
4008 I915_WRITE(PRB0_CTL, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004009 I915_WRITE(PRB0_TAIL, 0);
Keith Packard50aa2532008-10-14 17:20:35 -07004010 I915_WRITE(PRB0_HEAD, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004011
4012 /* Initialize the ring. */
4013 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
Keith Packard50aa2532008-10-14 17:20:35 -07004014 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4015
4016 /* G45 ring initialization fails to reset head to zero */
4017 if (head != 0) {
4018 DRM_ERROR("Ring head not reset to zero "
4019 "ctl %08x head %08x tail %08x start %08x\n",
4020 I915_READ(PRB0_CTL),
4021 I915_READ(PRB0_HEAD),
4022 I915_READ(PRB0_TAIL),
4023 I915_READ(PRB0_START));
4024 I915_WRITE(PRB0_HEAD, 0);
4025
4026 DRM_ERROR("Ring head forced to zero "
4027 "ctl %08x head %08x tail %08x start %08x\n",
4028 I915_READ(PRB0_CTL),
4029 I915_READ(PRB0_HEAD),
4030 I915_READ(PRB0_TAIL),
4031 I915_READ(PRB0_START));
4032 }
4033
Eric Anholt673a3942008-07-30 12:06:12 -07004034 I915_WRITE(PRB0_CTL,
4035 ((obj->size - 4096) & RING_NR_PAGES) |
4036 RING_NO_REPORT |
4037 RING_VALID);
4038
Keith Packard50aa2532008-10-14 17:20:35 -07004039 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4040
4041 /* If the head is still not zero, the ring is dead */
4042 if (head != 0) {
4043 DRM_ERROR("Ring initialization failed "
4044 "ctl %08x head %08x tail %08x start %08x\n",
4045 I915_READ(PRB0_CTL),
4046 I915_READ(PRB0_HEAD),
4047 I915_READ(PRB0_TAIL),
4048 I915_READ(PRB0_START));
4049 return -EIO;
4050 }
4051
Eric Anholt673a3942008-07-30 12:06:12 -07004052 /* Update our cache of the ring state */
Jesse Barnes79e53942008-11-07 14:24:08 -08004053 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4054 i915_kernel_lost_context(dev);
4055 else {
4056 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4057 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
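		/* Free space is the gap from the tail back around to the head. */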
4058 ring->space = ring->head - (ring->tail + 8);
4059 if (ring->space < 0)
4060 ring->space += ring->Size;
4061 }
Eric Anholt673a3942008-07-30 12:06:12 -07004062
4063 return 0;
4064}
4065
Jesse Barnes79e53942008-11-07 14:24:08 -08004066void
Eric Anholt673a3942008-07-30 12:06:12 -07004067i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4068{
4069 drm_i915_private_t *dev_priv = dev->dev_private;
4070
4071 if (dev_priv->ring.ring_obj == NULL)
4072 return;
4073
4074 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4075
4076 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4077 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4078 dev_priv->ring.ring_obj = NULL;
4079 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4080
Chris Wilson85a7bb92009-02-11 14:52:44 +00004081 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004082}
4083
4084int
4085i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4086 struct drm_file *file_priv)
4087{
4088 drm_i915_private_t *dev_priv = dev->dev_private;
4089 int ret;
4090
Jesse Barnes79e53942008-11-07 14:24:08 -08004091 if (drm_core_check_feature(dev, DRIVER_MODESET))
4092 return 0;
4093
Eric Anholt673a3942008-07-30 12:06:12 -07004094 if (dev_priv->mm.wedged) {
4095 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4096 dev_priv->mm.wedged = 0;
4097 }
4098
Eric Anholt673a3942008-07-30 12:06:12 -07004099 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004100 dev_priv->mm.suspended = 0;
4101
4102 ret = i915_gem_init_ringbuffer(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004103 if (ret != 0) {
4104 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004105 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004106 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004107
Carl Worth5e118f42009-03-20 11:54:25 -07004108 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07004109 BUG_ON(!list_empty(&dev_priv->mm.active_list));
Carl Worth5e118f42009-03-20 11:54:25 -07004110 spin_unlock(&dev_priv->mm.active_list_lock);
4111
Eric Anholt673a3942008-07-30 12:06:12 -07004112 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4113 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4114 BUG_ON(!list_empty(&dev_priv->mm.request_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004115 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004116
4117 drm_irq_install(dev);
4118
Eric Anholt673a3942008-07-30 12:06:12 -07004119 return 0;
4120}
4121
4122int
4123i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4124 struct drm_file *file_priv)
4125{
4126 int ret;
4127
Jesse Barnes79e53942008-11-07 14:24:08 -08004128 if (drm_core_check_feature(dev, DRIVER_MODESET))
4129 return 0;
4130
Eric Anholt673a3942008-07-30 12:06:12 -07004131 ret = i915_gem_idle(dev);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004132 drm_irq_uninstall(dev);
4133
Keith Packard6dbe2772008-10-14 21:41:13 -07004134 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004135}
4136
4137void
4138i915_gem_lastclose(struct drm_device *dev)
4139{
4140 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004141
Eric Anholte806b492009-01-22 09:56:58 -08004142 if (drm_core_check_feature(dev, DRIVER_MODESET))
4143 return;
4144
Keith Packard6dbe2772008-10-14 21:41:13 -07004145 ret = i915_gem_idle(dev);
4146 if (ret)
4147 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004148}
4149
4150void
4151i915_gem_load(struct drm_device *dev)
4152{
4153 drm_i915_private_t *dev_priv = dev->dev_private;
4154
Carl Worth5e118f42009-03-20 11:54:25 -07004155 spin_lock_init(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07004156 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4157 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4158 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4159 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4160 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4161 i915_gem_retire_work_handler);
Eric Anholt673a3942008-07-30 12:06:12 -07004162 dev_priv->mm.next_gem_seqno = 1;
4163
Jesse Barnesde151cf2008-11-12 10:03:55 -08004164 /* Old X drivers will take 0-2 for front, back, depth buffers */
4165 dev_priv->fence_reg_start = 3;
4166
Jesse Barnes0f973f22009-01-26 17:10:45 -08004167 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004168 dev_priv->num_fence_regs = 16;
4169 else
4170 dev_priv->num_fence_regs = 8;
4171
Eric Anholt673a3942008-07-30 12:06:12 -07004172 i915_gem_detect_bit_6_swizzle(dev);
4173}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004174
4175/*
4176 * Create a physically contiguous memory object for this object
4177 * e.g. for cursor + overlay regs
4178 */
4179int i915_gem_init_phys_object(struct drm_device *dev,
4180 int id, int size)
4181{
4182 drm_i915_private_t *dev_priv = dev->dev_private;
4183 struct drm_i915_gem_phys_object *phys_obj;
4184 int ret;
4185
4186 if (dev_priv->mm.phys_objs[id - 1] || !size)
4187 return 0;
4188
4189 phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
4190 if (!phys_obj)
4191 return -ENOMEM;
4192
4193 phys_obj->id = id;
4194
4195 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
4196 if (!phys_obj->handle) {
4197 ret = -ENOMEM;
4198 goto kfree_obj;
4199 }
4200#ifdef CONFIG_X86
4201 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4202#endif
4203
4204 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4205
4206 return 0;
4207kfree_obj:
4208 drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
4209 return ret;
4210}
4211
4212void i915_gem_free_phys_object(struct drm_device *dev, int id)
4213{
4214 drm_i915_private_t *dev_priv = dev->dev_private;
4215 struct drm_i915_gem_phys_object *phys_obj;
4216
4217 if (!dev_priv->mm.phys_objs[id - 1])
4218 return;
4219
4220 phys_obj = dev_priv->mm.phys_objs[id - 1];
4221 if (phys_obj->cur_obj) {
4222 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4223 }
4224
4225#ifdef CONFIG_X86
4226 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4227#endif
4228 drm_pci_free(dev, phys_obj->handle);
4229 kfree(phys_obj);
4230 dev_priv->mm.phys_objs[id - 1] = NULL;
4231}
4232
4233void i915_gem_free_all_phys_object(struct drm_device *dev)
4234{
4235 int i;
4236
Dave Airlie260883c2009-01-22 17:58:49 +10004237 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004238 i915_gem_free_phys_object(dev, i);
4239}
4240
4241void i915_gem_detach_phys_object(struct drm_device *dev,
4242 struct drm_gem_object *obj)
4243{
4244 struct drm_i915_gem_object *obj_priv;
4245 int i;
4246 int ret;
4247 int page_count;
4248
4249 obj_priv = obj->driver_private;
4250 if (!obj_priv->phys_obj)
4251 return;
4252
Eric Anholt856fa192009-03-19 14:10:50 -07004253 ret = i915_gem_object_get_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004254 if (ret)
4255 goto out;
4256
4257 page_count = obj->size / PAGE_SIZE;
4258
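	/* Copy the contents back from the contiguous phys buffer into the
	 * object's shmem-backed pages before dropping the attachment.
	 */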
4259 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07004260 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004261 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4262
4263 memcpy(dst, src, PAGE_SIZE);
4264 kunmap_atomic(dst, KM_USER0);
4265 }
Eric Anholt856fa192009-03-19 14:10:50 -07004266 drm_clflush_pages(obj_priv->pages, page_count);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004267 drm_agp_chipset_flush(dev);
4268out:
4269 obj_priv->phys_obj->cur_obj = NULL;
4270 obj_priv->phys_obj = NULL;
4271}
4272
4273int
4274i915_gem_attach_phys_object(struct drm_device *dev,
4275 struct drm_gem_object *obj, int id)
4276{
4277 drm_i915_private_t *dev_priv = dev->dev_private;
4278 struct drm_i915_gem_object *obj_priv;
4279 int ret = 0;
4280 int page_count;
4281 int i;
4282
4283 if (id > I915_MAX_PHYS_OBJECT)
4284 return -EINVAL;
4285
4286 obj_priv = obj->driver_private;
4287
4288 if (obj_priv->phys_obj) {
4289 if (obj_priv->phys_obj->id == id)
4290 return 0;
4291 i915_gem_detach_phys_object(dev, obj);
4292 }
4293
4294
4295 /* create a new object */
4296 if (!dev_priv->mm.phys_objs[id - 1]) {
4297 ret = i915_gem_init_phys_object(dev, id,
4298 obj->size);
4299 if (ret) {
Linus Torvaldsaeb565d2009-01-26 10:01:53 -08004300 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004301 goto out;
4302 }
4303 }
4304
4305 /* bind to the object */
4306 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4307 obj_priv->phys_obj->cur_obj = obj;
4308
Eric Anholt856fa192009-03-19 14:10:50 -07004309 ret = i915_gem_object_get_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004310 if (ret) {
4311 DRM_ERROR("failed to get page list\n");
4312 goto out;
4313 }
4314
4315 page_count = obj->size / PAGE_SIZE;
4316
4317 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07004318 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004319 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4320
4321 memcpy(dst, src, PAGE_SIZE);
4322 kunmap_atomic(src, KM_USER0);
4323 }
4324
4325 return 0;
4326out:
4327 return ret;
4328}
4329
4330static int
4331i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4332 struct drm_i915_gem_pwrite *args,
4333 struct drm_file *file_priv)
4334{
4335 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4336 void *obj_addr;
4337 int ret;
4338 char __user *user_data;
4339
4340 user_data = (char __user *) (uintptr_t) args->data_ptr;
4341 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4342
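	/* The write lands directly in the contiguous phys buffer; the chipset
	 * flush below makes it visible to the GPU.
	 */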
Dave Airliee08fb4f2009-02-25 14:52:30 +10004343 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004344 ret = copy_from_user(obj_addr, user_data, args->size);
4345 if (ret)
4346 return -EFAULT;
4347
4348 drm_agp_chipset_flush(dev);
4349 return 0;
4350}