/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						     uint64_t offset,
						     uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}


/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
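
/*
 * Illustrative userspace sketch (not part of this file; assumes an open
 * DRM fd and the definitions from i915_drm.h, with use_handle() as a
 * placeholder for real work):
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);
 *
 * Since the kernel rounds .size up to a page multiple, create.handle here
 * names a single-page object.
 */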

static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}

static inline int
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_vaddr == NULL)
		return -ENOMEM;

	src_vaddr = kmap_atomic(src_page, KM_USER1);
	if (src_vaddr == NULL) {
		kunmap_atomic(dst_vaddr, KM_USER0);
		return -ENOMEM;
	}

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);
	kunmap_atomic(dst_vaddr, KM_USER0);

	return 0;
}

static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
	if (gpu_vaddr == NULL)
		return -ENOMEM;

	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
	if (cpu_vaddr == NULL) {
		kunmap_atomic(gpu_vaddr, KM_USER0);
		return -ENOMEM;
	}

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap_atomic(cpu_vaddr, KM_USER1);
	kunmap_atomic(gpu_vaddr, KM_USER0);

	return 0;
}
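
/*
 * Worked example of the swizzled copy above (illustrative only): on a page
 * whose physical address has bit 17 set, gpu_offset ^ 64 swaps adjacent
 * 64-byte cachelines, so bytes 0-63 exchange with bytes 64-127, 128-191
 * with 192-255, and so on.  A 50-byte read starting at gpu_offset 100 is
 * split at the cacheline boundary: 28 bytes come from offset 36 (100 ^ 64),
 * then the remaining 22 bytes from offset 192 (128 ^ 64).
 */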

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pread path, which uses get_user_pages to pin
 * the destination pages up front, so we can copy out of the object's
 * backing pages while holding the struct mutex without taking page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    1);
		} else {
			ret = slow_shmem_copy(user_pages[data_page_index],
					      data_page_offset,
					      obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference(obj);

	return ret;
}
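
/*
 * Illustrative userspace sketch of the pread ioctl (not part of this file;
 * fd and handle are placeholders):
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uint64_t)(uintptr_t)buf,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 *
 * The fast path above is tried first; a fault on buf makes it fall back
 * to the page-pinning slow path transparently.
 */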

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the fallback write path, which copies from a user page that the
 * caller has already pinned, so no fault can occur while it is kmapped.
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}

static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;


	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
				       page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
					gtt_page_base, gtt_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto out_unpin_object;

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    0);
		} else {
			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}
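
/*
 * Illustrative userspace sketch of the pwrite ioctl (not part of this
 * file; fd, handle and data are placeholders):
 *
 *	uint32_t data[4] = { 0, 1, 2, 3 };
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(data),
 *		.data_ptr = (uint64_t)(uintptr_t)data,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 *
 * Which of the implementations above services the call depends on whether
 * the object is backed by a phys object, is untiled (GTT paths), or needs
 * bit-17 swizzling (shmem slow path).
 */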

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	mutex_lock(&dev->struct_mutex);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			list_move_tail(&obj_priv->fence_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
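
/*
 * Illustrative userspace sketch (not part of this file): before writing
 * through a CPU mmap of the object, move it to the CPU domain so caches
 * and any outstanding GPU rendering are handled first:
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */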

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
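
/*
 * Illustrative userspace sketch (not part of this file; fd, handle,
 * obj_size and src are placeholders): fetch a CPU mapping, then use it
 * like ordinary memory (after a set_domain to CPU, as above):
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = obj_size,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0)
 *		memcpy((void *)(uintptr_t)mmap_arg.addr_ptr, src, obj_size);
 */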

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	case -EFAULT:
	case -EINVAL:
		return VM_FAULT_SIGBUS;
	default:
		return VM_FAULT_NOPAGE;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure.  Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked.  Removing
 * the mapping will then trigger a page fault on the next user access,
 * allowing fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}

static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
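
/*
 * Worked examples for the loop above (pre-965 parts, tiled objects): on
 * i915-class hardware start is 1MB, so an 800KB object returns 1MB (the
 * loop body never runs) while a 3MB object doubles 1MB -> 2MB -> 4MB and
 * returns 4MB.  In other words, the result is the smallest power-of-two
 * fence size, no less than start, that covers the whole object.
 */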

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = obj->driver_private;

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
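
/*
 * Illustrative userspace sketch (not part of this file; fd, handle and
 * obj_size are placeholders): GTT mapping is a two-step dance.  First
 * fetch the fake offset, then hand it to mmap(2) on the same DRM fd so
 * that drm_gem_mmap() routes faults to i915_gem_fault():
 *
 *	struct drm_i915_gem_mmap_gtt gtt = { .handle = handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gtt) == 0)
 *		ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, gtt.offset);
 */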
1423
Ben Gamari6911a9b2009-04-02 11:24:54 -07001424void
Eric Anholt856fa192009-03-19 14:10:50 -07001425i915_gem_object_put_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001426{
1427 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1428 int page_count = obj->size / PAGE_SIZE;
1429 int i;
1430
Eric Anholt856fa192009-03-19 14:10:50 -07001431 BUG_ON(obj_priv->pages_refcount == 0);
1432
1433 if (--obj_priv->pages_refcount != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07001434 return;
1435
Eric Anholt280b7132009-03-12 16:56:27 -07001436 if (obj_priv->tiling_mode != I915_TILING_NONE)
1437 i915_gem_object_save_bit_17_swizzle(obj);
1438
Eric Anholt673a3942008-07-30 12:06:12 -07001439 for (i = 0; i < page_count; i++)
Eric Anholt856fa192009-03-19 14:10:50 -07001440 if (obj_priv->pages[i] != NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07001441 if (obj_priv->dirty)
Eric Anholt856fa192009-03-19 14:10:50 -07001442 set_page_dirty(obj_priv->pages[i]);
1443 mark_page_accessed(obj_priv->pages[i]);
1444 page_cache_release(obj_priv->pages[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07001445 }
1446 obj_priv->dirty = 0;
1447
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07001448 drm_free_large(obj_priv->pages);
Eric Anholt856fa192009-03-19 14:10:50 -07001449 obj_priv->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001450}
1451
1452static void
Eric Anholtce44b0e2008-11-06 16:00:31 -08001453i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001454{
1455 struct drm_device *dev = obj->dev;
1456 drm_i915_private_t *dev_priv = dev->dev_private;
1457 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1458
1459 /* Add a reference if we're newly entering the active list. */
1460 if (!obj_priv->active) {
1461 drm_gem_object_reference(obj);
1462 obj_priv->active = 1;
1463 }
1464 /* Move from whatever list we were on to the tail of execution. */
Carl Worth5e118f42009-03-20 11:54:25 -07001465 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001466 list_move_tail(&obj_priv->list,
1467 &dev_priv->mm.active_list);
Carl Worth5e118f42009-03-20 11:54:25 -07001468 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001469 obj_priv->last_rendering_seqno = seqno;
Eric Anholt673a3942008-07-30 12:06:12 -07001470}
1471
Eric Anholtce44b0e2008-11-06 16:00:31 -08001472static void
1473i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1474{
1475 struct drm_device *dev = obj->dev;
1476 drm_i915_private_t *dev_priv = dev->dev_private;
1477 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1478
1479 BUG_ON(!obj_priv->active);
1480 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1481 obj_priv->last_rendering_seqno = 0;
1482}
Eric Anholt673a3942008-07-30 12:06:12 -07001483
1484static void
1485i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1486{
1487 struct drm_device *dev = obj->dev;
1488 drm_i915_private_t *dev_priv = dev->dev_private;
1489 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1490
1491 i915_verify_inactive(dev, __FILE__, __LINE__);
1492 if (obj_priv->pin_count != 0)
1493 list_del_init(&obj_priv->list);
1494 else
1495 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1496
Eric Anholtce44b0e2008-11-06 16:00:31 -08001497 obj_priv->last_rendering_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001498 if (obj_priv->active) {
1499 obj_priv->active = 0;
1500 drm_gem_object_unreference(obj);
1501 }
1502 i915_verify_inactive(dev, __FILE__, __LINE__);
1503}
1504
1505/**
1506 * Creates a new sequence number, emitting a write of it to the status page
1507 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1508 *
1509 * Must be called with struct_lock held.
1510 *
1511 * Returned sequence numbers are nonzero on success.
1512 */
1513static uint32_t
Eric Anholtb9624422009-06-03 07:27:35 +00001514i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1515 uint32_t flush_domains)
Eric Anholt673a3942008-07-30 12:06:12 -07001516{
1517 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtb9624422009-06-03 07:27:35 +00001518 struct drm_i915_file_private *i915_file_priv = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001519 struct drm_i915_gem_request *request;
1520 uint32_t seqno;
1521 int was_empty;
1522 RING_LOCALS;
1523
Eric Anholtb9624422009-06-03 07:27:35 +00001524 if (file_priv != NULL)
1525 i915_file_priv = file_priv->driver_priv;
1526
Eric Anholt9a298b22009-03-24 12:23:04 -07001527 request = kzalloc(sizeof(*request), GFP_KERNEL);
Eric Anholt673a3942008-07-30 12:06:12 -07001528 if (request == NULL)
1529 return 0;
1530
1531 /* Grab the seqno we're going to make this request be, and bump the
1532 * next (skipping 0 so it can be the reserved no-seqno value).
1533 */
1534 seqno = dev_priv->mm.next_gem_seqno;
1535 dev_priv->mm.next_gem_seqno++;
1536 if (dev_priv->mm.next_gem_seqno == 0)
1537 dev_priv->mm.next_gem_seqno++;
1538
1539 BEGIN_LP_RING(4);
1540 OUT_RING(MI_STORE_DWORD_INDEX);
1541 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1542 OUT_RING(seqno);
1543
1544 OUT_RING(MI_USER_INTERRUPT);
1545 ADVANCE_LP_RING();
1546
1547 DRM_DEBUG("%d\n", seqno);
1548
1549 request->seqno = seqno;
1550 request->emitted_jiffies = jiffies;
Eric Anholt673a3942008-07-30 12:06:12 -07001551 was_empty = list_empty(&dev_priv->mm.request_list);
1552 list_add_tail(&request->list, &dev_priv->mm.request_list);
Eric Anholtb9624422009-06-03 07:27:35 +00001553 if (i915_file_priv) {
1554 list_add_tail(&request->client_list,
1555 &i915_file_priv->mm.request_list);
1556 } else {
1557 INIT_LIST_HEAD(&request->client_list);
1558 }
Eric Anholt673a3942008-07-30 12:06:12 -07001559
Eric Anholtce44b0e2008-11-06 16:00:31 -08001560 /* Associate any objects on the flushing list matching the write
1561 * domain we're flushing with our flush.
1562 */
1563 if (flush_domains != 0) {
1564 struct drm_i915_gem_object *obj_priv, *next;
1565
1566 list_for_each_entry_safe(obj_priv, next,
1567 &dev_priv->mm.flushing_list, list) {
1568 struct drm_gem_object *obj = obj_priv->obj;
1569
1570 if ((obj->write_domain & flush_domains) ==
1571 obj->write_domain) {
1572 obj->write_domain = 0;
1573 i915_gem_object_move_to_active(obj, seqno);
1574 }
1575 }
1576
1577 }
1578
Ben Gamarif65d9422009-09-14 17:48:44 -04001579 if (!dev_priv->mm.suspended) {
1580 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1581 if (was_empty)
1582 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1583 }
Eric Anholt673a3942008-07-30 12:06:12 -07001584 return seqno;
1585}
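/*
 * Illustrative usage sketch, assuming a caller that has just queued GPU
 * work against obj->write_domain (this mirrors the flush paths later in
 * this file):
 *
 *   i915_gem_flush(dev, 0, obj->write_domain);
 *   seqno = i915_add_request(dev, NULL, obj->write_domain);
 *   obj->write_domain = 0;
 *   i915_gem_object_move_to_active(obj, seqno);
 *
 * A zero return means the request structure could not be allocated and no
 * seqno was emitted.
 */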
1586
1587/**
1588 * Command execution barrier
1589 *
1590 * Ensures that all commands in the ring are finished
1591 * before signalling the CPU
1592 */
Eric Anholt3043c602008-10-02 12:24:47 -07001593static uint32_t
Eric Anholt673a3942008-07-30 12:06:12 -07001594i915_retire_commands(struct drm_device *dev)
1595{
1596 drm_i915_private_t *dev_priv = dev->dev_private;
1597 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1598 uint32_t flush_domains = 0;
1599 RING_LOCALS;
1600
1601 /* The sampler always gets flushed on i965 (sigh) */
1602 if (IS_I965G(dev))
1603 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1604 BEGIN_LP_RING(2);
1605 OUT_RING(cmd);
1606 OUT_RING(0); /* noop */
1607 ADVANCE_LP_RING();
1608 return flush_domains;
1609}
1610
1611/**
1612 * Moves buffers associated only with the given active seqno from the active
1613 * to inactive list, potentially freeing them.
1614 */
1615static void
1616i915_gem_retire_request(struct drm_device *dev,
1617 struct drm_i915_gem_request *request)
1618{
1619 drm_i915_private_t *dev_priv = dev->dev_private;
1620
1621 /* Move any buffers on the active list that are no longer referenced
1622 * by the ringbuffer to the flushing/inactive lists as appropriate.
1623 */
Carl Worth5e118f42009-03-20 11:54:25 -07001624 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001625 while (!list_empty(&dev_priv->mm.active_list)) {
1626 struct drm_gem_object *obj;
1627 struct drm_i915_gem_object *obj_priv;
1628
1629 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1630 struct drm_i915_gem_object,
1631 list);
1632 obj = obj_priv->obj;
1633
1634 /* If the seqno being retired doesn't match the oldest in the
1635 * list, then the oldest in the list must still be newer than
1636 * this seqno.
1637 */
1638 if (obj_priv->last_rendering_seqno != request->seqno)
Carl Worth5e118f42009-03-20 11:54:25 -07001639 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001640
Eric Anholt673a3942008-07-30 12:06:12 -07001641#if WATCH_LRU
1642 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1643 __func__, request->seqno, obj);
1644#endif
1645
Eric Anholtce44b0e2008-11-06 16:00:31 -08001646 if (obj->write_domain != 0)
1647 i915_gem_object_move_to_flushing(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001648 else {
1649 /* Take a reference on the object so it won't be
1650 * freed while the spinlock is held. The list
1651 * protection for this spinlock is safe when breaking
1652 * the lock like this since the next thing we do
1653 * is just get the head of the list again.
1654 */
1655 drm_gem_object_reference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001656 i915_gem_object_move_to_inactive(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001657 spin_unlock(&dev_priv->mm.active_list_lock);
1658 drm_gem_object_unreference(obj);
1659 spin_lock(&dev_priv->mm.active_list_lock);
1660 }
Eric Anholt673a3942008-07-30 12:06:12 -07001661 }
Carl Worth5e118f42009-03-20 11:54:25 -07001662out:
1663 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001664}
1665
1666/**
 1667 * Returns true if seq1 is at or after seq2, handling seqno wraparound.
1668 */
Ben Gamari22be1722009-09-14 17:48:43 -04001669bool
Eric Anholt673a3942008-07-30 12:06:12 -07001670i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1671{
1672 return (int32_t)(seq1 - seq2) >= 0;
1673}
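/*
 * Worked example (illustrative): the signed subtraction above keeps the
 * comparison correct across 32-bit seqno wraparound. With seq2 ==
 * 0xfffffff0 and seq1 == 5 (the counter has wrapped), seq1 - seq2 is 21 as
 * a uint32_t, so the cast yields 21 >= 0 and seq1 is treated as having
 * passed seq2, as intended.
 */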
1674
1675uint32_t
1676i915_get_gem_seqno(struct drm_device *dev)
1677{
1678 drm_i915_private_t *dev_priv = dev->dev_private;
1679
1680 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1681}
1682
1683/**
1684 * This function clears the request list as sequence numbers are passed.
1685 */
1686void
1687i915_gem_retire_requests(struct drm_device *dev)
1688{
1689 drm_i915_private_t *dev_priv = dev->dev_private;
1690 uint32_t seqno;
1691
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001692 if (!dev_priv->hw_status_page)
1693 return;
1694
Eric Anholt673a3942008-07-30 12:06:12 -07001695 seqno = i915_get_gem_seqno(dev);
1696
1697 while (!list_empty(&dev_priv->mm.request_list)) {
1698 struct drm_i915_gem_request *request;
1699 uint32_t retiring_seqno;
1700
1701 request = list_first_entry(&dev_priv->mm.request_list,
1702 struct drm_i915_gem_request,
1703 list);
1704 retiring_seqno = request->seqno;
1705
1706 if (i915_seqno_passed(seqno, retiring_seqno) ||
Ben Gamariba1234d2009-09-14 17:48:47 -04001707 atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001708 i915_gem_retire_request(dev, request);
1709
1710 list_del(&request->list);
Eric Anholtb9624422009-06-03 07:27:35 +00001711 list_del(&request->client_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07001712 kfree(request);
Eric Anholt673a3942008-07-30 12:06:12 -07001713 } else
1714 break;
1715 }
1716}
1717
1718void
1719i915_gem_retire_work_handler(struct work_struct *work)
1720{
1721 drm_i915_private_t *dev_priv;
1722 struct drm_device *dev;
1723
1724 dev_priv = container_of(work, drm_i915_private_t,
1725 mm.retire_work.work);
1726 dev = dev_priv->dev;
1727
1728 mutex_lock(&dev->struct_mutex);
1729 i915_gem_retire_requests(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07001730 if (!dev_priv->mm.suspended &&
1731 !list_empty(&dev_priv->mm.request_list))
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001732 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Eric Anholt673a3942008-07-30 12:06:12 -07001733 mutex_unlock(&dev->struct_mutex);
1734}
1735
1736/**
1737 * Waits for a sequence number to be signaled, and cleans up the
1738 * request and object lists appropriately for that event.
1739 */
Eric Anholt3043c602008-10-02 12:24:47 -07001740static int
Eric Anholt673a3942008-07-30 12:06:12 -07001741i915_wait_request(struct drm_device *dev, uint32_t seqno)
1742{
1743 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001744 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001745 int ret = 0;
1746
1747 BUG_ON(seqno == 0);
1748
Ben Gamariba1234d2009-09-14 17:48:47 -04001749 if (atomic_read(&dev_priv->mm.wedged))
Ben Gamariffed1d02009-09-14 17:48:41 -04001750 return -EIO;
1751
Eric Anholt673a3942008-07-30 12:06:12 -07001752 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001753 if (IS_IGDNG(dev))
1754 ier = I915_READ(DEIER) | I915_READ(GTIER);
1755 else
1756 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001757 if (!ier) {
1758 DRM_ERROR("something (likely vbetool) disabled "
1759 "interrupts, re-enabling\n");
1760 i915_driver_irq_preinstall(dev);
1761 i915_driver_irq_postinstall(dev);
1762 }
1763
Eric Anholt673a3942008-07-30 12:06:12 -07001764 dev_priv->mm.waiting_gem_seqno = seqno;
1765 i915_user_irq_get(dev);
1766 ret = wait_event_interruptible(dev_priv->irq_queue,
1767 i915_seqno_passed(i915_get_gem_seqno(dev),
1768 seqno) ||
Ben Gamariba1234d2009-09-14 17:48:47 -04001769 atomic_read(&dev_priv->mm.wedged));
Eric Anholt673a3942008-07-30 12:06:12 -07001770 i915_user_irq_put(dev);
1771 dev_priv->mm.waiting_gem_seqno = 0;
1772 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001773 if (atomic_read(&dev_priv->mm.wedged))
Eric Anholt673a3942008-07-30 12:06:12 -07001774 ret = -EIO;
1775
1776 if (ret && ret != -ERESTARTSYS)
1777 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1778 __func__, ret, seqno, i915_get_gem_seqno(dev));
1779
1780 /* Directly dispatch request retiring. While we have the work queue
1781 * to handle this, the waiter on a request often wants an associated
1782 * buffer to have made it to the inactive list, and we would need
1783 * a separate wait queue to handle that.
1784 */
1785 if (ret == 0)
1786 i915_gem_retire_requests(dev);
1787
1788 return ret;
1789}
1790
1791static void
1792i915_gem_flush(struct drm_device *dev,
1793 uint32_t invalidate_domains,
1794 uint32_t flush_domains)
1795{
1796 drm_i915_private_t *dev_priv = dev->dev_private;
1797 uint32_t cmd;
1798 RING_LOCALS;
1799
1800#if WATCH_EXEC
1801 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1802 invalidate_domains, flush_domains);
1803#endif
1804
1805 if (flush_domains & I915_GEM_DOMAIN_CPU)
1806 drm_agp_chipset_flush(dev);
1807
Chris Wilson21d509e2009-06-06 09:46:02 +01001808 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
Eric Anholt673a3942008-07-30 12:06:12 -07001809 /*
1810 * read/write caches:
1811 *
1812 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1813 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1814 * also flushed at 2d versus 3d pipeline switches.
1815 *
1816 * read-only caches:
1817 *
1818 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1819 * MI_READ_FLUSH is set, and is always flushed on 965.
1820 *
1821 * I915_GEM_DOMAIN_COMMAND may not exist?
1822 *
1823 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1824 * invalidated when MI_EXE_FLUSH is set.
1825 *
1826 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1827 * invalidated with every MI_FLUSH.
1828 *
1829 * TLBs:
1830 *
1831 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
 1832 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
1833 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1834 * are flushed at any MI_FLUSH.
1835 */
1836
1837 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1838 if ((invalidate_domains|flush_domains) &
1839 I915_GEM_DOMAIN_RENDER)
1840 cmd &= ~MI_NO_WRITE_FLUSH;
1841 if (!IS_I965G(dev)) {
1842 /*
1843 * On the 965, the sampler cache always gets flushed
1844 * and this bit is reserved.
1845 */
1846 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1847 cmd |= MI_READ_FLUSH;
1848 }
1849 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1850 cmd |= MI_EXE_FLUSH;
1851
1852#if WATCH_EXEC
1853 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1854#endif
1855 BEGIN_LP_RING(2);
1856 OUT_RING(cmd);
1857 OUT_RING(0); /* noop */
1858 ADVANCE_LP_RING();
1859 }
1860}
1861
1862/**
1863 * Ensures that all rendering to the object has completed and the object is
1864 * safe to unbind from the GTT or access from the CPU.
1865 */
1866static int
1867i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1868{
1869 struct drm_device *dev = obj->dev;
1870 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1871 int ret;
1872
Eric Anholte47c68e2008-11-14 13:35:19 -08001873 /* This function only exists to support waiting for existing rendering,
1874 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07001875 */
Eric Anholte47c68e2008-11-14 13:35:19 -08001876 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07001877
1878 /* If there is rendering queued on the buffer being evicted, wait for
1879 * it.
1880 */
1881 if (obj_priv->active) {
1882#if WATCH_BUF
1883 DRM_INFO("%s: object %p wait for seqno %08x\n",
1884 __func__, obj, obj_priv->last_rendering_seqno);
1885#endif
1886 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1887 if (ret != 0)
1888 return ret;
1889 }
1890
1891 return 0;
1892}
1893
1894/**
1895 * Unbinds an object from the GTT aperture.
1896 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08001897int
Eric Anholt673a3942008-07-30 12:06:12 -07001898i915_gem_object_unbind(struct drm_gem_object *obj)
1899{
1900 struct drm_device *dev = obj->dev;
1901 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1902 int ret = 0;
1903
1904#if WATCH_BUF
1905 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1906 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1907#endif
1908 if (obj_priv->gtt_space == NULL)
1909 return 0;
1910
1911 if (obj_priv->pin_count != 0) {
1912 DRM_ERROR("Attempting to unbind pinned buffer\n");
1913 return -EINVAL;
1914 }
1915
Eric Anholt5323fd02009-09-09 11:50:45 -07001916 /* blow away mappings if mapped through GTT */
1917 i915_gem_release_mmap(obj);
1918
1919 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1920 i915_gem_clear_fence_reg(obj);
1921
Eric Anholt673a3942008-07-30 12:06:12 -07001922 /* Move the object to the CPU domain to ensure that
1923 * any possible CPU writes while it's not in the GTT
1924 * are flushed when we go to remap it. This will
1925 * also ensure that all pending GPU writes are finished
1926 * before we unbind.
1927 */
Eric Anholte47c68e2008-11-14 13:35:19 -08001928 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07001929 if (ret) {
Eric Anholte47c68e2008-11-14 13:35:19 -08001930 if (ret != -ERESTARTSYS)
1931 DRM_ERROR("set_domain failed: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07001932 return ret;
1933 }
1934
Eric Anholt5323fd02009-09-09 11:50:45 -07001935 BUG_ON(obj_priv->active);
1936
Eric Anholt673a3942008-07-30 12:06:12 -07001937 if (obj_priv->agp_mem != NULL) {
1938 drm_unbind_agp(obj_priv->agp_mem);
1939 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1940 obj_priv->agp_mem = NULL;
1941 }
1942
Eric Anholt856fa192009-03-19 14:10:50 -07001943 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001944
1945 if (obj_priv->gtt_space) {
1946 atomic_dec(&dev->gtt_count);
1947 atomic_sub(obj->size, &dev->gtt_memory);
1948
1949 drm_mm_put_block(obj_priv->gtt_space);
1950 obj_priv->gtt_space = NULL;
1951 }
1952
1953 /* Remove ourselves from the LRU list if present. */
1954 if (!list_empty(&obj_priv->list))
1955 list_del_init(&obj_priv->list);
1956
1957 return 0;
1958}
1959
1960static int
1961i915_gem_evict_something(struct drm_device *dev)
1962{
1963 drm_i915_private_t *dev_priv = dev->dev_private;
1964 struct drm_gem_object *obj;
1965 struct drm_i915_gem_object *obj_priv;
1966 int ret = 0;
1967
1968 for (;;) {
1969 /* If there's an inactive buffer available now, grab it
1970 * and be done.
1971 */
1972 if (!list_empty(&dev_priv->mm.inactive_list)) {
1973 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
1974 struct drm_i915_gem_object,
1975 list);
1976 obj = obj_priv->obj;
1977 BUG_ON(obj_priv->pin_count != 0);
1978#if WATCH_LRU
1979 DRM_INFO("%s: evicting %p\n", __func__, obj);
1980#endif
1981 BUG_ON(obj_priv->active);
1982
1983 /* Wait on the rendering and unbind the buffer. */
1984 ret = i915_gem_object_unbind(obj);
1985 break;
1986 }
1987
1988 /* If we didn't get anything, but the ring is still processing
1989 * things, wait for one of those things to finish and hopefully
1990 * leave us a buffer to evict.
1991 */
1992 if (!list_empty(&dev_priv->mm.request_list)) {
1993 struct drm_i915_gem_request *request;
1994
1995 request = list_first_entry(&dev_priv->mm.request_list,
1996 struct drm_i915_gem_request,
1997 list);
1998
1999 ret = i915_wait_request(dev, request->seqno);
2000 if (ret)
2001 break;
2002
2003 /* if waiting caused an object to become inactive,
2004 * then loop around and wait for it. Otherwise, we
2005 * assume that waiting freed and unbound something,
2006 * so there should now be some space in the GTT
2007 */
2008 if (!list_empty(&dev_priv->mm.inactive_list))
2009 continue;
2010 break;
2011 }
2012
2013 /* If we didn't have anything on the request list but there
2014 * are buffers awaiting a flush, emit one and try again.
2015 * When we wait on it, those buffers waiting for that flush
2016 * will get moved to inactive.
2017 */
2018 if (!list_empty(&dev_priv->mm.flushing_list)) {
2019 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2020 struct drm_i915_gem_object,
2021 list);
2022 obj = obj_priv->obj;
2023
2024 i915_gem_flush(dev,
2025 obj->write_domain,
2026 obj->write_domain);
Eric Anholtb9624422009-06-03 07:27:35 +00002027 i915_add_request(dev, NULL, obj->write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07002028
2029 obj = NULL;
2030 continue;
2031 }
2032
2033 DRM_ERROR("inactive empty %d request empty %d "
2034 "flushing empty %d\n",
2035 list_empty(&dev_priv->mm.inactive_list),
2036 list_empty(&dev_priv->mm.request_list),
2037 list_empty(&dev_priv->mm.flushing_list));
2038 /* If we didn't do any of the above, there's nothing to be done
2039 * and we just can't fit it in.
2040 */
Chris Wilson2939e1f2009-06-06 09:46:03 +01002041 return -ENOSPC;
Eric Anholt673a3942008-07-30 12:06:12 -07002042 }
2043 return ret;
2044}
2045
2046static int
Keith Packardac94a962008-11-20 23:30:27 -08002047i915_gem_evict_everything(struct drm_device *dev)
2048{
2049 int ret;
2050
2051 for (;;) {
2052 ret = i915_gem_evict_something(dev);
2053 if (ret != 0)
2054 break;
2055 }
Chris Wilson2939e1f2009-06-06 09:46:03 +01002056 if (ret == -ENOSPC)
Owain Ainsworth15c35332008-12-06 20:42:20 -08002057 return 0;
Keith Packardac94a962008-11-20 23:30:27 -08002058 return ret;
2059}
2060
Ben Gamari6911a9b2009-04-02 11:24:54 -07002061int
Eric Anholt856fa192009-03-19 14:10:50 -07002062i915_gem_object_get_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002063{
2064 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2065 int page_count, i;
2066 struct address_space *mapping;
2067 struct inode *inode;
2068 struct page *page;
2069 int ret;
2070
Eric Anholt856fa192009-03-19 14:10:50 -07002071 if (obj_priv->pages_refcount++ != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002072 return 0;
2073
2074 /* Get the list of pages out of our struct file. They'll be pinned
2075 * at this point until we release them.
2076 */
2077 page_count = obj->size / PAGE_SIZE;
Eric Anholt856fa192009-03-19 14:10:50 -07002078 BUG_ON(obj_priv->pages != NULL);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07002079 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
Eric Anholt856fa192009-03-19 14:10:50 -07002080 if (obj_priv->pages == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07002081 DRM_ERROR("Failed to allocate page list\n");
Eric Anholt856fa192009-03-19 14:10:50 -07002082 obj_priv->pages_refcount--;
Eric Anholt673a3942008-07-30 12:06:12 -07002083 return -ENOMEM;
2084 }
2085
2086 inode = obj->filp->f_path.dentry->d_inode;
2087 mapping = inode->i_mapping;
2088 for (i = 0; i < page_count; i++) {
2089 page = read_mapping_page(mapping, i, NULL);
2090 if (IS_ERR(page)) {
2091 ret = PTR_ERR(page);
2092 DRM_ERROR("read_mapping_page failed: %d\n", ret);
Eric Anholt856fa192009-03-19 14:10:50 -07002093 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002094 return ret;
2095 }
Eric Anholt856fa192009-03-19 14:10:50 -07002096 obj_priv->pages[i] = page;
Eric Anholt673a3942008-07-30 12:06:12 -07002097 }
Eric Anholt280b7132009-03-12 16:56:27 -07002098
2099 if (obj_priv->tiling_mode != I915_TILING_NONE)
2100 i915_gem_object_do_bit_17_swizzle(obj);
2101
Eric Anholt673a3942008-07-30 12:06:12 -07002102 return 0;
2103}
2104
Jesse Barnesde151cf2008-11-12 10:03:55 -08002105static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2106{
2107 struct drm_gem_object *obj = reg->obj;
2108 struct drm_device *dev = obj->dev;
2109 drm_i915_private_t *dev_priv = dev->dev_private;
2110 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2111 int regnum = obj_priv->fence_reg;
2112 uint64_t val;
2113
2114 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2115 0xfffff000) << 32;
2116 val |= obj_priv->gtt_offset & 0xfffff000;
2117 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2118 if (obj_priv->tiling_mode == I915_TILING_Y)
2119 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2120 val |= I965_FENCE_REG_VALID;
2121
2122 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2123}
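/*
 * Worked example (illustrative; field positions follow the code above):
 * for a Y-tiled object at gtt_offset 0x00100000 of size 1MB with a
 * 4096-byte stride, the upper 32 bits hold the fence end address
 * ((0x100000 + 0x100000 - 4096) & 0xfffff000 == 0x1ff000), the lower 32
 * bits hold the start address 0x100000, the pitch field holds
 * 4096 / 128 - 1 == 31 shifted by I965_FENCE_PITCH_SHIFT, and the
 * tiling-Y and valid bits are set.
 */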
2124
2125static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2126{
2127 struct drm_gem_object *obj = reg->obj;
2128 struct drm_device *dev = obj->dev;
2129 drm_i915_private_t *dev_priv = dev->dev_private;
2130 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2131 int regnum = obj_priv->fence_reg;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002132 int tile_width;
Eric Anholtdc529a42009-03-10 22:34:49 -07002133 uint32_t fence_reg, val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002134 uint32_t pitch_val;
2135
2136 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2137 (obj_priv->gtt_offset & (obj->size - 1))) {
Linus Torvaldsf06da262009-02-09 08:57:29 -08002138 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002139 __func__, obj_priv->gtt_offset, obj->size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002140 return;
2141 }
2142
Jesse Barnes0f973f22009-01-26 17:10:45 -08002143 if (obj_priv->tiling_mode == I915_TILING_Y &&
2144 HAS_128_BYTE_Y_TILING(dev))
2145 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002146 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002147 tile_width = 512;
2148
 2149 /* Note: the pitch must be a power-of-two number of tile widths */
2150 pitch_val = obj_priv->stride / tile_width;
2151 pitch_val = ffs(pitch_val) - 1;
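	/*
	 * Illustrative example: with a 4096-byte stride and 512-byte tiles,
	 * pitch_val is first 4096 / 512 == 8 and then ffs(8) - 1 == 3, i.e.
	 * the register stores log2 of the pitch in tile widths.
	 */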
Jesse Barnesde151cf2008-11-12 10:03:55 -08002152
2153 val = obj_priv->gtt_offset;
2154 if (obj_priv->tiling_mode == I915_TILING_Y)
2155 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2156 val |= I915_FENCE_SIZE_BITS(obj->size);
2157 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2158 val |= I830_FENCE_REG_VALID;
2159
Eric Anholtdc529a42009-03-10 22:34:49 -07002160 if (regnum < 8)
2161 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2162 else
2163 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2164 I915_WRITE(fence_reg, val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002165}
2166
2167static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2168{
2169 struct drm_gem_object *obj = reg->obj;
2170 struct drm_device *dev = obj->dev;
2171 drm_i915_private_t *dev_priv = dev->dev_private;
2172 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2173 int regnum = obj_priv->fence_reg;
2174 uint32_t val;
2175 uint32_t pitch_val;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002176 uint32_t fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002177
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002178 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
Jesse Barnesde151cf2008-11-12 10:03:55 -08002179 (obj_priv->gtt_offset & (obj->size - 1))) {
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002180 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002181 __func__, obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002182 return;
2183 }
2184
Eric Anholte76a16d2009-05-26 17:44:56 -07002185 pitch_val = obj_priv->stride / 128;
2186 pitch_val = ffs(pitch_val) - 1;
2187 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2188
Jesse Barnesde151cf2008-11-12 10:03:55 -08002189 val = obj_priv->gtt_offset;
2190 if (obj_priv->tiling_mode == I915_TILING_Y)
2191 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002192 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2193 WARN_ON(fence_size_bits & ~0x00000f00);
2194 val |= fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002195 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2196 val |= I830_FENCE_REG_VALID;
2197
2198 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002199}
2200
2201/**
2202 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2203 * @obj: object to map through a fence reg
2204 *
2205 * When mapping objects through the GTT, userspace wants to be able to write
2206 * to them without having to worry about swizzling if the object is tiled.
2207 *
2208 * This function walks the fence regs looking for a free one for @obj,
2209 * stealing one if it can't find any.
2210 *
2211 * It then sets up the reg based on the object's properties: address, pitch
2212 * and tiling format.
2213 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002214int
2215i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002216{
2217 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002218 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002219 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2220 struct drm_i915_fence_reg *reg = NULL;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002221 struct drm_i915_gem_object *old_obj_priv = NULL;
2222 int i, ret, avail;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002223
Eric Anholta09ba7f2009-08-29 12:49:51 -07002224 /* Just update our place in the LRU if our fence is getting used. */
2225 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2226 list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2227 return 0;
2228 }
2229
Jesse Barnesde151cf2008-11-12 10:03:55 -08002230 switch (obj_priv->tiling_mode) {
2231 case I915_TILING_NONE:
2232 WARN(1, "allocating a fence for non-tiled object?\n");
2233 break;
2234 case I915_TILING_X:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002235 if (!obj_priv->stride)
2236 return -EINVAL;
2237 WARN((obj_priv->stride & (512 - 1)),
2238 "object 0x%08x is X tiled but has non-512B pitch\n",
2239 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002240 break;
2241 case I915_TILING_Y:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002242 if (!obj_priv->stride)
2243 return -EINVAL;
2244 WARN((obj_priv->stride & (128 - 1)),
2245 "object 0x%08x is Y tiled but has non-128B pitch\n",
2246 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002247 break;
2248 }
2249
2250 /* First try to find a free reg */
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002251 avail = 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002252 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2253 reg = &dev_priv->fence_regs[i];
2254 if (!reg->obj)
2255 break;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002256
2257 old_obj_priv = reg->obj->driver_private;
2258 if (!old_obj_priv->pin_count)
2259 avail++;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002260 }
2261
2262 /* None available, try to steal one or wait for a user to finish */
2263 if (i == dev_priv->num_fence_regs) {
Eric Anholta09ba7f2009-08-29 12:49:51 -07002264 struct drm_gem_object *old_obj = NULL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002265
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002266 if (avail == 0)
Chris Wilson2939e1f2009-06-06 09:46:03 +01002267 return -ENOSPC;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002268
Eric Anholta09ba7f2009-08-29 12:49:51 -07002269 list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
2270 fence_list) {
2271 old_obj = old_obj_priv->obj;
Chris Wilsond7619c42009-02-11 14:26:47 +00002272
2273 if (old_obj_priv->pin_count)
2274 continue;
2275
Eric Anholta09ba7f2009-08-29 12:49:51 -07002276 /* Take a reference, as otherwise the wait_rendering
2277 * below may cause the object to get freed out from
2278 * under us.
2279 */
2280 drm_gem_object_reference(old_obj);
2281
Chris Wilsond7619c42009-02-11 14:26:47 +00002282 /* i915 uses fences for GPU access to tiled buffers */
2283 if (IS_I965G(dev) || !old_obj_priv->active)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002284 break;
Chris Wilsond7619c42009-02-11 14:26:47 +00002285
Eric Anholta09ba7f2009-08-29 12:49:51 -07002286 /* This brings the object to the head of the LRU if it
2287 * had been written to. The only way this should
2288 * result in us waiting longer than the expected
2289 * optimal amount of time is if there was a
2290 * fence-using buffer later that was read-only.
2291 */
2292 i915_gem_object_flush_gpu_write_domain(old_obj);
2293 ret = i915_gem_object_wait_rendering(old_obj);
Chris Wilson58c2fb62009-09-01 12:02:39 +01002294 if (ret != 0) {
2295 drm_gem_object_unreference(old_obj);
Chris Wilsond7619c42009-02-11 14:26:47 +00002296 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002297 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002298
Eric Anholta09ba7f2009-08-29 12:49:51 -07002299 break;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002300 }
2301
2302 /*
2303 * Zap this virtual mapping so we can set up a fence again
2304 * for this object next time we need it.
2305 */
Chris Wilson58c2fb62009-09-01 12:02:39 +01002306 i915_gem_release_mmap(old_obj);
2307
Eric Anholta09ba7f2009-08-29 12:49:51 -07002308 i = old_obj_priv->fence_reg;
Chris Wilson58c2fb62009-09-01 12:02:39 +01002309 reg = &dev_priv->fence_regs[i];
2310
Jesse Barnesde151cf2008-11-12 10:03:55 -08002311 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002312 list_del_init(&old_obj_priv->fence_list);
Chris Wilson58c2fb62009-09-01 12:02:39 +01002313
Eric Anholta09ba7f2009-08-29 12:49:51 -07002314 drm_gem_object_unreference(old_obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002315 }
2316
2317 obj_priv->fence_reg = i;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002318 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2319
Jesse Barnesde151cf2008-11-12 10:03:55 -08002320 reg->obj = obj;
2321
2322 if (IS_I965G(dev))
2323 i965_write_fence_reg(reg);
2324 else if (IS_I9XX(dev))
2325 i915_write_fence_reg(reg);
2326 else
2327 i830_write_fence_reg(reg);
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002328
2329 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002330}
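/*
 * Illustrative (assumed) caller pattern: the GTT fault/mmap path is
 * expected to request a fence before letting userspace touch a tiled
 * object through the aperture, roughly:
 *
 *   if (obj_priv->tiling_mode != I915_TILING_NONE) {
 *       ret = i915_gem_object_get_fence_reg(obj);
 *       if (ret)
 *           return ret;
 *   }
 *
 * The actual call site is outside this excerpt, so treat this as a sketch
 * rather than the driver's literal code.
 */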
2331
2332/**
2333 * i915_gem_clear_fence_reg - clear out fence register info
2334 * @obj: object to clear
2335 *
2336 * Zeroes out the fence register itself and clears out the associated
2337 * data structures in dev_priv and obj_priv.
2338 */
2339static void
2340i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2341{
2342 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002343 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002344 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2345
2346 if (IS_I965G(dev))
2347 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
Eric Anholtdc529a42009-03-10 22:34:49 -07002348 else {
2349 uint32_t fence_reg;
2350
2351 if (obj_priv->fence_reg < 8)
2352 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2353 else
2354 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2355 8) * 4;
2356
2357 I915_WRITE(fence_reg, 0);
2358 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002359
2360 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2361 obj_priv->fence_reg = I915_FENCE_REG_NONE;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002362 list_del_init(&obj_priv->fence_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002363}
2364
Eric Anholt673a3942008-07-30 12:06:12 -07002365/**
Chris Wilson52dc7d32009-06-06 09:46:01 +01002366 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2367 * to the buffer to finish, and then resets the fence register.
2368 * @obj: tiled object holding a fence register.
2369 *
2370 * Zeroes out the fence register itself and clears out the associated
2371 * data structures in dev_priv and obj_priv.
2372 */
2373int
2374i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2375{
2376 struct drm_device *dev = obj->dev;
2377 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2378
2379 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2380 return 0;
2381
2382 /* On the i915, GPU access to tiled buffers is via a fence,
2383 * therefore we must wait for any outstanding access to complete
2384 * before clearing the fence.
2385 */
2386 if (!IS_I965G(dev)) {
2387 int ret;
2388
2389 i915_gem_object_flush_gpu_write_domain(obj);
2390 i915_gem_object_flush_gtt_write_domain(obj);
2391 ret = i915_gem_object_wait_rendering(obj);
2392 if (ret != 0)
2393 return ret;
2394 }
2395
 2396 i915_gem_clear_fence_reg(obj);
2397
2398 return 0;
2399}
2400
2401/**
Eric Anholt673a3942008-07-30 12:06:12 -07002402 * Finds free space in the GTT aperture and binds the object there.
2403 */
2404static int
2405i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2406{
2407 struct drm_device *dev = obj->dev;
2408 drm_i915_private_t *dev_priv = dev->dev_private;
2409 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2410 struct drm_mm_node *free_space;
2411 int page_count, ret;
2412
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08002413 if (dev_priv->mm.suspended)
2414 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002415 if (alignment == 0)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002416 alignment = i915_gem_get_gtt_alignment(obj);
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002417 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002418 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2419 return -EINVAL;
2420 }
2421
2422 search_free:
2423 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2424 obj->size, alignment, 0);
2425 if (free_space != NULL) {
2426 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2427 alignment);
2428 if (obj_priv->gtt_space != NULL) {
2429 obj_priv->gtt_space->private = obj;
2430 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2431 }
2432 }
2433 if (obj_priv->gtt_space == NULL) {
Carl Worth5e118f42009-03-20 11:54:25 -07002434 bool lists_empty;
2435
Eric Anholt673a3942008-07-30 12:06:12 -07002436 /* If the gtt is empty and we're still having trouble
2437 * fitting our object in, we're out of memory.
2438 */
2439#if WATCH_LRU
2440 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2441#endif
Carl Worth5e118f42009-03-20 11:54:25 -07002442 spin_lock(&dev_priv->mm.active_list_lock);
2443 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2444 list_empty(&dev_priv->mm.flushing_list) &&
2445 list_empty(&dev_priv->mm.active_list));
2446 spin_unlock(&dev_priv->mm.active_list_lock);
2447 if (lists_empty) {
Eric Anholt673a3942008-07-30 12:06:12 -07002448 DRM_ERROR("GTT full, but LRU list empty\n");
Chris Wilson2939e1f2009-06-06 09:46:03 +01002449 return -ENOSPC;
Eric Anholt673a3942008-07-30 12:06:12 -07002450 }
2451
2452 ret = i915_gem_evict_something(dev);
2453 if (ret != 0) {
Keith Packardac94a962008-11-20 23:30:27 -08002454 if (ret != -ERESTARTSYS)
2455 DRM_ERROR("Failed to evict a buffer %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07002456 return ret;
2457 }
2458 goto search_free;
2459 }
2460
2461#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02002462 DRM_INFO("Binding object of size %zd at 0x%08x\n",
Eric Anholt673a3942008-07-30 12:06:12 -07002463 obj->size, obj_priv->gtt_offset);
2464#endif
Eric Anholt856fa192009-03-19 14:10:50 -07002465 ret = i915_gem_object_get_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002466 if (ret) {
2467 drm_mm_put_block(obj_priv->gtt_space);
2468 obj_priv->gtt_space = NULL;
2469 return ret;
2470 }
2471
2472 page_count = obj->size / PAGE_SIZE;
2473 /* Create an AGP memory structure pointing at our pages, and bind it
2474 * into the GTT.
2475 */
2476 obj_priv->agp_mem = drm_agp_bind_pages(dev,
Eric Anholt856fa192009-03-19 14:10:50 -07002477 obj_priv->pages,
Eric Anholt673a3942008-07-30 12:06:12 -07002478 page_count,
Keith Packardba1eb1d2008-10-14 19:55:10 -07002479 obj_priv->gtt_offset,
2480 obj_priv->agp_type);
Eric Anholt673a3942008-07-30 12:06:12 -07002481 if (obj_priv->agp_mem == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002482 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002483 drm_mm_put_block(obj_priv->gtt_space);
2484 obj_priv->gtt_space = NULL;
2485 return -ENOMEM;
2486 }
2487 atomic_inc(&dev->gtt_count);
2488 atomic_add(obj->size, &dev->gtt_memory);
2489
2490 /* Assert that the object is not currently in any GPU domain. As it
2491 * wasn't in the GTT, there shouldn't be any way it could have been in
2492 * a GPU cache
2493 */
Chris Wilson21d509e2009-06-06 09:46:02 +01002494 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2495 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002496
2497 return 0;
2498}
2499
2500void
2501i915_gem_clflush_object(struct drm_gem_object *obj)
2502{
2503 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2504
2505 /* If we don't have a page list set up, then we're not pinned
2506 * to GPU, and we can ignore the cache flush because it'll happen
2507 * again at bind time.
2508 */
Eric Anholt856fa192009-03-19 14:10:50 -07002509 if (obj_priv->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002510 return;
2511
Eric Anholt856fa192009-03-19 14:10:50 -07002512 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002513}
2514
Eric Anholte47c68e2008-11-14 13:35:19 -08002515/** Flushes any GPU write domain for the object if it's dirty. */
2516static void
2517i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2518{
2519 struct drm_device *dev = obj->dev;
2520 uint32_t seqno;
2521
2522 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2523 return;
2524
2525 /* Queue the GPU write cache flushing we need. */
2526 i915_gem_flush(dev, 0, obj->write_domain);
Eric Anholtb9624422009-06-03 07:27:35 +00002527 seqno = i915_add_request(dev, NULL, obj->write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002528 obj->write_domain = 0;
2529 i915_gem_object_move_to_active(obj, seqno);
2530}
2531
2532/** Flushes the GTT write domain for the object if it's dirty. */
2533static void
2534i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2535{
2536 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2537 return;
2538
2539 /* No actual flushing is required for the GTT write domain. Writes
2540 * to it immediately go to main memory as far as we know, so there's
2541 * no chipset flush. It also doesn't land in render cache.
2542 */
2543 obj->write_domain = 0;
2544}
2545
2546/** Flushes the CPU write domain for the object if it's dirty. */
2547static void
2548i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2549{
2550 struct drm_device *dev = obj->dev;
2551
2552 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2553 return;
2554
2555 i915_gem_clflush_object(obj);
2556 drm_agp_chipset_flush(dev);
2557 obj->write_domain = 0;
2558}
2559
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002560/**
2561 * Moves a single object to the GTT read, and possibly write domain.
2562 *
2563 * This function returns when the move is complete, including waiting on
2564 * flushes to occur.
2565 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002566int
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002567i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2568{
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002569 struct drm_i915_gem_object *obj_priv = obj->driver_private;
Eric Anholte47c68e2008-11-14 13:35:19 -08002570 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002571
Eric Anholt02354392008-11-26 13:58:13 -08002572 /* Not valid to be called on unbound objects. */
2573 if (obj_priv->gtt_space == NULL)
2574 return -EINVAL;
2575
Eric Anholte47c68e2008-11-14 13:35:19 -08002576 i915_gem_object_flush_gpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002577 /* Wait on any GPU rendering and flushing to occur. */
Eric Anholte47c68e2008-11-14 13:35:19 -08002578 ret = i915_gem_object_wait_rendering(obj);
2579 if (ret != 0)
2580 return ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002581
2582 /* If we're writing through the GTT domain, then CPU and GPU caches
2583 * will need to be invalidated at next use.
2584 */
2585 if (write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002586 obj->read_domains &= I915_GEM_DOMAIN_GTT;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002587
Eric Anholte47c68e2008-11-14 13:35:19 -08002588 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002589
2590 /* It should now be out of any other write domains, and we can update
2591 * the domain values for our changes.
2592 */
2593 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2594 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002595 if (write) {
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002596 obj->write_domain = I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002597 obj_priv->dirty = 1;
2598 }
2599
2600 return 0;
2601}
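/*
 * Illustrative usage sketch (assumed caller, e.g. modesetting or a pwrite
 * fast path): bind/pin the object first, then pull it into the GTT write
 * domain before writing through the aperture:
 *
 *   ret = i915_gem_object_pin(obj, alignment);
 *   if (ret == 0)
 *       ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 *
 * i915_gem_object_pin() is the pin helper used elsewhere in this file;
 * error handling and unpinning are omitted here.
 */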
2602
2603/**
2604 * Moves a single object to the CPU read, and possibly write domain.
2605 *
2606 * This function returns when the move is complete, including waiting on
2607 * flushes to occur.
2608 */
2609static int
2610i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2611{
Eric Anholte47c68e2008-11-14 13:35:19 -08002612 int ret;
2613
2614 i915_gem_object_flush_gpu_write_domain(obj);
2615 /* Wait on any GPU rendering and flushing to occur. */
2616 ret = i915_gem_object_wait_rendering(obj);
2617 if (ret != 0)
2618 return ret;
2619
2620 i915_gem_object_flush_gtt_write_domain(obj);
2621
2622 /* If we have a partially-valid cache of the object in the CPU,
2623 * finish invalidating it and free the per-page flags.
2624 */
2625 i915_gem_object_set_to_full_cpu_read_domain(obj);
2626
2627 /* Flush the CPU cache if it's still invalid. */
2628 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2629 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002630
2631 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2632 }
2633
2634 /* It should now be out of any other write domains, and we can update
2635 * the domain values for our changes.
2636 */
2637 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2638
2639 /* If we're writing through the CPU, then the GPU read domains will
2640 * need to be invalidated at next use.
2641 */
2642 if (write) {
2643 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2644 obj->write_domain = I915_GEM_DOMAIN_CPU;
2645 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002646
2647 return 0;
2648}
2649
Eric Anholt673a3942008-07-30 12:06:12 -07002650/*
2651 * Set the next domain for the specified object. This
 2652 * may not actually perform the necessary flushing/invalidating though,
2653 * as that may want to be batched with other set_domain operations
2654 *
2655 * This is (we hope) the only really tricky part of gem. The goal
2656 * is fairly simple -- track which caches hold bits of the object
2657 * and make sure they remain coherent. A few concrete examples may
2658 * help to explain how it works. For shorthand, we use the notation
 2659 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2660 * a pair of read and write domain masks.
2661 *
2662 * Case 1: the batch buffer
2663 *
2664 * 1. Allocated
2665 * 2. Written by CPU
2666 * 3. Mapped to GTT
2667 * 4. Read by GPU
2668 * 5. Unmapped from GTT
2669 * 6. Freed
2670 *
2671 * Let's take these a step at a time
2672 *
2673 * 1. Allocated
2674 * Pages allocated from the kernel may still have
2675 * cache contents, so we set them to (CPU, CPU) always.
2676 * 2. Written by CPU (using pwrite)
2677 * The pwrite function calls set_domain (CPU, CPU) and
2678 * this function does nothing (as nothing changes)
 2679 * 3. Mapped to GTT
2680 * This function asserts that the object is not
2681 * currently in any GPU-based read or write domains
2682 * 4. Read by GPU
2683 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2684 * As write_domain is zero, this function adds in the
2685 * current read domains (CPU+COMMAND, 0).
2686 * flush_domains is set to CPU.
2687 * invalidate_domains is set to COMMAND
2688 * clflush is run to get data out of the CPU caches
2689 * then i915_dev_set_domain calls i915_gem_flush to
2690 * emit an MI_FLUSH and drm_agp_chipset_flush
2691 * 5. Unmapped from GTT
2692 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2693 * flush_domains and invalidate_domains end up both zero
2694 * so no flushing/invalidating happens
2695 * 6. Freed
2696 * yay, done
2697 *
2698 * Case 2: The shared render buffer
2699 *
2700 * 1. Allocated
2701 * 2. Mapped to GTT
2702 * 3. Read/written by GPU
2703 * 4. set_domain to (CPU,CPU)
2704 * 5. Read/written by CPU
2705 * 6. Read/written by GPU
2706 *
2707 * 1. Allocated
2708 * Same as last example, (CPU, CPU)
2709 * 2. Mapped to GTT
2710 * Nothing changes (assertions find that it is not in the GPU)
2711 * 3. Read/written by GPU
2712 * execbuffer calls set_domain (RENDER, RENDER)
2713 * flush_domains gets CPU
2714 * invalidate_domains gets GPU
2715 * clflush (obj)
2716 * MI_FLUSH and drm_agp_chipset_flush
2717 * 4. set_domain (CPU, CPU)
2718 * flush_domains gets GPU
2719 * invalidate_domains gets CPU
2720 * wait_rendering (obj) to make sure all drawing is complete.
2721 * This will include an MI_FLUSH to get the data from GPU
2722 * to memory
2723 * clflush (obj) to invalidate the CPU cache
2724 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2725 * 5. Read/written by CPU
2726 * cache lines are loaded and dirtied
 2727 * 6. Read/written by GPU
2728 * Same as last GPU access
2729 *
2730 * Case 3: The constant buffer
2731 *
2732 * 1. Allocated
2733 * 2. Written by CPU
2734 * 3. Read by GPU
2735 * 4. Updated (written) by CPU again
2736 * 5. Read by GPU
2737 *
2738 * 1. Allocated
2739 * (CPU, CPU)
2740 * 2. Written by CPU
2741 * (CPU, CPU)
2742 * 3. Read by GPU
2743 * (CPU+RENDER, 0)
2744 * flush_domains = CPU
2745 * invalidate_domains = RENDER
2746 * clflush (obj)
2747 * MI_FLUSH
2748 * drm_agp_chipset_flush
2749 * 4. Updated (written) by CPU again
2750 * (CPU, CPU)
2751 * flush_domains = 0 (no previous write domain)
2752 * invalidate_domains = 0 (no new read domains)
2753 * 5. Read by GPU
2754 * (CPU+RENDER, 0)
2755 * flush_domains = CPU
2756 * invalidate_domains = RENDER
2757 * clflush (obj)
2758 * MI_FLUSH
2759 * drm_agp_chipset_flush
2760 */
Keith Packardc0d90822008-11-20 23:11:08 -08002761static void
Eric Anholt8b0e3782009-02-19 14:40:50 -08002762i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002763{
2764 struct drm_device *dev = obj->dev;
2765 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2766 uint32_t invalidate_domains = 0;
2767 uint32_t flush_domains = 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002768
Eric Anholt8b0e3782009-02-19 14:40:50 -08002769 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2770 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
Eric Anholt673a3942008-07-30 12:06:12 -07002771
Jesse Barnes652c3932009-08-17 13:31:43 -07002772 intel_mark_busy(dev, obj);
2773
Eric Anholt673a3942008-07-30 12:06:12 -07002774#if WATCH_BUF
2775 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2776 __func__, obj,
Eric Anholt8b0e3782009-02-19 14:40:50 -08002777 obj->read_domains, obj->pending_read_domains,
2778 obj->write_domain, obj->pending_write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07002779#endif
2780 /*
2781 * If the object isn't moving to a new write domain,
2782 * let the object stay in multiple read domains
2783 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08002784 if (obj->pending_write_domain == 0)
2785 obj->pending_read_domains |= obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07002786 else
2787 obj_priv->dirty = 1;
2788
2789 /*
2790 * Flush the current write domain if
2791 * the new read domains don't match. Invalidate
2792 * any read domains which differ from the old
2793 * write domain
2794 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08002795 if (obj->write_domain &&
2796 obj->write_domain != obj->pending_read_domains) {
Eric Anholt673a3942008-07-30 12:06:12 -07002797 flush_domains |= obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08002798 invalidate_domains |=
2799 obj->pending_read_domains & ~obj->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07002800 }
2801 /*
2802 * Invalidate any read caches which may have
2803 * stale data. That is, any new read domains.
2804 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08002805 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07002806 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2807#if WATCH_BUF
2808 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2809 __func__, flush_domains, invalidate_domains);
2810#endif
Eric Anholt673a3942008-07-30 12:06:12 -07002811 i915_gem_clflush_object(obj);
2812 }
2813
Eric Anholtefbeed92009-02-19 14:54:51 -08002814 /* The actual obj->write_domain will be updated with
2815 * pending_write_domain after we emit the accumulated flush for all
2816 * of our domain changes in execbuffers (which clears objects'
2817 * write_domains). So if we have a current write domain that we
2818 * aren't changing, set pending_write_domain to that.
2819 */
2820 if (flush_domains == 0 && obj->pending_write_domain == 0)
2821 obj->pending_write_domain = obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08002822 obj->read_domains = obj->pending_read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07002823
2824 dev->invalidate_domains |= invalidate_domains;
2825 dev->flush_domains |= flush_domains;
2826#if WATCH_BUF
2827 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2828 __func__,
2829 obj->read_domains, obj->write_domain,
2830 dev->invalidate_domains, dev->flush_domains);
2831#endif
Eric Anholt673a3942008-07-30 12:06:12 -07002832}
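/*
 * Illustrative sketch of how the accumulation above is meant to be
 * consumed (per the long comment preceding this function): execbuffer
 * calls i915_gem_object_set_to_gpu_domain() for every object in the
 * batch, then emits one combined flush, roughly:
 *
 *   for each object in the exec list:
 *       i915_gem_object_set_to_gpu_domain(obj);
 *   if (dev->invalidate_domains | dev->flush_domains)
 *       i915_gem_flush(dev, dev->invalidate_domains, dev->flush_domains);
 *
 * The real execbuffer path lives outside this excerpt; this is only the
 * shape implied by dev->invalidate_domains/dev->flush_domains above.
 */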
2833
2834/**
Eric Anholte47c68e2008-11-14 13:35:19 -08002835 * Moves the object from a partial CPU read domain to a full one.
Eric Anholt673a3942008-07-30 12:06:12 -07002836 *
Eric Anholte47c68e2008-11-14 13:35:19 -08002837 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2838 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
2839 */
2840static void
2841i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2842{
Eric Anholte47c68e2008-11-14 13:35:19 -08002843 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2844
2845 if (!obj_priv->page_cpu_valid)
2846 return;
2847
2848 /* If we're partially in the CPU read domain, finish moving it in.
2849 */
2850 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
2851 int i;
2852
2853 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2854 if (obj_priv->page_cpu_valid[i])
2855 continue;
Eric Anholt856fa192009-03-19 14:10:50 -07002856 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08002857 }
Eric Anholte47c68e2008-11-14 13:35:19 -08002858 }
2859
2860 /* Free the page_cpu_valid mappings which are now stale, whether
2861 * or not we've got I915_GEM_DOMAIN_CPU.
2862 */
Eric Anholt9a298b22009-03-24 12:23:04 -07002863 kfree(obj_priv->page_cpu_valid);
Eric Anholte47c68e2008-11-14 13:35:19 -08002864 obj_priv->page_cpu_valid = NULL;
2865}
2866
2867/**
2868 * Set the CPU read domain on a range of the object.
2869 *
2870 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2871 * not entirely valid. The page_cpu_valid member of the object flags which
2872 * pages have been flushed, and will be respected by
2873 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2874 * of the whole object.
2875 *
2876 * This function returns when the move is complete, including waiting on
2877 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07002878 */
2879static int
Eric Anholte47c68e2008-11-14 13:35:19 -08002880i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2881 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07002882{
2883 struct drm_i915_gem_object *obj_priv = obj->driver_private;
Eric Anholte47c68e2008-11-14 13:35:19 -08002884 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002885
Eric Anholte47c68e2008-11-14 13:35:19 -08002886 if (offset == 0 && size == obj->size)
2887 return i915_gem_object_set_to_cpu_domain(obj, 0);
2888
2889 i915_gem_object_flush_gpu_write_domain(obj);
2890 /* Wait on any GPU rendering and flushing to occur. */
2891 ret = i915_gem_object_wait_rendering(obj);
2892 if (ret != 0)
2893 return ret;
2894 i915_gem_object_flush_gtt_write_domain(obj);
2895
2896 /* If we're already fully in the CPU read domain, we're done. */
2897 if (obj_priv->page_cpu_valid == NULL &&
2898 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002899 return 0;
2900
Eric Anholte47c68e2008-11-14 13:35:19 -08002901 /* Otherwise, create/clear the per-page CPU read domain flag if we're
2902 * newly adding I915_GEM_DOMAIN_CPU
2903 */
Eric Anholt673a3942008-07-30 12:06:12 -07002904 if (obj_priv->page_cpu_valid == NULL) {
Eric Anholt9a298b22009-03-24 12:23:04 -07002905 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
2906 GFP_KERNEL);
Eric Anholte47c68e2008-11-14 13:35:19 -08002907 if (obj_priv->page_cpu_valid == NULL)
2908 return -ENOMEM;
2909 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
2910 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002911
2912 /* Flush the cache on any pages that are still invalid from the CPU's
2913 * perspective.
2914 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002915 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
2916 i++) {
Eric Anholt673a3942008-07-30 12:06:12 -07002917 if (obj_priv->page_cpu_valid[i])
2918 continue;
2919
Eric Anholt856fa192009-03-19 14:10:50 -07002920 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07002921
2922 obj_priv->page_cpu_valid[i] = 1;
2923 }
2924
Eric Anholte47c68e2008-11-14 13:35:19 -08002925 /* It should now be out of any other write domains, and we can update
2926 * the domain values for our changes.
2927 */
2928 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2929
2930 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2931
Eric Anholt673a3942008-07-30 12:06:12 -07002932 return 0;
2933}
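/*
 * Worked example (illustrative): a pread of 4096 bytes at offset 4096 on
 * an object not yet CPU-readable walks only i == 1 in the loop above,
 * clflushes just that page and sets page_cpu_valid[1], leaving the rest of
 * the object unflushed until it is actually read.
 */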
2934
2935/**
Eric Anholt673a3942008-07-30 12:06:12 -07002936 * Pin an object to the GTT and evaluate the relocations landing in it.
2937 */
2938static int
2939i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2940 struct drm_file *file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002941 struct drm_i915_gem_exec_object *entry,
2942 struct drm_i915_gem_relocation_entry *relocs)
Eric Anholt673a3942008-07-30 12:06:12 -07002943{
2944 struct drm_device *dev = obj->dev;
Keith Packard0839ccb2008-10-30 19:38:48 -07002945 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002946 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2947 int i, ret;
Keith Packard0839ccb2008-10-30 19:38:48 -07002948 void __iomem *reloc_page;
Eric Anholt673a3942008-07-30 12:06:12 -07002949
2950 /* Choose the GTT offset for our buffer and put it there. */
2951 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2952 if (ret)
2953 return ret;
2954
2955 entry->offset = obj_priv->gtt_offset;
2956
Eric Anholt673a3942008-07-30 12:06:12 -07002957 /* Apply the relocations, using the GTT aperture to avoid cache
2958 * flushing requirements.
2959 */
2960 for (i = 0; i < entry->relocation_count; i++) {
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002961 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
Eric Anholt673a3942008-07-30 12:06:12 -07002962 struct drm_gem_object *target_obj;
2963 struct drm_i915_gem_object *target_obj_priv;
Eric Anholt3043c602008-10-02 12:24:47 -07002964 uint32_t reloc_val, reloc_offset;
2965 uint32_t __iomem *reloc_entry;
Eric Anholt673a3942008-07-30 12:06:12 -07002966
Eric Anholt673a3942008-07-30 12:06:12 -07002967 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002968 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07002969 if (target_obj == NULL) {
2970 i915_gem_object_unpin(obj);
2971 return -EBADF;
2972 }
2973 target_obj_priv = target_obj->driver_private;
2974
2975 /* The target buffer should have appeared before us in the
2976 * exec_object list, so it should have a GTT space bound by now.
2977 */
2978 if (target_obj_priv->gtt_space == NULL) {
2979 DRM_ERROR("No GTT space found for object %d\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002980 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07002981 drm_gem_object_unreference(target_obj);
2982 i915_gem_object_unpin(obj);
2983 return -EINVAL;
2984 }
2985
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002986 if (reloc->offset > obj->size - 4) {
Eric Anholt673a3942008-07-30 12:06:12 -07002987 DRM_ERROR("Relocation beyond object bounds: "
2988 "obj %p target %d offset %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002989 obj, reloc->target_handle,
2990 (int) reloc->offset, (int) obj->size);
Eric Anholt673a3942008-07-30 12:06:12 -07002991 drm_gem_object_unreference(target_obj);
2992 i915_gem_object_unpin(obj);
2993 return -EINVAL;
2994 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002995 if (reloc->offset & 3) {
Eric Anholt673a3942008-07-30 12:06:12 -07002996 DRM_ERROR("Relocation not 4-byte aligned: "
2997 "obj %p target %d offset %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07002998 obj, reloc->target_handle,
2999 (int) reloc->offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003000 drm_gem_object_unreference(target_obj);
3001 i915_gem_object_unpin(obj);
3002 return -EINVAL;
3003 }
3004
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003005 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3006 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
Eric Anholte47c68e2008-11-14 13:35:19 -08003007 DRM_ERROR("reloc with read/write CPU domains: "
3008 "obj %p target %d offset %d "
 3009			  "read %08x write %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003010 obj, reloc->target_handle,
3011 (int) reloc->offset,
3012 reloc->read_domains,
3013 reloc->write_domain);
Chris Wilson491152b2009-02-11 14:26:32 +00003014 drm_gem_object_unreference(target_obj);
3015 i915_gem_object_unpin(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003016 return -EINVAL;
3017 }
3018
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003019 if (reloc->write_domain && target_obj->pending_write_domain &&
3020 reloc->write_domain != target_obj->pending_write_domain) {
Eric Anholt673a3942008-07-30 12:06:12 -07003021 DRM_ERROR("Write domain conflict: "
3022 "obj %p target %d offset %d "
3023 "new %08x old %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003024 obj, reloc->target_handle,
3025 (int) reloc->offset,
3026 reloc->write_domain,
Eric Anholt673a3942008-07-30 12:06:12 -07003027 target_obj->pending_write_domain);
3028 drm_gem_object_unreference(target_obj);
3029 i915_gem_object_unpin(obj);
3030 return -EINVAL;
3031 }
3032
3033#if WATCH_RELOC
3034 DRM_INFO("%s: obj %p offset %08x target %d "
3035 "read %08x write %08x gtt %08x "
3036 "presumed %08x delta %08x\n",
3037 __func__,
3038 obj,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003039 (int) reloc->offset,
3040 (int) reloc->target_handle,
3041 (int) reloc->read_domains,
3042 (int) reloc->write_domain,
Eric Anholt673a3942008-07-30 12:06:12 -07003043 (int) target_obj_priv->gtt_offset,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003044 (int) reloc->presumed_offset,
3045 reloc->delta);
Eric Anholt673a3942008-07-30 12:06:12 -07003046#endif
3047
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003048 target_obj->pending_read_domains |= reloc->read_domains;
3049 target_obj->pending_write_domain |= reloc->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07003050
3051 /* If the relocation already has the right value in it, no
3052 * more work needs to be done.
3053 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003054 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
Eric Anholt673a3942008-07-30 12:06:12 -07003055 drm_gem_object_unreference(target_obj);
3056 continue;
3057 }
3058
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003059 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3060 if (ret != 0) {
3061 drm_gem_object_unreference(target_obj);
3062 i915_gem_object_unpin(obj);
 3063			return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003064 }
3065
3066 /* Map the page containing the relocation we're going to
3067 * perform.
3068 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003069 reloc_offset = obj_priv->gtt_offset + reloc->offset;
Keith Packard0839ccb2008-10-30 19:38:48 -07003070 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3071 (reloc_offset &
3072 ~(PAGE_SIZE - 1)));
Eric Anholt3043c602008-10-02 12:24:47 -07003073 reloc_entry = (uint32_t __iomem *)(reloc_page +
Keith Packard0839ccb2008-10-30 19:38:48 -07003074 (reloc_offset & (PAGE_SIZE - 1)));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003075 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
Eric Anholt673a3942008-07-30 12:06:12 -07003076
3077#if WATCH_BUF
3078 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003079 obj, (unsigned int) reloc->offset,
Eric Anholt673a3942008-07-30 12:06:12 -07003080 readl(reloc_entry), reloc_val);
3081#endif
3082 writel(reloc_val, reloc_entry);
Keith Packard0839ccb2008-10-30 19:38:48 -07003083 io_mapping_unmap_atomic(reloc_page);
Eric Anholt673a3942008-07-30 12:06:12 -07003084
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003085 /* The updated presumed offset for this entry will be
3086 * copied back out to the user.
Eric Anholt673a3942008-07-30 12:06:12 -07003087 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003088 reloc->presumed_offset = target_obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003089
3090 drm_gem_object_unreference(target_obj);
3091 }
3092
Eric Anholt673a3942008-07-30 12:06:12 -07003093#if WATCH_BUF
3094 if (0)
3095 i915_gem_dump_object(obj, 128, __func__, ~0);
3096#endif
3097 return 0;
3098}
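
/* Worked example (illustrative): with obj_priv->gtt_offset = 0x00a10000,
 * reloc->offset = 0x1234, target_obj_priv->gtt_offset = 0x00b00000 and
 * reloc->delta = 0x40, the loop above maps the aperture page at 0x00a11000
 * (reloc_offset & ~(PAGE_SIZE - 1)), writes 0x00b00040 at byte 0x234 of that
 * mapping, and stores 0x00b00000 in reloc->presumed_offset so userland can
 * skip this relocation on the next submission.
 */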
3099
3100/** Dispatch a batchbuffer to the ring
3101 */
3102static int
3103i915_dispatch_gem_execbuffer(struct drm_device *dev,
3104 struct drm_i915_gem_execbuffer *exec,
Eric Anholt201361a2009-03-11 12:30:04 -07003105 struct drm_clip_rect *cliprects,
Eric Anholt673a3942008-07-30 12:06:12 -07003106 uint64_t exec_offset)
3107{
3108 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003109 int nbox = exec->num_cliprects;
3110 int i = 0, count;
Chris Wilson83d60792009-06-06 09:45:57 +01003111 uint32_t exec_start, exec_len;
Eric Anholt673a3942008-07-30 12:06:12 -07003112 RING_LOCALS;
3113
3114 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3115 exec_len = (uint32_t) exec->batch_len;
3116
Eric Anholt673a3942008-07-30 12:06:12 -07003117 count = nbox ? nbox : 1;
3118
3119 for (i = 0; i < count; i++) {
3120 if (i < nbox) {
Eric Anholt201361a2009-03-11 12:30:04 -07003121 int ret = i915_emit_box(dev, cliprects, i,
Eric Anholt673a3942008-07-30 12:06:12 -07003122 exec->DR1, exec->DR4);
3123 if (ret)
3124 return ret;
3125 }
3126
3127 if (IS_I830(dev) || IS_845G(dev)) {
3128 BEGIN_LP_RING(4);
3129 OUT_RING(MI_BATCH_BUFFER);
3130 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3131 OUT_RING(exec_start + exec_len - 4);
3132 OUT_RING(0);
3133 ADVANCE_LP_RING();
3134 } else {
3135 BEGIN_LP_RING(2);
3136 if (IS_I965G(dev)) {
3137 OUT_RING(MI_BATCH_BUFFER_START |
3138 (2 << 6) |
3139 MI_BATCH_NON_SECURE_I965);
3140 OUT_RING(exec_start);
3141 } else {
3142 OUT_RING(MI_BATCH_BUFFER_START |
3143 (2 << 6));
3144 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3145 }
3146 ADVANCE_LP_RING();
3147 }
3148 }
3149
3150 /* XXX breadcrumb */
3151 return 0;
3152}
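
/* Worked example (illustrative): on 965-class hardware with
 * exec_offset = 0x00a10000 and batch_start_offset = 0x100, exec_start is
 * 0x00a10100 and each cliprect pass emits MI_BATCH_BUFFER_START | (2 << 6) |
 * MI_BATCH_NON_SECURE_I965 followed by that address; the 830/845 form
 * instead uses MI_BATCH_BUFFER, which also encodes the end address of the
 * batch, exec_start + exec_len - 4.
 */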
3153
3154/* Throttle our rendering by waiting until the ring has completed our requests
3155 * emitted over 20 msec ago.
3156 *
Eric Anholtb9624422009-06-03 07:27:35 +00003157 * Note that if we were to use the current jiffies each time around the loop,
3158 * we wouldn't escape the function with any frames outstanding if the time to
3159 * render a frame was over 20ms.
3160 *
Eric Anholt673a3942008-07-30 12:06:12 -07003161 * This should get us reasonable parallelism between CPU and GPU but also
3162 * relatively low latency when blocking on a particular request to finish.
3163 */
3164static int
3165i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3166{
3167 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3168 int ret = 0;
Eric Anholtb9624422009-06-03 07:27:35 +00003169 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Eric Anholt673a3942008-07-30 12:06:12 -07003170
3171 mutex_lock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003172 while (!list_empty(&i915_file_priv->mm.request_list)) {
3173 struct drm_i915_gem_request *request;
3174
3175 request = list_first_entry(&i915_file_priv->mm.request_list,
3176 struct drm_i915_gem_request,
3177 client_list);
3178
3179 if (time_after_eq(request->emitted_jiffies, recent_enough))
3180 break;
3181
3182 ret = i915_wait_request(dev, request->seqno);
3183 if (ret != 0)
3184 break;
3185 }
Eric Anholt673a3942008-07-30 12:06:12 -07003186 mutex_unlock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003187
Eric Anholt673a3942008-07-30 12:06:12 -07003188 return ret;
3189}
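
/* Worked example (illustrative): with HZ = 1000, msecs_to_jiffies(20) is 20
 * ticks, so recent_enough means "20 ticks ago".  A request emitted 30 ticks
 * ago fails the time_after_eq() test and is waited on; one emitted 5 ticks
 * ago passes it and ends the loop, leaving newer requests outstanding.
 */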
3190
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003191static int
3192i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3193 uint32_t buffer_count,
3194 struct drm_i915_gem_relocation_entry **relocs)
3195{
3196 uint32_t reloc_count = 0, reloc_index = 0, i;
3197 int ret;
3198
3199 *relocs = NULL;
3200 for (i = 0; i < buffer_count; i++) {
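		/* Guard against u32 wraparound: if this buffer's
		 * relocation_count pushes the running total past 2^32 the sum
		 * wraps and comes back smaller than it started, e.g.
		 * 0xffffff00 + 0x200 truncates to 0x100.
		 */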
3201 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3202 return -EINVAL;
3203 reloc_count += exec_list[i].relocation_count;
3204 }
3205
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003206 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003207 if (*relocs == NULL)
3208 return -ENOMEM;
3209
3210 for (i = 0; i < buffer_count; i++) {
3211 struct drm_i915_gem_relocation_entry __user *user_relocs;
3212
3213 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3214
3215 ret = copy_from_user(&(*relocs)[reloc_index],
3216 user_relocs,
3217 exec_list[i].relocation_count *
3218 sizeof(**relocs));
3219 if (ret != 0) {
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003220 drm_free_large(*relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003221 *relocs = NULL;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003222 return -EFAULT;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003223 }
3224
3225 reloc_index += exec_list[i].relocation_count;
3226 }
3227
Florian Mickler2bc43b52009-04-06 22:55:41 +02003228 return 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003229}
3230
3231static int
3232i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3233 uint32_t buffer_count,
3234 struct drm_i915_gem_relocation_entry *relocs)
3235{
3236 uint32_t reloc_count = 0, i;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003237 int ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003238
3239 for (i = 0; i < buffer_count; i++) {
3240 struct drm_i915_gem_relocation_entry __user *user_relocs;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003241 int unwritten;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003242
3243 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3244
Florian Mickler2bc43b52009-04-06 22:55:41 +02003245 unwritten = copy_to_user(user_relocs,
3246 &relocs[reloc_count],
3247 exec_list[i].relocation_count *
3248 sizeof(*relocs));
3249
3250 if (unwritten) {
3251 ret = -EFAULT;
3252 goto err;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003253 }
3254
3255 reloc_count += exec_list[i].relocation_count;
3256 }
3257
Florian Mickler2bc43b52009-04-06 22:55:41 +02003258err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003259 drm_free_large(relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003260
3261 return ret;
3262}
3263
Chris Wilson83d60792009-06-06 09:45:57 +01003264static int
 3265i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
3266 uint64_t exec_offset)
3267{
3268 uint32_t exec_start, exec_len;
3269
3270 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3271 exec_len = (uint32_t) exec->batch_len;
3272
3273 if ((exec_start | exec_len) & 0x7)
3274 return -EINVAL;
3275
3276 if (!exec_start)
3277 return -EINVAL;
3278
3279 return 0;
3280}
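
/* Worked example (illustrative): a batch at exec_start = 0x00a10100 with
 * exec_len = 0x1f8 passes (start and length both 8-byte aligned, start
 * non-zero), while exec_len = 0x1f4 fails the (exec_start | exec_len) & 0x7
 * test because bit 2 of the length is set.
 */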
3281
Eric Anholt673a3942008-07-30 12:06:12 -07003282int
3283i915_gem_execbuffer(struct drm_device *dev, void *data,
3284 struct drm_file *file_priv)
3285{
3286 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003287 struct drm_i915_gem_execbuffer *args = data;
3288 struct drm_i915_gem_exec_object *exec_list = NULL;
3289 struct drm_gem_object **object_list = NULL;
3290 struct drm_gem_object *batch_obj;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003291 struct drm_i915_gem_object *obj_priv;
Eric Anholt201361a2009-03-11 12:30:04 -07003292 struct drm_clip_rect *cliprects = NULL;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003293 struct drm_i915_gem_relocation_entry *relocs;
3294 int ret, ret2, i, pinned = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003295 uint64_t exec_offset;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003296 uint32_t seqno, flush_domains, reloc_index;
Keith Packardac94a962008-11-20 23:30:27 -08003297 int pin_tries;
Eric Anholt673a3942008-07-30 12:06:12 -07003298
3299#if WATCH_EXEC
3300 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3301 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3302#endif
3303
Eric Anholt4f481ed2008-09-10 14:22:49 -07003304 if (args->buffer_count < 1) {
3305 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3306 return -EINVAL;
3307 }
Eric Anholt673a3942008-07-30 12:06:12 -07003308 /* Copy in the exec list from userland */
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003309 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3310 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
Eric Anholt673a3942008-07-30 12:06:12 -07003311 if (exec_list == NULL || object_list == NULL) {
3312 DRM_ERROR("Failed to allocate exec or object list "
3313 "for %d buffers\n",
3314 args->buffer_count);
3315 ret = -ENOMEM;
3316 goto pre_mutex_err;
3317 }
3318 ret = copy_from_user(exec_list,
3319 (struct drm_i915_relocation_entry __user *)
3320 (uintptr_t) args->buffers_ptr,
3321 sizeof(*exec_list) * args->buffer_count);
 3322	if (ret != 0) {
 3323		DRM_ERROR("copy %d exec entries failed %d\n",
 3324			  args->buffer_count, ret);
		ret = -EFAULT;
 3325		goto pre_mutex_err;
 3326	}
3327
Eric Anholt201361a2009-03-11 12:30:04 -07003328 if (args->num_cliprects != 0) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003329 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3330 GFP_KERNEL);
Eric Anholt201361a2009-03-11 12:30:04 -07003331		if (cliprects == NULL) {
			ret = -ENOMEM;
 3332			goto pre_mutex_err;
		}
3333
3334 ret = copy_from_user(cliprects,
3335 (struct drm_clip_rect __user *)
3336 (uintptr_t) args->cliprects_ptr,
3337 sizeof(*cliprects) * args->num_cliprects);
 3338		if (ret != 0) {
 3339			DRM_ERROR("copy %d cliprects failed: %d\n",
 3340				  args->num_cliprects, ret);
			ret = -EFAULT;
 3341			goto pre_mutex_err;
 3342		}
3343 }
3344
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003345 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3346 &relocs);
3347 if (ret != 0)
3348 goto pre_mutex_err;
3349
Eric Anholt673a3942008-07-30 12:06:12 -07003350 mutex_lock(&dev->struct_mutex);
3351
3352 i915_verify_inactive(dev, __FILE__, __LINE__);
3353
Ben Gamariba1234d2009-09-14 17:48:47 -04003354 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003355 DRM_ERROR("Execbuf while wedged\n");
3356 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003357 ret = -EIO;
3358 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003359 }
3360
3361 if (dev_priv->mm.suspended) {
3362 DRM_ERROR("Execbuf while VT-switched.\n");
3363 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003364 ret = -EBUSY;
3365 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003366 }
3367
Keith Packardac94a962008-11-20 23:30:27 -08003368 /* Look up object handles */
Eric Anholt673a3942008-07-30 12:06:12 -07003369 for (i = 0; i < args->buffer_count; i++) {
3370 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3371 exec_list[i].handle);
3372 if (object_list[i] == NULL) {
3373 DRM_ERROR("Invalid object handle %d at index %d\n",
3374 exec_list[i].handle, i);
3375 ret = -EBADF;
3376 goto err;
3377 }
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003378
3379 obj_priv = object_list[i]->driver_private;
3380 if (obj_priv->in_execbuffer) {
3381 DRM_ERROR("Object %p appears more than once in object list\n",
3382 object_list[i]);
3383 ret = -EBADF;
3384 goto err;
3385 }
3386 obj_priv->in_execbuffer = true;
Keith Packardac94a962008-11-20 23:30:27 -08003387 }
Eric Anholt673a3942008-07-30 12:06:12 -07003388
Keith Packardac94a962008-11-20 23:30:27 -08003389 /* Pin and relocate */
3390 for (pin_tries = 0; ; pin_tries++) {
3391 ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003392 reloc_index = 0;
3393
Keith Packardac94a962008-11-20 23:30:27 -08003394 for (i = 0; i < args->buffer_count; i++) {
3395 object_list[i]->pending_read_domains = 0;
3396 object_list[i]->pending_write_domain = 0;
3397 ret = i915_gem_object_pin_and_relocate(object_list[i],
3398 file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003399 &exec_list[i],
3400 &relocs[reloc_index]);
Keith Packardac94a962008-11-20 23:30:27 -08003401 if (ret)
3402 break;
3403 pinned = i + 1;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003404 reloc_index += exec_list[i].relocation_count;
Keith Packardac94a962008-11-20 23:30:27 -08003405 }
3406 /* success */
3407 if (ret == 0)
3408 break;
3409
3410 /* error other than GTT full, or we've already tried again */
Chris Wilson2939e1f2009-06-06 09:46:03 +01003411 if (ret != -ENOSPC || pin_tries >= 1) {
Eric Anholtf1acec92008-12-19 14:47:48 -08003412 if (ret != -ERESTARTSYS)
3413 DRM_ERROR("Failed to pin buffers %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003414 goto err;
3415 }
Keith Packardac94a962008-11-20 23:30:27 -08003416
3417 /* unpin all of our buffers */
3418 for (i = 0; i < pinned; i++)
3419 i915_gem_object_unpin(object_list[i]);
Eric Anholtb1177632008-12-10 10:09:41 -08003420 pinned = 0;
Keith Packardac94a962008-11-20 23:30:27 -08003421
3422 /* evict everyone we can from the aperture */
3423 ret = i915_gem_evict_everything(dev);
3424 if (ret)
3425 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003426 }
3427
3428 /* Set the pending read domains for the batch buffer to COMMAND */
3429 batch_obj = object_list[args->buffer_count-1];
Chris Wilson5f26a2c2009-06-06 09:45:58 +01003430 if (batch_obj->pending_write_domain) {
3431 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3432 ret = -EINVAL;
3433 goto err;
3434 }
3435 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
Eric Anholt673a3942008-07-30 12:06:12 -07003436
Chris Wilson83d60792009-06-06 09:45:57 +01003437 /* Sanity check the batch buffer, prior to moving objects */
3438 exec_offset = exec_list[args->buffer_count - 1].offset;
 3439	ret = i915_gem_check_execbuffer(args, exec_offset);
3440 if (ret != 0) {
3441 DRM_ERROR("execbuf with invalid offset/length\n");
3442 goto err;
3443 }
3444
Eric Anholt673a3942008-07-30 12:06:12 -07003445 i915_verify_inactive(dev, __FILE__, __LINE__);
3446
Keith Packard646f0f62008-11-20 23:23:03 -08003447 /* Zero the global flush/invalidate flags. These
3448 * will be modified as new domains are computed
3449 * for each object
3450 */
3451 dev->invalidate_domains = 0;
3452 dev->flush_domains = 0;
3453
Eric Anholt673a3942008-07-30 12:06:12 -07003454 for (i = 0; i < args->buffer_count; i++) {
3455 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003456
Keith Packard646f0f62008-11-20 23:23:03 -08003457 /* Compute new gpu domains and update invalidate/flush */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003458 i915_gem_object_set_to_gpu_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003459 }
3460
3461 i915_verify_inactive(dev, __FILE__, __LINE__);
3462
Keith Packard646f0f62008-11-20 23:23:03 -08003463 if (dev->invalidate_domains | dev->flush_domains) {
3464#if WATCH_EXEC
3465 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3466 __func__,
3467 dev->invalidate_domains,
3468 dev->flush_domains);
3469#endif
3470 i915_gem_flush(dev,
3471 dev->invalidate_domains,
3472 dev->flush_domains);
3473 if (dev->flush_domains)
Eric Anholtb9624422009-06-03 07:27:35 +00003474 (void)i915_add_request(dev, file_priv,
3475 dev->flush_domains);
Keith Packard646f0f62008-11-20 23:23:03 -08003476 }
Eric Anholt673a3942008-07-30 12:06:12 -07003477
Eric Anholtefbeed92009-02-19 14:54:51 -08003478 for (i = 0; i < args->buffer_count; i++) {
3479 struct drm_gem_object *obj = object_list[i];
3480
3481 obj->write_domain = obj->pending_write_domain;
3482 }
3483
Eric Anholt673a3942008-07-30 12:06:12 -07003484 i915_verify_inactive(dev, __FILE__, __LINE__);
3485
3486#if WATCH_COHERENCY
3487 for (i = 0; i < args->buffer_count; i++) {
3488 i915_gem_object_check_coherency(object_list[i],
3489 exec_list[i].handle);
3490 }
3491#endif
3492
Eric Anholt673a3942008-07-30 12:06:12 -07003493#if WATCH_EXEC
Ben Gamari6911a9b2009-04-02 11:24:54 -07003494 i915_gem_dump_object(batch_obj,
Eric Anholt673a3942008-07-30 12:06:12 -07003495 args->batch_len,
3496 __func__,
3497 ~0);
3498#endif
3499
Eric Anholt673a3942008-07-30 12:06:12 -07003500 /* Exec the batchbuffer */
Eric Anholt201361a2009-03-11 12:30:04 -07003501 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003502 if (ret) {
3503 DRM_ERROR("dispatch failed %d\n", ret);
3504 goto err;
3505 }
3506
3507 /*
3508 * Ensure that the commands in the batch buffer are
3509 * finished before the interrupt fires
3510 */
3511 flush_domains = i915_retire_commands(dev);
3512
3513 i915_verify_inactive(dev, __FILE__, __LINE__);
3514
3515 /*
3516 * Get a seqno representing the execution of the current buffer,
3517 * which we can wait on. We would like to mitigate these interrupts,
3518 * likely by only creating seqnos occasionally (so that we have
3519 * *some* interrupts representing completion of buffers that we can
3520 * wait on when trying to clear up gtt space).
3521 */
Eric Anholtb9624422009-06-03 07:27:35 +00003522 seqno = i915_add_request(dev, file_priv, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07003523 BUG_ON(seqno == 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003524 for (i = 0; i < args->buffer_count; i++) {
3525 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003526
Eric Anholtce44b0e2008-11-06 16:00:31 -08003527 i915_gem_object_move_to_active(obj, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07003528#if WATCH_LRU
3529 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3530#endif
3531 }
3532#if WATCH_LRU
3533 i915_dump_lru(dev, __func__);
3534#endif
3535
3536 i915_verify_inactive(dev, __FILE__, __LINE__);
3537
Eric Anholt673a3942008-07-30 12:06:12 -07003538err:
Julia Lawallaad87df2008-12-21 16:28:47 +01003539 for (i = 0; i < pinned; i++)
3540 i915_gem_object_unpin(object_list[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07003541
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003542 for (i = 0; i < args->buffer_count; i++) {
3543 if (object_list[i]) {
3544 obj_priv = object_list[i]->driver_private;
3545 obj_priv->in_execbuffer = false;
3546 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003547 drm_gem_object_unreference(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003548 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003549
Eric Anholt673a3942008-07-30 12:06:12 -07003550 mutex_unlock(&dev->struct_mutex);
3551
Roland Dreiera35f2e22009-02-06 17:48:09 -08003552 if (!ret) {
3553 /* Copy the new buffer offsets back to the user's exec list. */
3554 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3555 (uintptr_t) args->buffers_ptr,
3556 exec_list,
3557 sizeof(*exec_list) * args->buffer_count);
Florian Mickler2bc43b52009-04-06 22:55:41 +02003558 if (ret) {
3559 ret = -EFAULT;
Roland Dreiera35f2e22009-02-06 17:48:09 -08003560 DRM_ERROR("failed to copy %d exec entries "
3561 "back to user (%d)\n",
3562 args->buffer_count, ret);
Florian Mickler2bc43b52009-04-06 22:55:41 +02003563 }
Roland Dreiera35f2e22009-02-06 17:48:09 -08003564 }
3565
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003566 /* Copy the updated relocations out regardless of current error
3567 * state. Failure to update the relocs would mean that the next
3568 * time userland calls execbuf, it would do so with presumed offset
3569 * state that didn't match the actual object state.
3570 */
3571 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3572 relocs);
3573 if (ret2 != 0) {
3574 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3575
3576 if (ret == 0)
3577 ret = ret2;
3578 }
3579
Eric Anholt673a3942008-07-30 12:06:12 -07003580pre_mutex_err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003581 drm_free_large(object_list);
3582 drm_free_large(exec_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07003583 kfree(cliprects);
Eric Anholt673a3942008-07-30 12:06:12 -07003584
3585 return ret;
3586}
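
/* Illustrative sketch (not part of the driver): the userland side of this
 * ioctl, using the i915_drm.h structure and domain names consumed above.
 * drm_ioctl() stands in for whatever wrapper the caller actually uses, and
 * the handles, sizes and cached offsets are assumptions.
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target_handle,
 *		.offset = 4 * reloc_dword,	// byte to patch in the batch
 *		.delta = 0,
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *		.write_domain = 0,
 *		.presumed_offset = last_seen_gtt_offset,
 *	};
 *	struct drm_i915_gem_exec_object exec[2] = {
 *		{ .handle = target_handle },
 *		{ .handle = batch_handle,	// batch buffer goes last
 *		  .relocation_count = 1,
 *		  .relocs_ptr = (uintptr_t) &reloc },
 *	};
 *	struct drm_i915_gem_execbuffer eb = {
 *		.buffers_ptr = (uintptr_t) exec,
 *		.buffer_count = 2,
 *		.batch_start_offset = 0,
 *		.batch_len = batch_bytes,
 *	};
 *	drm_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &eb);
 *
 * On return, exec[i].offset and reloc.presumed_offset hold the offsets the
 * kernel actually chose; callers cache them so the "already has the right
 * value" fast path above can skip the rewrite next time.
 */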
3587
3588int
3589i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3590{
3591 struct drm_device *dev = obj->dev;
3592 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3593 int ret;
3594
3595 i915_verify_inactive(dev, __FILE__, __LINE__);
3596 if (obj_priv->gtt_space == NULL) {
3597 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3598 if (ret != 0) {
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003599 if (ret != -EBUSY && ret != -ERESTARTSYS)
Kyle McMartin0fce81e2009-02-28 15:01:16 -05003600 DRM_ERROR("Failure to bind: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003601 return ret;
3602 }
Chris Wilson22c344e2009-02-11 14:26:45 +00003603 }
3604 /*
3605 * Pre-965 chips need a fence register set up in order to
3606 * properly handle tiled surfaces.
3607 */
Eric Anholta09ba7f2009-08-29 12:49:51 -07003608 if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
Chris Wilson8c4b8c32009-06-17 22:08:52 +01003609 ret = i915_gem_object_get_fence_reg(obj);
Chris Wilson22c344e2009-02-11 14:26:45 +00003610 if (ret != 0) {
3611 if (ret != -EBUSY && ret != -ERESTARTSYS)
3612 DRM_ERROR("Failure to install fence: %d\n",
3613 ret);
3614 return ret;
3615 }
Eric Anholt673a3942008-07-30 12:06:12 -07003616 }
3617 obj_priv->pin_count++;
3618
3619 /* If the object is not active and not pending a flush,
3620 * remove it from the inactive list
3621 */
3622 if (obj_priv->pin_count == 1) {
3623 atomic_inc(&dev->pin_count);
3624 atomic_add(obj->size, &dev->pin_memory);
3625 if (!obj_priv->active &&
Chris Wilson21d509e2009-06-06 09:46:02 +01003626 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
Eric Anholt673a3942008-07-30 12:06:12 -07003627 !list_empty(&obj_priv->list))
3628 list_del_init(&obj_priv->list);
3629 }
3630 i915_verify_inactive(dev, __FILE__, __LINE__);
3631
3632 return 0;
3633}
3634
3635void
3636i915_gem_object_unpin(struct drm_gem_object *obj)
3637{
3638 struct drm_device *dev = obj->dev;
3639 drm_i915_private_t *dev_priv = dev->dev_private;
3640 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3641
3642 i915_verify_inactive(dev, __FILE__, __LINE__);
3643 obj_priv->pin_count--;
3644 BUG_ON(obj_priv->pin_count < 0);
3645 BUG_ON(obj_priv->gtt_space == NULL);
3646
3647 /* If the object is no longer pinned, and is
3648 * neither active nor being flushed, then stick it on
3649 * the inactive list
3650 */
3651 if (obj_priv->pin_count == 0) {
3652 if (!obj_priv->active &&
Chris Wilson21d509e2009-06-06 09:46:02 +01003653 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003654 list_move_tail(&obj_priv->list,
3655 &dev_priv->mm.inactive_list);
3656 atomic_dec(&dev->pin_count);
3657 atomic_sub(obj->size, &dev->pin_memory);
3658 }
3659 i915_verify_inactive(dev, __FILE__, __LINE__);
3660}
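
/* Pin counts nest: a buffer pinned by execbuf and again through
 * i915_gem_pin_ioctl() stays resident until both users have unpinned it;
 * only the final i915_gem_object_unpin() of an otherwise idle buffer moves
 * it back onto the inactive list.
 */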
3661
3662int
3663i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3664 struct drm_file *file_priv)
3665{
3666 struct drm_i915_gem_pin *args = data;
3667 struct drm_gem_object *obj;
3668 struct drm_i915_gem_object *obj_priv;
3669 int ret;
3670
3671 mutex_lock(&dev->struct_mutex);
3672
3673 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3674 if (obj == NULL) {
3675 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3676 args->handle);
3677 mutex_unlock(&dev->struct_mutex);
3678 return -EBADF;
3679 }
3680 obj_priv = obj->driver_private;
3681
Jesse Barnes79e53942008-11-07 14:24:08 -08003682 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3683 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3684 args->handle);
Chris Wilson96dec612009-02-08 19:08:04 +00003685 drm_gem_object_unreference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003686 mutex_unlock(&dev->struct_mutex);
Jesse Barnes79e53942008-11-07 14:24:08 -08003687 return -EINVAL;
3688 }
3689
3690 obj_priv->user_pin_count++;
3691 obj_priv->pin_filp = file_priv;
3692 if (obj_priv->user_pin_count == 1) {
3693 ret = i915_gem_object_pin(obj, args->alignment);
3694 if (ret != 0) {
3695 drm_gem_object_unreference(obj);
3696 mutex_unlock(&dev->struct_mutex);
3697 return ret;
3698 }
Eric Anholt673a3942008-07-30 12:06:12 -07003699 }
3700
3701 /* XXX - flush the CPU caches for pinned objects
3702 * as the X server doesn't manage domains yet
3703 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003704 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003705 args->offset = obj_priv->gtt_offset;
3706 drm_gem_object_unreference(obj);
3707 mutex_unlock(&dev->struct_mutex);
3708
3709 return 0;
3710}
3711
3712int
3713i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3714 struct drm_file *file_priv)
3715{
3716 struct drm_i915_gem_pin *args = data;
3717 struct drm_gem_object *obj;
Jesse Barnes79e53942008-11-07 14:24:08 -08003718 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07003719
3720 mutex_lock(&dev->struct_mutex);
3721
3722 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3723 if (obj == NULL) {
3724 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3725 args->handle);
3726 mutex_unlock(&dev->struct_mutex);
3727 return -EBADF;
3728 }
3729
Jesse Barnes79e53942008-11-07 14:24:08 -08003730 obj_priv = obj->driver_private;
3731 if (obj_priv->pin_filp != file_priv) {
 3732		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3733 args->handle);
3734 drm_gem_object_unreference(obj);
3735 mutex_unlock(&dev->struct_mutex);
3736 return -EINVAL;
3737 }
3738 obj_priv->user_pin_count--;
3739 if (obj_priv->user_pin_count == 0) {
3740 obj_priv->pin_filp = NULL;
3741 i915_gem_object_unpin(obj);
3742 }
Eric Anholt673a3942008-07-30 12:06:12 -07003743
3744 drm_gem_object_unreference(obj);
3745 mutex_unlock(&dev->struct_mutex);
3746 return 0;
3747}
3748
3749int
3750i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3751 struct drm_file *file_priv)
3752{
3753 struct drm_i915_gem_busy *args = data;
3754 struct drm_gem_object *obj;
3755 struct drm_i915_gem_object *obj_priv;
3756
Eric Anholt673a3942008-07-30 12:06:12 -07003757 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3758 if (obj == NULL) {
3759 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3760 args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003761 return -EBADF;
3762 }
3763
Chris Wilsonb1ce7862009-06-06 09:46:00 +01003764 mutex_lock(&dev->struct_mutex);
Eric Anholtf21289b2009-02-18 09:44:56 -08003765 /* Update the active list for the hardware's current position.
3766 * Otherwise this only updates on a delayed timer or when irqs are
3767 * actually unmasked, and our working set ends up being larger than
3768 * required.
3769 */
3770 i915_gem_retire_requests(dev);
3771
Eric Anholt673a3942008-07-30 12:06:12 -07003772 obj_priv = obj->driver_private;
Eric Anholtc4de0a52008-12-14 19:05:04 -08003773 /* Don't count being on the flushing list against the object being
3774 * done. Otherwise, a buffer left on the flushing list but not getting
3775 * flushed (because nobody's flushing that domain) won't ever return
3776 * unbusy and get reused by libdrm's bo cache. The other expected
3777 * consumer of this interface, OpenGL's occlusion queries, also specs
3778 * that the objects get unbusy "eventually" without any interference.
3779 */
3780 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003781
3782 drm_gem_object_unreference(obj);
3783 mutex_unlock(&dev->struct_mutex);
3784 return 0;
3785}
3786
3787int
3788i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3789 struct drm_file *file_priv)
3790{
3791 return i915_gem_ring_throttle(dev, file_priv);
3792}
3793
3794int i915_gem_init_object(struct drm_gem_object *obj)
3795{
3796 struct drm_i915_gem_object *obj_priv;
3797
Eric Anholt9a298b22009-03-24 12:23:04 -07003798 obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
Eric Anholt673a3942008-07-30 12:06:12 -07003799 if (obj_priv == NULL)
3800 return -ENOMEM;
3801
3802 /*
3803 * We've just allocated pages from the kernel,
3804 * so they've just been written by the CPU with
3805 * zeros. They'll need to be clflushed before we
3806 * use them with the GPU.
3807 */
3808 obj->write_domain = I915_GEM_DOMAIN_CPU;
3809 obj->read_domains = I915_GEM_DOMAIN_CPU;
3810
Keith Packardba1eb1d2008-10-14 19:55:10 -07003811 obj_priv->agp_type = AGP_USER_MEMORY;
3812
Eric Anholt673a3942008-07-30 12:06:12 -07003813 obj->driver_private = obj_priv;
3814 obj_priv->obj = obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003815 obj_priv->fence_reg = I915_FENCE_REG_NONE;
Eric Anholt673a3942008-07-30 12:06:12 -07003816 INIT_LIST_HEAD(&obj_priv->list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07003817 INIT_LIST_HEAD(&obj_priv->fence_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08003818
Eric Anholt673a3942008-07-30 12:06:12 -07003819 return 0;
3820}
3821
3822void i915_gem_free_object(struct drm_gem_object *obj)
3823{
Jesse Barnesde151cf2008-11-12 10:03:55 -08003824 struct drm_device *dev = obj->dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003825 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3826
3827 while (obj_priv->pin_count > 0)
3828 i915_gem_object_unpin(obj);
3829
Dave Airlie71acb5e2008-12-30 20:31:46 +10003830 if (obj_priv->phys_obj)
3831 i915_gem_detach_phys_object(dev, obj);
3832
Eric Anholt673a3942008-07-30 12:06:12 -07003833 i915_gem_object_unbind(obj);
3834
Chris Wilson7e616152009-09-10 08:53:04 +01003835 if (obj_priv->mmap_offset)
3836 i915_gem_free_mmap_offset(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08003837
Eric Anholt9a298b22009-03-24 12:23:04 -07003838 kfree(obj_priv->page_cpu_valid);
Eric Anholt280b7132009-03-12 16:56:27 -07003839 kfree(obj_priv->bit_17);
Eric Anholt9a298b22009-03-24 12:23:04 -07003840 kfree(obj->driver_private);
Eric Anholt673a3942008-07-30 12:06:12 -07003841}
3842
Eric Anholt673a3942008-07-30 12:06:12 -07003843/** Unbinds all objects on the given buffer list.  Called with struct_mutex
 * held; on error the caller is responsible for unlocking.
 */
3844static int
3845i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
3846{
3847 struct drm_gem_object *obj;
3848 struct drm_i915_gem_object *obj_priv;
3849 int ret;
3850
3851 while (!list_empty(head)) {
3852 obj_priv = list_first_entry(head,
3853 struct drm_i915_gem_object,
3854 list);
3855 obj = obj_priv->obj;
3856
3857 if (obj_priv->pin_count != 0) {
3858 DRM_ERROR("Pinned object in unbind list\n");
3860 return -EINVAL;
3861 }
3862
3863 ret = i915_gem_object_unbind(obj);
3864 if (ret != 0) {
3865 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
3866 ret);
3868 return ret;
3869 }
3870 }
 3871
3873 return 0;
3874}
3875
Jesse Barnes5669fca2009-02-17 15:13:31 -08003876int
Eric Anholt673a3942008-07-30 12:06:12 -07003877i915_gem_idle(struct drm_device *dev)
3878{
3879 drm_i915_private_t *dev_priv = dev->dev_private;
3880 uint32_t seqno, cur_seqno, last_seqno;
3881 int stuck, ret;
3882
Keith Packard6dbe2772008-10-14 21:41:13 -07003883 mutex_lock(&dev->struct_mutex);
3884
3885 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
3886 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003887 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07003888 }
Eric Anholt673a3942008-07-30 12:06:12 -07003889
3890 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3891 * We need to replace this with a semaphore, or something.
3892 */
3893 dev_priv->mm.suspended = 1;
Ben Gamarif65d9422009-09-14 17:48:44 -04003894 del_timer(&dev_priv->hangcheck_timer);
Eric Anholt673a3942008-07-30 12:06:12 -07003895
Keith Packard6dbe2772008-10-14 21:41:13 -07003896	/* Cancel the retire work handler and wait for it to
 3897	 * finish if it is still running. */
3898 mutex_unlock(&dev->struct_mutex);
3899 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3900 mutex_lock(&dev->struct_mutex);
3901
Eric Anholt673a3942008-07-30 12:06:12 -07003902 i915_kernel_lost_context(dev);
3903
3904 /* Flush the GPU along with all non-CPU write domains
3905 */
Chris Wilson21d509e2009-06-06 09:46:02 +01003906 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
3907 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07003908
3909 if (seqno == 0) {
3910 mutex_unlock(&dev->struct_mutex);
3911 return -ENOMEM;
3912 }
3913
3914 dev_priv->mm.waiting_gem_seqno = seqno;
3915 last_seqno = 0;
3916 stuck = 0;
3917 for (;;) {
3918 cur_seqno = i915_get_gem_seqno(dev);
3919 if (i915_seqno_passed(cur_seqno, seqno))
3920 break;
3921 if (last_seqno == cur_seqno) {
3922 if (stuck++ > 100) {
3923 DRM_ERROR("hardware wedged\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04003924 atomic_set(&dev_priv->mm.wedged, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003925 DRM_WAKEUP(&dev_priv->irq_queue);
3926 break;
3927 }
3928 }
3929 msleep(10);
3930 last_seqno = cur_seqno;
3931 }
3932 dev_priv->mm.waiting_gem_seqno = 0;
3933
3934 i915_gem_retire_requests(dev);
3935
Carl Worth5e118f42009-03-20 11:54:25 -07003936 spin_lock(&dev_priv->mm.active_list_lock);
Ben Gamariba1234d2009-09-14 17:48:47 -04003937 if (!atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt28dfe522008-11-13 15:00:55 -08003938 /* Active and flushing should now be empty as we've
3939 * waited for a sequence higher than any pending execbuffer
3940 */
3941 WARN_ON(!list_empty(&dev_priv->mm.active_list));
3942 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
3943 /* Request should now be empty as we've also waited
3944 * for the last request in the list
3945 */
3946 WARN_ON(!list_empty(&dev_priv->mm.request_list));
3947 }
Eric Anholt673a3942008-07-30 12:06:12 -07003948
Eric Anholt28dfe522008-11-13 15:00:55 -08003949 /* Empty the active and flushing lists to inactive. If there's
3950 * anything left at this point, it means that we're wedged and
3951 * nothing good's going to happen by leaving them there. So strip
3952 * the GPU domains and just stuff them onto inactive.
Eric Anholt673a3942008-07-30 12:06:12 -07003953 */
Eric Anholt28dfe522008-11-13 15:00:55 -08003954 while (!list_empty(&dev_priv->mm.active_list)) {
3955 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07003956
Eric Anholt28dfe522008-11-13 15:00:55 -08003957 obj_priv = list_first_entry(&dev_priv->mm.active_list,
3958 struct drm_i915_gem_object,
3959 list);
3960 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3961 i915_gem_object_move_to_inactive(obj_priv->obj);
3962 }
Carl Worth5e118f42009-03-20 11:54:25 -07003963 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt28dfe522008-11-13 15:00:55 -08003964
3965 while (!list_empty(&dev_priv->mm.flushing_list)) {
3966 struct drm_i915_gem_object *obj_priv;
3967
Eric Anholt151903d2008-12-01 10:23:21 +10003968 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
Eric Anholt28dfe522008-11-13 15:00:55 -08003969 struct drm_i915_gem_object,
3970 list);
3971 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3972 i915_gem_object_move_to_inactive(obj_priv->obj);
3973 }
 3974
3976 /* Move all inactive buffers out of the GTT. */
Eric Anholt673a3942008-07-30 12:06:12 -07003977 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
Eric Anholt28dfe522008-11-13 15:00:55 -08003978 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
Keith Packard6dbe2772008-10-14 21:41:13 -07003979 if (ret) {
3980 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003981 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07003982 }
Eric Anholt673a3942008-07-30 12:06:12 -07003983
Keith Packard6dbe2772008-10-14 21:41:13 -07003984 i915_gem_cleanup_ringbuffer(dev);
3985 mutex_unlock(&dev->struct_mutex);
3986
Eric Anholt673a3942008-07-30 12:06:12 -07003987 return 0;
3988}
3989
3990static int
3991i915_gem_init_hws(struct drm_device *dev)
3992{
3993 drm_i915_private_t *dev_priv = dev->dev_private;
3994 struct drm_gem_object *obj;
3995 struct drm_i915_gem_object *obj_priv;
3996 int ret;
3997
3998 /* If we need a physical address for the status page, it's already
3999 * initialized at driver load time.
4000 */
4001 if (!I915_NEED_GFX_HWS(dev))
4002 return 0;
4003
4004 obj = drm_gem_object_alloc(dev, 4096);
4005 if (obj == NULL) {
4006 DRM_ERROR("Failed to allocate status page\n");
4007 return -ENOMEM;
4008 }
4009 obj_priv = obj->driver_private;
Keith Packardba1eb1d2008-10-14 19:55:10 -07004010 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
Eric Anholt673a3942008-07-30 12:06:12 -07004011
4012 ret = i915_gem_object_pin(obj, 4096);
4013 if (ret != 0) {
4014 drm_gem_object_unreference(obj);
4015 return ret;
4016 }
4017
4018 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07004019
Eric Anholt856fa192009-03-19 14:10:50 -07004020 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
Keith Packardba1eb1d2008-10-14 19:55:10 -07004021 if (dev_priv->hw_status_page == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07004022 DRM_ERROR("Failed to map status page.\n");
4023 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
Chris Wilson3eb2ee72009-02-11 14:26:34 +00004024 i915_gem_object_unpin(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004025 drm_gem_object_unreference(obj);
4026 return -EINVAL;
4027 }
4028 dev_priv->hws_obj = obj;
Eric Anholt673a3942008-07-30 12:06:12 -07004029 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4030 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
Keith Packardba1eb1d2008-10-14 19:55:10 -07004031 I915_READ(HWS_PGA); /* posting read */
Eric Anholt673a3942008-07-30 12:06:12 -07004032 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4033
4034 return 0;
4035}
4036
Chris Wilson85a7bb92009-02-11 14:52:44 +00004037static void
4038i915_gem_cleanup_hws(struct drm_device *dev)
4039{
4040 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00004041 struct drm_gem_object *obj;
4042 struct drm_i915_gem_object *obj_priv;
Chris Wilson85a7bb92009-02-11 14:52:44 +00004043
4044 if (dev_priv->hws_obj == NULL)
4045 return;
4046
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00004047 obj = dev_priv->hws_obj;
4048 obj_priv = obj->driver_private;
4049
Eric Anholt856fa192009-03-19 14:10:50 -07004050 kunmap(obj_priv->pages[0]);
Chris Wilson85a7bb92009-02-11 14:52:44 +00004051 i915_gem_object_unpin(obj);
4052 drm_gem_object_unreference(obj);
4053 dev_priv->hws_obj = NULL;
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00004054
Chris Wilson85a7bb92009-02-11 14:52:44 +00004055 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4056 dev_priv->hw_status_page = NULL;
4057
4058 /* Write high address into HWS_PGA when disabling. */
4059 I915_WRITE(HWS_PGA, 0x1ffff000);
4060}
4061
Jesse Barnes79e53942008-11-07 14:24:08 -08004062int
Eric Anholt673a3942008-07-30 12:06:12 -07004063i915_gem_init_ringbuffer(struct drm_device *dev)
4064{
4065 drm_i915_private_t *dev_priv = dev->dev_private;
4066 struct drm_gem_object *obj;
4067 struct drm_i915_gem_object *obj_priv;
Jesse Barnes79e53942008-11-07 14:24:08 -08004068 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
Eric Anholt673a3942008-07-30 12:06:12 -07004069 int ret;
Keith Packard50aa253d2008-10-14 17:20:35 -07004070 u32 head;
Eric Anholt673a3942008-07-30 12:06:12 -07004071
4072 ret = i915_gem_init_hws(dev);
4073 if (ret != 0)
4074 return ret;
4075
4076 obj = drm_gem_object_alloc(dev, 128 * 1024);
4077 if (obj == NULL) {
4078 DRM_ERROR("Failed to allocate ringbuffer\n");
Chris Wilson85a7bb92009-02-11 14:52:44 +00004079 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004080 return -ENOMEM;
4081 }
4082 obj_priv = obj->driver_private;
4083
4084 ret = i915_gem_object_pin(obj, 4096);
4085 if (ret != 0) {
4086 drm_gem_object_unreference(obj);
Chris Wilson85a7bb92009-02-11 14:52:44 +00004087 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004088 return ret;
4089 }
4090
4091 /* Set up the kernel mapping for the ring. */
Jesse Barnes79e53942008-11-07 14:24:08 -08004092 ring->Size = obj->size;
Eric Anholt673a3942008-07-30 12:06:12 -07004093
Jesse Barnes79e53942008-11-07 14:24:08 -08004094 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
4095 ring->map.size = obj->size;
4096 ring->map.type = 0;
4097 ring->map.flags = 0;
4098 ring->map.mtrr = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004099
Jesse Barnes79e53942008-11-07 14:24:08 -08004100 drm_core_ioremap_wc(&ring->map, dev);
4101 if (ring->map.handle == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07004102 DRM_ERROR("Failed to map ringbuffer.\n");
4103 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
Chris Wilson47ed1852009-02-11 14:26:33 +00004104 i915_gem_object_unpin(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004105 drm_gem_object_unreference(obj);
Chris Wilson85a7bb92009-02-11 14:52:44 +00004106 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004107 return -EINVAL;
4108 }
Jesse Barnes79e53942008-11-07 14:24:08 -08004109 ring->ring_obj = obj;
4110 ring->virtual_start = ring->map.handle;
Eric Anholt673a3942008-07-30 12:06:12 -07004111
4112 /* Stop the ring if it's running. */
4113 I915_WRITE(PRB0_CTL, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004114 I915_WRITE(PRB0_TAIL, 0);
Keith Packard50aa253d2008-10-14 17:20:35 -07004115 I915_WRITE(PRB0_HEAD, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004116
4117 /* Initialize the ring. */
4118 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
Keith Packard50aa253d2008-10-14 17:20:35 -07004119 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4120
4121 /* G45 ring initialization fails to reset head to zero */
4122 if (head != 0) {
4123 DRM_ERROR("Ring head not reset to zero "
4124 "ctl %08x head %08x tail %08x start %08x\n",
4125 I915_READ(PRB0_CTL),
4126 I915_READ(PRB0_HEAD),
4127 I915_READ(PRB0_TAIL),
4128 I915_READ(PRB0_START));
4129 I915_WRITE(PRB0_HEAD, 0);
4130
4131 DRM_ERROR("Ring head forced to zero "
4132 "ctl %08x head %08x tail %08x start %08x\n",
4133 I915_READ(PRB0_CTL),
4134 I915_READ(PRB0_HEAD),
4135 I915_READ(PRB0_TAIL),
4136 I915_READ(PRB0_START));
4137 }
4138
Eric Anholt673a3942008-07-30 12:06:12 -07004139 I915_WRITE(PRB0_CTL,
4140 ((obj->size - 4096) & RING_NR_PAGES) |
4141 RING_NO_REPORT |
4142 RING_VALID);
4143
Keith Packard50aa253d2008-10-14 17:20:35 -07004144 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4145
4146 /* If the head is still not zero, the ring is dead */
4147 if (head != 0) {
4148 DRM_ERROR("Ring initialization failed "
4149 "ctl %08x head %08x tail %08x start %08x\n",
4150 I915_READ(PRB0_CTL),
4151 I915_READ(PRB0_HEAD),
4152 I915_READ(PRB0_TAIL),
4153 I915_READ(PRB0_START));
4154 return -EIO;
4155 }
4156
Eric Anholt673a3942008-07-30 12:06:12 -07004157 /* Update our cache of the ring state */
Jesse Barnes79e53942008-11-07 14:24:08 -08004158 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4159 i915_kernel_lost_context(dev);
4160 else {
4161 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4162 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
4163 ring->space = ring->head - (ring->tail + 8);
4164 if (ring->space < 0)
4165 ring->space += ring->Size;
4166 }
Eric Anholt673a3942008-07-30 12:06:12 -07004167
4168 return 0;
4169}
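
/* Worked example (illustrative): for the 128KiB ring with head = 0x100 and
 * tail = 0x1f000, space = 0x100 - (0x1f000 + 8) is negative, so ring->Size
 * is added back, giving 0x20000 + 0x100 - 0x1f008 = 0x10f8 bytes writable
 * between the CPU's tail and the GPU's head.
 */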
4170
Jesse Barnes79e53942008-11-07 14:24:08 -08004171void
Eric Anholt673a3942008-07-30 12:06:12 -07004172i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4173{
4174 drm_i915_private_t *dev_priv = dev->dev_private;
4175
4176 if (dev_priv->ring.ring_obj == NULL)
4177 return;
4178
4179 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4180
4181 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4182 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4183 dev_priv->ring.ring_obj = NULL;
4184 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4185
Chris Wilson85a7bb92009-02-11 14:52:44 +00004186 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004187}
4188
4189int
4190i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4191 struct drm_file *file_priv)
4192{
4193 drm_i915_private_t *dev_priv = dev->dev_private;
4194 int ret;
4195
Jesse Barnes79e53942008-11-07 14:24:08 -08004196 if (drm_core_check_feature(dev, DRIVER_MODESET))
4197 return 0;
4198
Ben Gamariba1234d2009-09-14 17:48:47 -04004199 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004200 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04004201 atomic_set(&dev_priv->mm.wedged, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004202 }
4203
Eric Anholt673a3942008-07-30 12:06:12 -07004204 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004205 dev_priv->mm.suspended = 0;
4206
4207 ret = i915_gem_init_ringbuffer(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004208 if (ret != 0) {
4209 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004210 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004211 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004212
Carl Worth5e118f42009-03-20 11:54:25 -07004213 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07004214 BUG_ON(!list_empty(&dev_priv->mm.active_list));
Carl Worth5e118f42009-03-20 11:54:25 -07004215 spin_unlock(&dev_priv->mm.active_list_lock);
4216
Eric Anholt673a3942008-07-30 12:06:12 -07004217 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4218 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4219 BUG_ON(!list_empty(&dev_priv->mm.request_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004220 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004221
4222 drm_irq_install(dev);
4223
Eric Anholt673a3942008-07-30 12:06:12 -07004224 return 0;
4225}
4226
4227int
4228i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4229 struct drm_file *file_priv)
4230{
4231 int ret;
4232
Jesse Barnes79e53942008-11-07 14:24:08 -08004233 if (drm_core_check_feature(dev, DRIVER_MODESET))
4234 return 0;
4235
Eric Anholt673a3942008-07-30 12:06:12 -07004236 ret = i915_gem_idle(dev);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004237 drm_irq_uninstall(dev);
4238
Keith Packard6dbe2772008-10-14 21:41:13 -07004239 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004240}
4241
4242void
4243i915_gem_lastclose(struct drm_device *dev)
4244{
4245 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004246
Eric Anholte806b492009-01-22 09:56:58 -08004247 if (drm_core_check_feature(dev, DRIVER_MODESET))
4248 return;
4249
Keith Packard6dbe2772008-10-14 21:41:13 -07004250 ret = i915_gem_idle(dev);
4251 if (ret)
4252 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004253}
4254
4255void
4256i915_gem_load(struct drm_device *dev)
4257{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004258 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07004259 drm_i915_private_t *dev_priv = dev->dev_private;
4260
Carl Worth5e118f42009-03-20 11:54:25 -07004261 spin_lock_init(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07004262 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4263 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4264 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4265 INIT_LIST_HEAD(&dev_priv->mm.request_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004266 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004267 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4268 i915_gem_retire_work_handler);
Eric Anholt673a3942008-07-30 12:06:12 -07004269 dev_priv->mm.next_gem_seqno = 1;
4270
Chris Wilson31169712009-09-14 16:50:28 +01004271 spin_lock(&shrink_list_lock);
4272 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4273 spin_unlock(&shrink_list_lock);
4274
Jesse Barnesde151cf2008-11-12 10:03:55 -08004275 /* Old X drivers will take 0-2 for front, back, depth buffers */
4276 dev_priv->fence_reg_start = 3;
4277
Jesse Barnes0f973f22009-01-26 17:10:45 -08004278 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004279 dev_priv->num_fence_regs = 16;
4280 else
4281 dev_priv->num_fence_regs = 8;
4282
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004283 /* Initialize fence registers to zero */
4284 if (IS_I965G(dev)) {
4285 for (i = 0; i < 16; i++)
4286 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4287 } else {
4288 for (i = 0; i < 8; i++)
4289 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4290 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4291 for (i = 0; i < 8; i++)
4292 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4293 }
4294
Eric Anholt673a3942008-07-30 12:06:12 -07004295 i915_gem_detect_bit_6_swizzle(dev);
4296}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004297
4298/*
4299 * Create a physically contiguous memory object for this object
4300 * e.g. for cursor + overlay regs
4301 */
4302int i915_gem_init_phys_object(struct drm_device *dev,
4303 int id, int size)
4304{
4305 drm_i915_private_t *dev_priv = dev->dev_private;
4306 struct drm_i915_gem_phys_object *phys_obj;
4307 int ret;
4308
4309 if (dev_priv->mm.phys_objs[id - 1] || !size)
4310 return 0;
4311
Eric Anholt9a298b22009-03-24 12:23:04 -07004312 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004313 if (!phys_obj)
4314 return -ENOMEM;
4315
4316 phys_obj->id = id;
4317
4318 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
4319 if (!phys_obj->handle) {
4320 ret = -ENOMEM;
4321 goto kfree_obj;
4322 }
4323#ifdef CONFIG_X86
4324 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4325#endif
4326
4327 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4328
4329 return 0;
4330kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004331 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004332 return ret;
4333}
4334
4335void i915_gem_free_phys_object(struct drm_device *dev, int id)
4336{
4337 drm_i915_private_t *dev_priv = dev->dev_private;
4338 struct drm_i915_gem_phys_object *phys_obj;
4339
4340 if (!dev_priv->mm.phys_objs[id - 1])
4341 return;
4342
4343 phys_obj = dev_priv->mm.phys_objs[id - 1];
4344 if (phys_obj->cur_obj) {
4345 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4346 }
4347
4348#ifdef CONFIG_X86
4349 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4350#endif
4351 drm_pci_free(dev, phys_obj->handle);
4352 kfree(phys_obj);
4353 dev_priv->mm.phys_objs[id - 1] = NULL;
4354}
4355
4356void i915_gem_free_all_phys_object(struct drm_device *dev)
4357{
4358 int i;
4359
Dave Airlie260883c2009-01-22 17:58:49 +10004360 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004361 i915_gem_free_phys_object(dev, i);
4362}
4363
4364void i915_gem_detach_phys_object(struct drm_device *dev,
4365 struct drm_gem_object *obj)
4366{
4367 struct drm_i915_gem_object *obj_priv;
4368 int i;
4369 int ret;
4370 int page_count;
4371
4372 obj_priv = obj->driver_private;
4373 if (!obj_priv->phys_obj)
4374 return;
4375
Eric Anholt856fa192009-03-19 14:10:50 -07004376 ret = i915_gem_object_get_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004377 if (ret)
4378 goto out;
4379
4380 page_count = obj->size / PAGE_SIZE;
4381
4382 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07004383 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004384 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4385
4386 memcpy(dst, src, PAGE_SIZE);
4387 kunmap_atomic(dst, KM_USER0);
4388 }
Eric Anholt856fa192009-03-19 14:10:50 -07004389 drm_clflush_pages(obj_priv->pages, page_count);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004390 drm_agp_chipset_flush(dev);
Chris Wilsond78b47b2009-06-17 21:52:49 +01004391
4392 i915_gem_object_put_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004393out:
4394 obj_priv->phys_obj->cur_obj = NULL;
4395 obj_priv->phys_obj = NULL;
4396}
4397
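/* Bind a GEM object to the phys object for @id, allocating the phys object
 * on first use and seeding it with the object's current page contents.
 */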
4398int
4399i915_gem_attach_phys_object(struct drm_device *dev,
4400 struct drm_gem_object *obj, int id)
4401{
4402 drm_i915_private_t *dev_priv = dev->dev_private;
4403 struct drm_i915_gem_object *obj_priv;
4404 int ret = 0;
4405 int page_count;
4406 int i;
4407
4408 if (id > I915_MAX_PHYS_OBJECT)
4409 return -EINVAL;
4410
4411 obj_priv = obj->driver_private;
4412
4413 if (obj_priv->phys_obj) {
4414 if (obj_priv->phys_obj->id == id)
4415 return 0;
4416 i915_gem_detach_phys_object(dev, obj);
4417 }
4418
4419
 4420 	/* create the phys object for this id on first use */
4421 if (!dev_priv->mm.phys_objs[id - 1]) {
4422 ret = i915_gem_init_phys_object(dev, id,
4423 obj->size);
4424 if (ret) {
Linus Torvaldsaeb565d2009-01-26 10:01:53 -08004425 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004426 goto out;
4427 }
4428 }
4429
 4430 	/* bind the phys object to this GEM object */
4431 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4432 obj_priv->phys_obj->cur_obj = obj;
4433
Eric Anholt856fa192009-03-19 14:10:50 -07004434 ret = i915_gem_object_get_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004435 if (ret) {
4436 DRM_ERROR("failed to get page list\n");
4437 goto out;
4438 }
4439
4440 page_count = obj->size / PAGE_SIZE;
4441
4442 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07004443 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004444 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4445
4446 memcpy(dst, src, PAGE_SIZE);
4447 kunmap_atomic(src, KM_USER0);
4448 }
4449
Chris Wilsond78b47b2009-06-17 21:52:49 +01004450 i915_gem_object_put_pages(obj);
4451
Dave Airlie71acb5e2008-12-30 20:31:46 +10004452 return 0;
4453out:
4454 return ret;
4455}
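
/*
 * A minimal usage sketch (not code from this file): a caller whose hardware
 * needs a physically contiguous buffer, e.g. cursor or overlay setup on
 * older chips, would do something like
 *
 *	ret = i915_gem_attach_phys_object(dev, obj, I915_GEM_PHYS_CURSOR_0);
 *	...
 *	i915_gem_detach_phys_object(dev, obj);
 *
 * where 'dev' and 'obj' stand in for the caller's device and buffer object.
 */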
4456
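/* pwrite fast path for buffers bound to a phys object: copy the user data
 * straight into the contiguous kernel mapping and flush the chipset caches.
 */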
4457static int
4458i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4459 struct drm_i915_gem_pwrite *args,
4460 struct drm_file *file_priv)
4461{
4462 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4463 void *obj_addr;
4464 int ret;
4465 char __user *user_data;
4466
4467 user_data = (char __user *) (uintptr_t) args->data_ptr;
4468 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4469
Dave Airliee08fb4f2009-02-25 14:52:30 +10004470 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004471 ret = copy_from_user(obj_addr, user_data, args->size);
4472 if (ret)
4473 return -EFAULT;
4474
4475 drm_agp_chipset_flush(dev);
4476 return 0;
4477}
Eric Anholtb9624422009-06-03 07:27:35 +00004478
4479void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4480{
4481 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4482
4483 /* Clean up our request list when the client is going away, so that
4484 * later retire_requests won't dereference our soon-to-be-gone
4485 * file_priv.
4486 */
4487 mutex_lock(&dev->struct_mutex);
4488 while (!list_empty(&i915_file_priv->mm.request_list))
4489 list_del_init(i915_file_priv->mm.request_list.next);
4490 mutex_unlock(&dev->struct_mutex);
4491}
Chris Wilson31169712009-09-14 16:50:28 +01004492
4493/* Immediately discard the backing storage */
4494static void
4495i915_gem_object_truncate(struct drm_gem_object *obj)
4496{
4497 struct inode *inode;
4498
4499 inode = obj->filp->f_path.dentry->d_inode;
4500
4501 mutex_lock(&inode->i_mutex);
4502 truncate_inode_pages(inode->i_mapping, 0);
4503 mutex_unlock(&inode->i_mutex);
4504}
4505
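/* An object counts as purgeable when nothing has been written to its pages
 * (it is not dirty), so dropping its backing storage cannot lose data.
 */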
4506static inline int
4507i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
4508{
4509 return !obj_priv->dirty;
4510}
4511
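/* Memory-pressure callback. With nr_to_scan == 0 this only reports how many
 * inactive objects could be reclaimed. Otherwise it first evicts purgeable
 * inactive buffers, then unbinds whatever remains on the inactive lists, and
 * returns -1 when every device's struct_mutex was contended (the caller may
 * already hold one, so blocking here could deadlock).
 */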
4512static int
4513i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
4514{
4515 drm_i915_private_t *dev_priv, *next_dev;
4516 struct drm_i915_gem_object *obj_priv, *next_obj;
4517 int cnt = 0;
4518 int would_deadlock = 1;
4519
4520 /* "fast-path" to count number of available objects */
 4521 	/* "fast path": just count the number of objects we could reclaim */
4522 spin_lock(&shrink_list_lock);
4523 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
4524 struct drm_device *dev = dev_priv->dev;
4525
4526 if (mutex_trylock(&dev->struct_mutex)) {
4527 list_for_each_entry(obj_priv,
4528 &dev_priv->mm.inactive_list,
4529 list)
4530 cnt++;
4531 mutex_unlock(&dev->struct_mutex);
4532 }
4533 }
4534 spin_unlock(&shrink_list_lock);
4535
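		/* report a count scaled by vfs_cache_pressure, matching the VFS cache shrinkers */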
4536 return (cnt / 100) * sysctl_vfs_cache_pressure;
4537 }
4538
4539 spin_lock(&shrink_list_lock);
4540
 4541 	/* first pass: reclaim only purgeable (clean) buffers */
4542 list_for_each_entry_safe(dev_priv, next_dev,
4543 &shrink_list, mm.shrink_list) {
4544 struct drm_device *dev = dev_priv->dev;
4545
 4546 		if (!mutex_trylock(&dev->struct_mutex))
4547 continue;
4548
4549 spin_unlock(&shrink_list_lock);
4550
4551 i915_gem_retire_requests(dev);
4552
4553 list_for_each_entry_safe(obj_priv, next_obj,
4554 &dev_priv->mm.inactive_list,
4555 list) {
4556 if (i915_gem_object_is_purgeable(obj_priv)) {
4557 struct drm_gem_object *obj = obj_priv->obj;
4558 i915_gem_object_unbind(obj);
4559 i915_gem_object_truncate(obj);
4560
4561 if (--nr_to_scan <= 0)
4562 break;
4563 }
4564 }
4565
4566 spin_lock(&shrink_list_lock);
4567 mutex_unlock(&dev->struct_mutex);
4568
4569 if (nr_to_scan <= 0)
4570 break;
4571 }
4572
 4573 	/* second pass: evict, or just count, anything still on the inactive list */
4574 list_for_each_entry_safe(dev_priv, next_dev,
4575 &shrink_list, mm.shrink_list) {
4576 struct drm_device *dev = dev_priv->dev;
4577
 4578 		if (!mutex_trylock(&dev->struct_mutex))
4579 continue;
4580
4581 spin_unlock(&shrink_list_lock);
4582
4583 list_for_each_entry_safe(obj_priv, next_obj,
4584 &dev_priv->mm.inactive_list,
4585 list) {
4586 if (nr_to_scan > 0) {
4587 struct drm_gem_object *obj = obj_priv->obj;
4588 i915_gem_object_unbind(obj);
4589 if (i915_gem_object_is_purgeable(obj_priv))
4590 i915_gem_object_truncate(obj);
4591
4592 nr_to_scan--;
4593 } else
4594 cnt++;
4595 }
4596
4597 spin_lock(&shrink_list_lock);
4598 mutex_unlock(&dev->struct_mutex);
4599
4600 would_deadlock = 0;
4601 }
4602
4603 spin_unlock(&shrink_list_lock);
4604
4605 if (would_deadlock)
4606 return -1;
4607 else if (cnt > 0)
4608 return (cnt / 100) * sysctl_vfs_cache_pressure;
4609 else
4610 return 0;
4611}
4612
4613static struct shrinker shrinker = {
4614 .shrink = i915_gem_shrink,
4615 .seeks = DEFAULT_SEEKS,
4616};
4617
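/* Meant to be wired into the driver's module init/exit paths (hence
 * __init/__exit); presumably called from i915_init() and i915_exit().
 */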
4618__init void
4619i915_gem_shrinker_init(void)
4620{
4621 register_shrinker(&shrinker);
4622}
4623
4624__exit void
4625i915_gem_shrinker_exit(void)
4626{
4627 unregister_shrinker(&shrinker);
4628}