/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
                                             int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                                      uint64_t offset,
                                                      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev, int min_size);
static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (start >= end ||
            (start & (PAGE_SIZE - 1)) != 0 ||
            (end & (PAGE_SIZE - 1)) != 0) {
                return -EINVAL;
        }

        drm_mm_init(&dev_priv->mm.gtt_space, start,
                    end - start);

        dev->gtt_total = (uint32_t) (end - start);

        return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_init *args = data;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_i915_gem_get_aperture *args = data;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        args->aper_size = dev->gtt_total;
        args->aper_available_size = (args->aper_size -
                                     atomic_read(&dev->pin_memory));

        return 0;
}
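
/*
 * Usage sketch (an assumption about typical libdrm-based userspace, not part
 * of this file): querying the aperture through this ioctl with drmIoctl().
 * "fd" stands for an already-open DRM device file descriptor.
 *
 *      #include <stdio.h>
 *      #include <xf86drm.h>
 *      #include "i915_drm.h"
 *
 *      struct drm_i915_gem_get_aperture aperture = { 0 };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
 *              printf("aperture: %llu bytes total, %llu available\n",
 *                     (unsigned long long)aperture.aper_size,
 *                     (unsigned long long)aperture.aper_available_size);
 */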


/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        args->size = roundup(args->size, PAGE_SIZE);

        /* Allocate the new object */
        obj = i915_gem_alloc_object(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        drm_gem_object_handle_unreference_unlocked(obj);

        if (ret)
                return ret;

        args->handle = handle;

        return 0;
}
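
/*
 * Usage sketch (assumed libdrm-based userspace, not part of this file):
 * creating a buffer object and receiving its handle.  "fd" is an assumed
 * open DRM file descriptor; the size is rounded up to PAGE_SIZE by the
 * ioctl itself.
 *
 *      #include <xf86drm.h>
 *      #include "i915_drm.h"
 *
 *      struct drm_i915_gem_create create = { .size = 4096 };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0) {
 *              // create.handle now names the new object and can be passed
 *              // to the pread/pwrite/mmap ioctls below.
 *      }
 */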

static inline int
fast_shmem_read(struct page **pages,
                loff_t page_base, int page_offset,
                char __user *data,
                int length)
{
        char __iomem *vaddr;
        int unwritten;

        vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
        if (vaddr == NULL)
                return -ENOMEM;
        unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
        kunmap_atomic(vaddr, KM_USER0);

        if (unwritten)
                return -EFAULT;

        return 0;
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
        drm_i915_private_t *dev_priv = obj->dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj_priv->tiling_mode != I915_TILING_NONE;
}

static inline int
slow_shmem_copy(struct page *dst_page,
                int dst_offset,
                struct page *src_page,
                int src_offset,
                int length)
{
        char *dst_vaddr, *src_vaddr;

        dst_vaddr = kmap_atomic(dst_page, KM_USER0);
        if (dst_vaddr == NULL)
                return -ENOMEM;

        src_vaddr = kmap_atomic(src_page, KM_USER1);
        if (src_vaddr == NULL) {
                kunmap_atomic(dst_vaddr, KM_USER0);
                return -ENOMEM;
        }

        memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

        kunmap_atomic(src_vaddr, KM_USER1);
        kunmap_atomic(dst_vaddr, KM_USER0);

        return 0;
}

static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
                      int gpu_offset,
                      struct page *cpu_page,
                      int cpu_offset,
                      int length,
                      int is_read)
{
        char *gpu_vaddr, *cpu_vaddr;

        /* Use the unswizzled path if this page isn't affected. */
        if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
                if (is_read)
                        return slow_shmem_copy(cpu_page, cpu_offset,
                                               gpu_page, gpu_offset, length);
                else
                        return slow_shmem_copy(gpu_page, gpu_offset,
                                               cpu_page, cpu_offset, length);
        }

        gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
        if (gpu_vaddr == NULL)
                return -ENOMEM;

        cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
        if (cpu_vaddr == NULL) {
                kunmap_atomic(gpu_vaddr, KM_USER0);
                return -ENOMEM;
        }

        /* Copy the data, XORing A6 with A17 (1). The user already knows he's
         * XORing with the other bits (A9 for Y, A9 and A10 for X)
         */
        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                if (is_read) {
                        memcpy(cpu_vaddr + cpu_offset,
                               gpu_vaddr + swizzled_gpu_offset,
                               this_length);
                } else {
                        memcpy(gpu_vaddr + swizzled_gpu_offset,
                               cpu_vaddr + cpu_offset,
                               this_length);
                }
                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        kunmap_atomic(cpu_vaddr, KM_USER1);
        kunmap_atomic(gpu_vaddr, KM_USER0);

        return 0;
}
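
/*
 * Worked example for the swizzled copy above: on a page whose physical
 * address has bit 17 set, bit 6 of every byte offset is flipped, so offset
 * 0x00 is accessed at 0x40, 0x10 at 0x50, 0x40 back at 0x00, and so on.
 * Because cacheline_end never lets a run cross a 64-byte boundary, each
 * memcpy stays within a single swizzled cacheline at gpu_offset ^ 64.
 */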

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
                goto fail_unlock;

        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                        args->size);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = to_intel_bo(obj);
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_shmem_read(obj_priv->pages,
                                      page_base, page_offset,
                                      user_data, page_length);
                if (ret)
                        goto fail_put_pages;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
        int ret;

        ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

        /* If we've insufficient memory to map in the pages, attempt
         * to make some space by throwing out some old buffers.
         */
        if (ret == -ENOMEM) {
                struct drm_device *dev = obj->dev;

                ret = i915_gem_evict_something(dev, obj->size);
                if (ret)
                        return ret;

                ret = i915_gem_object_get_pages(obj, 0);
        }

        return ret;
}

/**
 * This is the fallback shmem pread path, which uses get_user_pages to pin
 * the user pages and kmap_atomic to copy out of the object's backing pages,
 * so we can hold the struct_mutex across the copy without taking page
 * faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
        loff_t offset, pinned_pages, i;
        loff_t first_data_page, last_data_page, num_pages;
        int shmem_page_index, shmem_page_offset;
        int data_page_index, data_page_offset;
        int page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;
        int do_bit17_swizzling;

        remain = args->size;

        /* Pin the user pages containing the data.  We can't fault while
         * holding the struct mutex, yet we want to hold it while
         * dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 1, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto fail_put_user_pages;
        }

        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
                goto fail_unlock;

        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                        args->size);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = to_intel_bo(obj);
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * shmem_page_index = page number within shmem file
                 * shmem_page_offset = offset within page in shmem file
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                shmem_page_index = offset / PAGE_SIZE;
                shmem_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                if (do_bit17_swizzling) {
                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                                    shmem_page_offset,
                                                    user_pages[data_page_index],
                                                    data_page_offset,
                                                    page_length,
                                                    1);
                } else {
                        ret = slow_shmem_copy(user_pages[data_page_index],
                                              data_page_offset,
                                              obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              page_length);
                }
                if (ret)
                        goto fail_put_pages;

                remain -= page_length;
                data_ptr += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
        for (i = 0; i < pinned_pages; i++) {
                SetPageDirty(user_pages[i]);
                page_cache_release(user_pages[i]);
        }
        drm_free_large(user_pages);

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = to_intel_bo(obj);

        /* Bounds check source.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }

        if (i915_gem_object_needs_bit17_swizzle(obj)) {
                ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
        } else {
                ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
                if (ret != 0)
                        ret = i915_gem_shmem_pread_slow(dev, obj, args,
                                                        file_priv);
        }

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}
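
/*
 * Usage sketch (assumed libdrm-based userspace, not part of this file):
 * reading the first 4096 bytes of a buffer object into a local array.
 * "fd" and "handle" are assumed to come from the open/create steps above.
 *
 *      #include <stdint.h>
 *      #include <stdio.h>
 *      #include <xf86drm.h>
 *      #include "i915_drm.h"
 *
 *      char buf[4096];
 *      struct drm_i915_gem_pread pread = {
 *              .handle = handle,
 *              .offset = 0,
 *              .size = sizeof(buf),
 *              .data_ptr = (uint64_t)(uintptr_t)buf,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread) != 0)
 *              perror("i915 gem pread");
 */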

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        char *vaddr_atomic;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        if (unwritten)
                return -EFAULT;
        return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
                  loff_t gtt_base, int gtt_offset,
                  struct page *user_page, int user_offset,
                  int length)
{
        char *src_vaddr, *dst_vaddr;
        unsigned long unwritten;

        dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
        src_vaddr = kmap_atomic(user_page, KM_USER1);
        unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
                                                      src_vaddr + user_offset,
                                                      length);
        kunmap_atomic(src_vaddr, KM_USER1);
        io_mapping_unmap_atomic(dst_vaddr);
        if (unwritten)
                return -EFAULT;
        return 0;
}

static inline int
fast_shmem_write(struct page **pages,
                 loff_t page_base, int page_offset,
                 char __user *data,
                 int length)
{
        char __iomem *vaddr;
        unsigned long unwritten;

        vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
        if (vaddr == NULL)
                return -ENOMEM;
        unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
        kunmap_atomic(vaddr, KM_USER0);

        if (unwritten)
                return -EFAULT;
        return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
        if (!access_ok(VERIFY_READ, user_data, remain))
                return -EFAULT;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
                goto fail;

        obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                                      page_offset, user_data, page_length);

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
                if (ret)
                        goto fail;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail:
        i915_gem_object_unpin(obj);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t gtt_page_base, offset;
        loff_t first_data_page, last_data_page, num_pages;
        loff_t pinned_pages, i;
        struct page **user_pages;
        struct mm_struct *mm = current->mm;
        int gtt_page_offset, data_page_offset, data_page_index, page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;

        remain = args->size;

        /* Pin the user pages containing the data.  We can't fault while
         * holding the struct mutex, and all of the pwrite implementations
         * want to hold it while dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto out_unpin_pages;
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret)
                goto out_unlock;

        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
                goto out_unpin_object;

        obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * gtt_page_base = page offset within aperture
                 * gtt_page_offset = offset within page in aperture
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                gtt_page_base = offset & PAGE_MASK;
                gtt_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((gtt_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - gtt_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
                                        gtt_page_base, gtt_page_offset,
                                        user_pages[data_page_index],
                                        data_page_offset,
                                        page_length);

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
                if (ret)
                        goto out_unpin_object;

                remain -= page_length;
                offset += page_length;
                data_ptr += page_length;
        }

out_unpin_object:
        i915_gem_object_unpin(obj);
out_unlock:
        mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
        drm_free_large(user_pages);

        return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
                goto fail_unlock;

        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_shmem_write(obj_priv->pages,
                                       page_base, page_offset,
                                       user_data, page_length);
                if (ret)
                        goto fail_put_pages;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
                           struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
        loff_t offset, pinned_pages, i;
        loff_t first_data_page, last_data_page, num_pages;
        int shmem_page_index, shmem_page_offset;
        int data_page_index, data_page_offset;
        int page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;
        int do_bit17_swizzling;

        remain = args->size;

        /* Pin the user pages containing the data.  We can't fault while
         * holding the struct mutex, and all of the pwrite implementations
         * want to hold it while dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto fail_put_user_pages;
        }

        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
                goto fail_unlock;

        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret != 0)
                goto fail_put_pages;

        obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * shmem_page_index = page number within shmem file
                 * shmem_page_offset = offset within page in shmem file
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                shmem_page_index = offset / PAGE_SIZE;
                shmem_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                if (do_bit17_swizzling) {
                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
                                                    shmem_page_offset,
                                                    user_pages[data_page_index],
                                                    data_page_offset,
                                                    page_length,
                                                    0);
                } else {
                        ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
                                              shmem_page_offset,
                                              user_pages[data_page_index],
                                              data_page_offset,
                                              page_length);
                }
                if (ret)
                        goto fail_put_pages;

                remain -= page_length;
                data_ptr += page_length;
                offset += page_length;
        }

fail_put_pages:
        i915_gem_object_put_pages(obj);
fail_unlock:
        mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
        drm_free_large(user_pages);

        return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = to_intel_bo(obj);

        /* Bounds check destination.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }

        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
                 dev->gtt_total != 0) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
                                                       file_priv);
                }
        } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
                ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
        } else {
                ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
                                                         file_priv);
                }
        }

#if WATCH_PWRITE
        if (ret)
                DRM_INFO("pwrite failed %d\n", ret);
#endif

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}
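
/*
 * Usage sketch (assumed libdrm-based userspace, not part of this file):
 * uploading data into a buffer object; the kernel chooses the phys, GTT or
 * shmem path above based on tiling and aperture availability.  "fd" and
 * "handle" are assumed to come from the create step.
 *
 *      #include <stdint.h>
 *      #include <string.h>
 *      #include <xf86drm.h>
 *      #include "i915_drm.h"
 *
 *      char data[4096];
 *      memset(data, 0xa5, sizeof(data));
 *
 *      struct drm_i915_gem_pwrite pwrite = {
 *              .handle = handle,
 *              .offset = 0,
 *              .size = sizeof(data),
 *              .data_ptr = (uint64_t)(uintptr_t)data,
 *      };
 *
 *      drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */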

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        /* Only handle setting domains to types used by the CPU. */
        if (write_domain & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        if (read_domains & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        /* Having something in the write domain implies it's in the read
         * domain, and only that read domain.  Enforce that in the request.
         */
        if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = to_intel_bo(obj);

        mutex_lock(&dev->struct_mutex);

        intel_mark_busy(dev, obj);

#if WATCH_BUF
        DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
                 obj, obj->size, read_domains, write_domain);
#endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

                /* Update the LRU on the fence for the CPU access that's
                 * about to occur.
                 */
                if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
                        struct drm_i915_fence_reg *reg =
                                &dev_priv->fence_regs[obj_priv->fence_reg];
                        list_move_tail(&reg->lru_list,
                                       &dev_priv->mm.fence_list);
                }

                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
                 */
                if (ret == -EINVAL)
                        ret = 0;
        } else {
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
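
/*
 * Usage sketch (assumed libdrm-based userspace, not part of this file):
 * preparing an object for CPU reads and writes through its shmem mmap
 * before touching the pointer.  GPU domains are rejected by the checks
 * above, so only the CPU and GTT domains make sense here.
 *
 *      #include <xf86drm.h>
 *      #include "i915_drm.h"
 *
 *      struct drm_i915_gem_set_domain sd = {
 *              .handle = handle,
 *              .read_domains = I915_GEM_DOMAIN_CPU,
 *              .write_domain = I915_GEM_DOMAIN_CPU,
 *      };
 *
 *      drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */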

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -EBADF;
        }

#if WATCH_BUF
        DRM_INFO("%s: sw_finish %d (%p %zd)\n",
                 __func__, args->handle, obj, obj->size);
#endif
        obj_priv = to_intel_bo(obj);

        /* Pinned buffers may be scanout, so flush the cache */
        if (obj_priv->pin_count)
                i915_gem_object_flush_cpu_write_domain(obj);

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        loff_t offset;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        offset = args->offset;

        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        up_write(&current->mm->mmap_sem);
        drm_gem_object_unreference_unlocked(obj);
        if (IS_ERR((void *)addr))
                return addr;

        args->addr_ptr = (uint64_t) addr;

        return 0;
}
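
/*
 * Usage sketch (assumed libdrm-based userspace, not part of this file):
 * mapping the object's shmem backing store into the process and writing
 * through the returned pointer; this pairs naturally with the set_domain
 * and sw_finish ioctls around the CPU access.
 *
 *      #include <stdint.h>
 *      #include <xf86drm.h>
 *      #include "i915_drm.h"
 *
 *      struct drm_i915_gem_mmap mmap_arg = {
 *              .handle = handle,
 *              .offset = 0,
 *              .size = 4096,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0) {
 *              char *ptr = (char *)(uintptr_t)mmap_arg.addr_ptr;
 *              ptr[0] = 0x42;  // CPU-visible, non-GTT mapping
 *      }
 */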

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        pgoff_t page_offset;
        unsigned long pfn;
        int ret = 0;
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;

        /* Now bind it into the GTT if needed */
        mutex_lock(&dev->struct_mutex);
        if (!obj_priv->gtt_space) {
                ret = i915_gem_object_bind_to_gtt(obj, 0);
                if (ret)
                        goto unlock;

                list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

                ret = i915_gem_object_set_to_gtt_domain(obj, write);
                if (ret)
                        goto unlock;
        }

        /* Need a new fence register? */
        if (obj_priv->tiling_mode != I915_TILING_NONE) {
                ret = i915_gem_object_get_fence_reg(obj);
                if (ret)
                        goto unlock;
        }

        pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
                page_offset;

        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
        mutex_unlock(&dev->struct_mutex);

        switch (ret) {
        case 0:
        case -ERESTARTSYS:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
        case -EAGAIN:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_map_list *list;
        struct drm_local_map *map;
        int ret = 0;

        /* Set the object up for mmap'ing */
        list = &obj->map_list;
        list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
        if (!list->map)
                return -ENOMEM;

        map = list->map;
        map->type = _DRM_GEM;
        map->size = obj->size;
        map->handle = obj;

        /* Get a DRM GEM mmap offset allocated... */
        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
                                                    obj->size / PAGE_SIZE, 0, 0);
        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
                ret = -ENOMEM;
                goto out_free_list;
        }

        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                                                  obj->size / PAGE_SIZE, 0);
        if (!list->file_offset_node) {
                ret = -ENOMEM;
                goto out_free_list;
        }

        list->hash.key = list->file_offset_node->start;
        if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
                DRM_ERROR("failed to add to map hash\n");
                ret = -ENOMEM;
                goto out_free_mm;
        }

        /* By now we should be all set, any drm_mmap request on the offset
         * below will get to our mmap & fault handler */
        obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

        return 0;

out_free_mm:
        drm_mm_put_block(list->file_offset_node);
out_free_list:
        kfree(list->map);

        return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

        if (dev->dev_mapping)
                unmap_mapping_range(dev->dev_mapping,
                                    obj_priv->mmap_offset, obj->size, 1);
}

static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list;

        list = &obj->map_list;
        drm_ht_remove_item(&mm->offset_hash, &list->hash);

        if (list->file_offset_node) {
                drm_mm_put_block(list->file_offset_node);
                list->file_offset_node = NULL;
        }

        if (list->map) {
                kfree(list->map);
                list->map = NULL;
        }

        obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int start, i;

        /*
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
        if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
                return 4096;

        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
        if (IS_I9XX(dev))
                start = 1024*1024;
        else
                start = 512*1024;

        for (i = start; i < obj->size; i <<= 1)
                ;

        return i;
}
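
/*
 * Worked example of the loop above (illustrative numbers): a 3 MiB tiled
 * object on a pre-965 I9XX chip starts at 1 MiB and doubles until the size
 * can contain the object, ending up with a 4 MiB GTT alignment; the same
 * object on a 965+ part, or any untiled object, simply gets the 4 KiB page
 * alignment.
 */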

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_i915_gem_mmap_gtt *args = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        mutex_lock(&dev->struct_mutex);

        obj_priv = to_intel_bo(obj);

        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to mmap a purgeable buffer\n");
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        if (!obj_priv->mmap_offset) {
                ret = i915_gem_create_mmap_offset(obj);
                if (ret) {
                        drm_gem_object_unreference(obj);
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
                }
        }

        args->offset = obj_priv->mmap_offset;

        /*
         * Pull it into the GTT so that we have a page list (makes the
         * initial fault faster and any subsequent flushing possible).
         */
        if (!obj_priv->agp_mem) {
                ret = i915_gem_object_bind_to_gtt(obj, 0);
                if (ret) {
                        drm_gem_object_unreference(obj);
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
                }
                list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
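
/*
 * Usage sketch (assumed libdrm-based userspace, not part of this file):
 * the fake offset returned here is fed to a regular mmap(2) on the DRM fd,
 * and the fault handler above then binds the object and inserts the GTT
 * PTEs on first access.
 *
 *      #include <stdint.h>
 *      #include <sys/mman.h>
 *      #include <xf86drm.h>
 *      #include "i915_drm.h"
 *
 *      struct drm_i915_gem_mmap_gtt mmap_gtt = { .handle = handle };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_gtt) == 0) {
 *              void *gtt_ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                                   MAP_SHARED, fd, mmap_gtt.offset);
 *              // writes through gtt_ptr go through the detiling aperture
 *      }
 */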

void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count = obj->size / PAGE_SIZE;
        int i;

        BUG_ON(obj_priv->pages_refcount == 0);
        BUG_ON(obj_priv->madv == __I915_MADV_PURGED);

        if (--obj_priv->pages_refcount != 0)
                return;

        if (obj_priv->tiling_mode != I915_TILING_NONE)
                i915_gem_object_save_bit_17_swizzle(obj);

        if (obj_priv->madv == I915_MADV_DONTNEED)
                obj_priv->dirty = 0;

        for (i = 0; i < page_count; i++) {
                if (obj_priv->dirty)
                        set_page_dirty(obj_priv->pages[i]);

                if (obj_priv->madv == I915_MADV_WILLNEED)
                        mark_page_accessed(obj_priv->pages[i]);

                page_cache_release(obj_priv->pages[i]);
        }
        obj_priv->dirty = 0;

        drm_free_large(obj_priv->pages);
        obj_priv->pages = NULL;
}
1485
1486static void
Eric Anholtce44b0e2008-11-06 16:00:31 -08001487i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001488{
1489 struct drm_device *dev = obj->dev;
1490 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001491 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001492
1493 /* Add a reference if we're newly entering the active list. */
1494 if (!obj_priv->active) {
1495 drm_gem_object_reference(obj);
1496 obj_priv->active = 1;
1497 }
1498 /* Move from whatever list we were on to the tail of execution. */
Carl Worth5e118f42009-03-20 11:54:25 -07001499 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001500 list_move_tail(&obj_priv->list,
1501 &dev_priv->mm.active_list);
Carl Worth5e118f42009-03-20 11:54:25 -07001502 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001503 obj_priv->last_rendering_seqno = seqno;
Eric Anholt673a3942008-07-30 12:06:12 -07001504}
1505
Eric Anholtce44b0e2008-11-06 16:00:31 -08001506static void
1507i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1508{
1509 struct drm_device *dev = obj->dev;
1510 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001511 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001512
1513 BUG_ON(!obj_priv->active);
1514 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1515 obj_priv->last_rendering_seqno = 0;
1516}
Eric Anholt673a3942008-07-30 12:06:12 -07001517
Chris Wilson963b4832009-09-20 23:03:54 +01001518/* Immediately discard the backing storage */
1519static void
1520i915_gem_object_truncate(struct drm_gem_object *obj)
1521{
Daniel Vetter23010e42010-03-08 13:35:02 +01001522 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001523 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001524
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001525 inode = obj->filp->f_path.dentry->d_inode;
1526 if (inode->i_op->truncate)
 1527 		inode->i_op->truncate(inode);
1528
1529 obj_priv->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001530}
1531
1532static inline int
1533i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1534{
1535 return obj_priv->madv == I915_MADV_DONTNEED;
1536}
1537
Eric Anholt673a3942008-07-30 12:06:12 -07001538static void
1539i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1540{
1541 struct drm_device *dev = obj->dev;
1542 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001543 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001544
1545 i915_verify_inactive(dev, __FILE__, __LINE__);
1546 if (obj_priv->pin_count != 0)
1547 list_del_init(&obj_priv->list);
1548 else
1549 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1550
Daniel Vetter99fcb762010-02-07 16:20:18 +01001551 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1552
Eric Anholtce44b0e2008-11-06 16:00:31 -08001553 obj_priv->last_rendering_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001554 if (obj_priv->active) {
1555 obj_priv->active = 0;
1556 drm_gem_object_unreference(obj);
1557 }
1558 i915_verify_inactive(dev, __FILE__, __LINE__);
1559}
1560
Daniel Vetter63560392010-02-19 11:51:59 +01001561static void
1562i915_gem_process_flushing_list(struct drm_device *dev,
1563 uint32_t flush_domains, uint32_t seqno)
1564{
1565 drm_i915_private_t *dev_priv = dev->dev_private;
1566 struct drm_i915_gem_object *obj_priv, *next;
1567
1568 list_for_each_entry_safe(obj_priv, next,
1569 &dev_priv->mm.gpu_write_list,
1570 gpu_write_list) {
Daniel Vettera8089e82010-04-09 19:05:09 +00001571 struct drm_gem_object *obj = &obj_priv->base;
Daniel Vetter63560392010-02-19 11:51:59 +01001572
1573 if ((obj->write_domain & flush_domains) ==
1574 obj->write_domain) {
1575 uint32_t old_write_domain = obj->write_domain;
1576
1577 obj->write_domain = 0;
1578 list_del_init(&obj_priv->gpu_write_list);
1579 i915_gem_object_move_to_active(obj, seqno);
1580
1581 /* update the fence lru list */
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001582 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1583 struct drm_i915_fence_reg *reg =
1584 &dev_priv->fence_regs[obj_priv->fence_reg];
1585 list_move_tail(&reg->lru_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001586 &dev_priv->mm.fence_list);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001587 }
Daniel Vetter63560392010-02-19 11:51:59 +01001588
1589 trace_i915_gem_object_change_domain(obj,
1590 obj->read_domains,
1591 old_write_domain);
1592 }
1593 }
1594}
1595
Jesse Barnese552eb72010-04-21 11:39:23 -07001596#define PIPE_CONTROL_FLUSH(addr) \
1597 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
1598 PIPE_CONTROL_DEPTH_STALL); \
1599 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
1600 OUT_RING(0); \
1601 OUT_RING(0); \
1602
Eric Anholt673a3942008-07-30 12:06:12 -07001603/**
1604 * Creates a new sequence number, emitting a write of it to the status page
1605 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1606 *
 1607 * Must be called with dev->struct_mutex held.
1608 *
1609 * Returned sequence numbers are nonzero on success.
1610 */
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001611uint32_t
Eric Anholtb9624422009-06-03 07:27:35 +00001612i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1613 uint32_t flush_domains)
Eric Anholt673a3942008-07-30 12:06:12 -07001614{
1615 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtb9624422009-06-03 07:27:35 +00001616 struct drm_i915_file_private *i915_file_priv = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001617 struct drm_i915_gem_request *request;
1618 uint32_t seqno;
1619 int was_empty;
1620 RING_LOCALS;
1621
Eric Anholtb9624422009-06-03 07:27:35 +00001622 if (file_priv != NULL)
1623 i915_file_priv = file_priv->driver_priv;
1624
Eric Anholt9a298b22009-03-24 12:23:04 -07001625 request = kzalloc(sizeof(*request), GFP_KERNEL);
Eric Anholt673a3942008-07-30 12:06:12 -07001626 if (request == NULL)
1627 return 0;
1628
1629 /* Grab the seqno we're going to make this request be, and bump the
1630 * next (skipping 0 so it can be the reserved no-seqno value).
1631 */
1632 seqno = dev_priv->mm.next_gem_seqno;
1633 dev_priv->mm.next_gem_seqno++;
1634 if (dev_priv->mm.next_gem_seqno == 0)
1635 dev_priv->mm.next_gem_seqno++;
1636
Jesse Barnese552eb72010-04-21 11:39:23 -07001637 if (HAS_PIPE_CONTROL(dev)) {
1638 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
Eric Anholt673a3942008-07-30 12:06:12 -07001639
Jesse Barnese552eb72010-04-21 11:39:23 -07001640 /*
1641 * Workaround qword write incoherence by flushing the
1642 * PIPE_NOTIFY buffers out to memory before requesting
1643 * an interrupt.
1644 */
1645 BEGIN_LP_RING(32);
1646 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1647 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
1648 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1649 OUT_RING(seqno);
1650 OUT_RING(0);
1651 PIPE_CONTROL_FLUSH(scratch_addr);
1652 scratch_addr += 128; /* write to separate cachelines */
1653 PIPE_CONTROL_FLUSH(scratch_addr);
1654 scratch_addr += 128;
1655 PIPE_CONTROL_FLUSH(scratch_addr);
1656 scratch_addr += 128;
1657 PIPE_CONTROL_FLUSH(scratch_addr);
1658 scratch_addr += 128;
1659 PIPE_CONTROL_FLUSH(scratch_addr);
1660 scratch_addr += 128;
1661 PIPE_CONTROL_FLUSH(scratch_addr);
1662 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1663 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
1664 PIPE_CONTROL_NOTIFY);
1665 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1666 OUT_RING(seqno);
1667 OUT_RING(0);
1668 ADVANCE_LP_RING();
1669 } else {
1670 BEGIN_LP_RING(4);
1671 OUT_RING(MI_STORE_DWORD_INDEX);
1672 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1673 OUT_RING(seqno);
1674
1675 OUT_RING(MI_USER_INTERRUPT);
1676 ADVANCE_LP_RING();
1677 }
Eric Anholt673a3942008-07-30 12:06:12 -07001678
Zhao Yakui44d98a62009-10-09 11:39:40 +08001679 DRM_DEBUG_DRIVER("%d\n", seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001680
1681 request->seqno = seqno;
1682 request->emitted_jiffies = jiffies;
Eric Anholt673a3942008-07-30 12:06:12 -07001683 was_empty = list_empty(&dev_priv->mm.request_list);
1684 list_add_tail(&request->list, &dev_priv->mm.request_list);
Eric Anholtb9624422009-06-03 07:27:35 +00001685 if (i915_file_priv) {
1686 list_add_tail(&request->client_list,
1687 &i915_file_priv->mm.request_list);
1688 } else {
1689 INIT_LIST_HEAD(&request->client_list);
1690 }
Eric Anholt673a3942008-07-30 12:06:12 -07001691
Eric Anholtce44b0e2008-11-06 16:00:31 -08001692 /* Associate any objects on the flushing list matching the write
1693 * domain we're flushing with our flush.
1694 */
Daniel Vetter63560392010-02-19 11:51:59 +01001695 if (flush_domains != 0)
1696 i915_gem_process_flushing_list(dev, flush_domains, seqno);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001697
Ben Gamarif65d9422009-09-14 17:48:44 -04001698 if (!dev_priv->mm.suspended) {
1699 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1700 if (was_empty)
1701 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1702 }
Eric Anholt673a3942008-07-30 12:06:12 -07001703 return seqno;
1704}
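/*
 * Note added for clarity: because the next_gem_seqno bump above skips zero,
 * a return value of 0 from i915_add_request() can unambiguously mean
 * "request allocation failed", which callers such as i915_gpu_idle() check.
 */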
1705
1706/**
1707 * Command execution barrier
1708 *
1709 * Ensures that all commands in the ring are finished
1710 * before signalling the CPU
1711 */
Eric Anholt3043c602008-10-02 12:24:47 -07001712static uint32_t
Eric Anholt673a3942008-07-30 12:06:12 -07001713i915_retire_commands(struct drm_device *dev)
1714{
1715 drm_i915_private_t *dev_priv = dev->dev_private;
1716 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1717 uint32_t flush_domains = 0;
1718 RING_LOCALS;
1719
1720 /* The sampler always gets flushed on i965 (sigh) */
1721 if (IS_I965G(dev))
1722 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1723 BEGIN_LP_RING(2);
1724 OUT_RING(cmd);
1725 OUT_RING(0); /* noop */
1726 ADVANCE_LP_RING();
1727 return flush_domains;
1728}
1729
1730/**
 1731 * Moves buffers associated with the given request's seqno from the active list
 1732 * to the flushing or inactive list as appropriate, potentially freeing them.
1733 */
1734static void
1735i915_gem_retire_request(struct drm_device *dev,
1736 struct drm_i915_gem_request *request)
1737{
1738 drm_i915_private_t *dev_priv = dev->dev_private;
1739
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001740 trace_i915_gem_request_retire(dev, request->seqno);
1741
Eric Anholt673a3942008-07-30 12:06:12 -07001742 /* Move any buffers on the active list that are no longer referenced
1743 * by the ringbuffer to the flushing/inactive lists as appropriate.
1744 */
Carl Worth5e118f42009-03-20 11:54:25 -07001745 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001746 while (!list_empty(&dev_priv->mm.active_list)) {
1747 struct drm_gem_object *obj;
1748 struct drm_i915_gem_object *obj_priv;
1749
1750 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1751 struct drm_i915_gem_object,
1752 list);
Daniel Vettera8089e82010-04-09 19:05:09 +00001753 obj = &obj_priv->base;
Eric Anholt673a3942008-07-30 12:06:12 -07001754
1755 /* If the seqno being retired doesn't match the oldest in the
1756 * list, then the oldest in the list must still be newer than
1757 * this seqno.
1758 */
1759 if (obj_priv->last_rendering_seqno != request->seqno)
Carl Worth5e118f42009-03-20 11:54:25 -07001760 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001761
Eric Anholt673a3942008-07-30 12:06:12 -07001762#if WATCH_LRU
1763 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1764 __func__, request->seqno, obj);
1765#endif
1766
Eric Anholtce44b0e2008-11-06 16:00:31 -08001767 if (obj->write_domain != 0)
1768 i915_gem_object_move_to_flushing(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001769 else {
1770 /* Take a reference on the object so it won't be
1771 * freed while the spinlock is held. The list
1772 * protection for this spinlock is safe when breaking
1773 * the lock like this since the next thing we do
1774 * is just get the head of the list again.
1775 */
1776 drm_gem_object_reference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001777 i915_gem_object_move_to_inactive(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001778 spin_unlock(&dev_priv->mm.active_list_lock);
1779 drm_gem_object_unreference(obj);
1780 spin_lock(&dev_priv->mm.active_list_lock);
1781 }
Eric Anholt673a3942008-07-30 12:06:12 -07001782 }
Carl Worth5e118f42009-03-20 11:54:25 -07001783out:
1784 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001785}
1786
1787/**
 1788 * Returns true if seq1 is at or after seq2, using a wrap-safe comparison.
1789 */
Ben Gamari22be1722009-09-14 17:48:43 -04001790bool
Eric Anholt673a3942008-07-30 12:06:12 -07001791i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1792{
1793 return (int32_t)(seq1 - seq2) >= 0;
1794}
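/*
 * Worked example (added, not from the source): the signed subtraction makes
 * the comparison wrap-safe. With seq1 = 0x00000002 and seq2 = 0xfffffffd,
 * seq1 - seq2 is 5, so (int32_t)5 >= 0 and seq1 is correctly treated as the
 * more recent sequence number even though the counter wrapped past zero.
 */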
1795
1796uint32_t
1797i915_get_gem_seqno(struct drm_device *dev)
1798{
1799 drm_i915_private_t *dev_priv = dev->dev_private;
1800
Jesse Barnes1918ad72010-04-23 09:32:23 -07001801 if (HAS_PIPE_CONTROL(dev))
Jesse Barnese552eb72010-04-21 11:39:23 -07001802 return ((volatile u32 *)(dev_priv->seqno_page))[0];
1803 else
1804 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
Eric Anholt673a3942008-07-30 12:06:12 -07001805}
1806
1807/**
1808 * This function clears the request list as sequence numbers are passed.
1809 */
1810void
1811i915_gem_retire_requests(struct drm_device *dev)
1812{
1813 drm_i915_private_t *dev_priv = dev->dev_private;
1814 uint32_t seqno;
1815
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001816 if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001817 return;
1818
Eric Anholt673a3942008-07-30 12:06:12 -07001819 seqno = i915_get_gem_seqno(dev);
1820
1821 while (!list_empty(&dev_priv->mm.request_list)) {
1822 struct drm_i915_gem_request *request;
1823 uint32_t retiring_seqno;
1824
1825 request = list_first_entry(&dev_priv->mm.request_list,
1826 struct drm_i915_gem_request,
1827 list);
1828 retiring_seqno = request->seqno;
1829
1830 if (i915_seqno_passed(seqno, retiring_seqno) ||
Ben Gamariba1234d2009-09-14 17:48:47 -04001831 atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001832 i915_gem_retire_request(dev, request);
1833
1834 list_del(&request->list);
Eric Anholtb9624422009-06-03 07:27:35 +00001835 list_del(&request->client_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07001836 kfree(request);
Eric Anholt673a3942008-07-30 12:06:12 -07001837 } else
1838 break;
1839 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001840
 1841 	if (unlikely(dev_priv->trace_irq_seqno &&
1842 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1843 i915_user_irq_put(dev);
1844 dev_priv->trace_irq_seqno = 0;
1845 }
Eric Anholt673a3942008-07-30 12:06:12 -07001846}
1847
1848void
1849i915_gem_retire_work_handler(struct work_struct *work)
1850{
1851 drm_i915_private_t *dev_priv;
1852 struct drm_device *dev;
1853
1854 dev_priv = container_of(work, drm_i915_private_t,
1855 mm.retire_work.work);
1856 dev = dev_priv->dev;
1857
1858 mutex_lock(&dev->struct_mutex);
1859 i915_gem_retire_requests(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07001860 if (!dev_priv->mm.suspended &&
1861 !list_empty(&dev_priv->mm.request_list))
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001862 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Eric Anholt673a3942008-07-30 12:06:12 -07001863 mutex_unlock(&dev->struct_mutex);
1864}
1865
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001866int
Daniel Vetter48764bf2009-09-15 22:57:32 +02001867i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
Eric Anholt673a3942008-07-30 12:06:12 -07001868{
1869 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001870 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001871 int ret = 0;
1872
1873 BUG_ON(seqno == 0);
1874
Ben Gamariba1234d2009-09-14 17:48:47 -04001875 if (atomic_read(&dev_priv->mm.wedged))
Ben Gamariffed1d02009-09-14 17:48:41 -04001876 return -EIO;
1877
Eric Anholt673a3942008-07-30 12:06:12 -07001878 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
Eric Anholtbad720f2009-10-22 16:11:14 -07001879 if (HAS_PCH_SPLIT(dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001880 ier = I915_READ(DEIER) | I915_READ(GTIER);
1881 else
1882 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001883 if (!ier) {
1884 DRM_ERROR("something (likely vbetool) disabled "
1885 "interrupts, re-enabling\n");
1886 i915_driver_irq_preinstall(dev);
1887 i915_driver_irq_postinstall(dev);
1888 }
1889
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001890 trace_i915_gem_request_wait_begin(dev, seqno);
1891
Eric Anholt673a3942008-07-30 12:06:12 -07001892 dev_priv->mm.waiting_gem_seqno = seqno;
1893 i915_user_irq_get(dev);
Daniel Vetter48764bf2009-09-15 22:57:32 +02001894 if (interruptible)
1895 ret = wait_event_interruptible(dev_priv->irq_queue,
1896 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1897 atomic_read(&dev_priv->mm.wedged));
1898 else
1899 wait_event(dev_priv->irq_queue,
1900 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1901 atomic_read(&dev_priv->mm.wedged));
1902
Eric Anholt673a3942008-07-30 12:06:12 -07001903 i915_user_irq_put(dev);
1904 dev_priv->mm.waiting_gem_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001905
1906 trace_i915_gem_request_wait_end(dev, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001907 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001908 if (atomic_read(&dev_priv->mm.wedged))
Eric Anholt673a3942008-07-30 12:06:12 -07001909 ret = -EIO;
1910
1911 if (ret && ret != -ERESTARTSYS)
1912 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1913 __func__, ret, seqno, i915_get_gem_seqno(dev));
1914
1915 /* Directly dispatch request retiring. While we have the work queue
1916 * to handle this, the waiter on a request often wants an associated
1917 * buffer to have made it to the inactive list, and we would need
1918 * a separate wait queue to handle that.
1919 */
1920 if (ret == 0)
1921 i915_gem_retire_requests(dev);
1922
1923 return ret;
1924}
1925
Daniel Vetter48764bf2009-09-15 22:57:32 +02001926/**
1927 * Waits for a sequence number to be signaled, and cleans up the
1928 * request and object lists appropriately for that event.
1929 */
1930static int
1931i915_wait_request(struct drm_device *dev, uint32_t seqno)
1932{
1933 return i915_do_wait_request(dev, seqno, 1);
1934}
1935
Eric Anholt673a3942008-07-30 12:06:12 -07001936static void
1937i915_gem_flush(struct drm_device *dev,
1938 uint32_t invalidate_domains,
1939 uint32_t flush_domains)
1940{
1941 drm_i915_private_t *dev_priv = dev->dev_private;
1942 uint32_t cmd;
1943 RING_LOCALS;
1944
1945#if WATCH_EXEC
1946 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1947 invalidate_domains, flush_domains);
1948#endif
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001949 trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
1950 invalidate_domains, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07001951
1952 if (flush_domains & I915_GEM_DOMAIN_CPU)
1953 drm_agp_chipset_flush(dev);
1954
Chris Wilson21d509e2009-06-06 09:46:02 +01001955 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
Eric Anholt673a3942008-07-30 12:06:12 -07001956 /*
1957 * read/write caches:
1958 *
1959 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1960 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1961 * also flushed at 2d versus 3d pipeline switches.
1962 *
1963 * read-only caches:
1964 *
1965 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1966 * MI_READ_FLUSH is set, and is always flushed on 965.
1967 *
1968 * I915_GEM_DOMAIN_COMMAND may not exist?
1969 *
1970 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1971 * invalidated when MI_EXE_FLUSH is set.
1972 *
1973 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1974 * invalidated with every MI_FLUSH.
1975 *
1976 * TLBs:
1977 *
1978 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
 1979 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
1980 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1981 * are flushed at any MI_FLUSH.
1982 */
1983
1984 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1985 if ((invalidate_domains|flush_domains) &
1986 I915_GEM_DOMAIN_RENDER)
1987 cmd &= ~MI_NO_WRITE_FLUSH;
1988 if (!IS_I965G(dev)) {
1989 /*
1990 * On the 965, the sampler cache always gets flushed
1991 * and this bit is reserved.
1992 */
1993 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1994 cmd |= MI_READ_FLUSH;
1995 }
1996 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1997 cmd |= MI_EXE_FLUSH;
1998
1999#if WATCH_EXEC
2000 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
2001#endif
2002 BEGIN_LP_RING(2);
2003 OUT_RING(cmd);
Daniel Vetter48764bf2009-09-15 22:57:32 +02002004 OUT_RING(MI_NOOP);
Eric Anholt673a3942008-07-30 12:06:12 -07002005 ADVANCE_LP_RING();
2006 }
2007}
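/*
 * Usage sketch (mirrors callers elsewhere in this file, e.g. i915_gpu_idle()):
 * a GPU-domain flush is normally paired with a request so completion can be
 * waited on; a zero seqno means the request could not be allocated.
 *
 *	i915_gem_flush(dev, obj->write_domain, obj->write_domain);
 *	seqno = i915_add_request(dev, NULL, obj->write_domain);
 *	ret = i915_wait_request(dev, seqno);
 */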
2008
2009/**
2010 * Ensures that all rendering to the object has completed and the object is
2011 * safe to unbind from the GTT or access from the CPU.
2012 */
2013static int
2014i915_gem_object_wait_rendering(struct drm_gem_object *obj)
2015{
2016 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002017 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002018 int ret;
2019
Eric Anholte47c68e2008-11-14 13:35:19 -08002020 /* This function only exists to support waiting for existing rendering,
2021 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07002022 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002023 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07002024
2025 /* If there is rendering queued on the buffer being evicted, wait for
2026 * it.
2027 */
2028 if (obj_priv->active) {
2029#if WATCH_BUF
2030 DRM_INFO("%s: object %p wait for seqno %08x\n",
2031 __func__, obj, obj_priv->last_rendering_seqno);
2032#endif
2033 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
2034 if (ret != 0)
2035 return ret;
2036 }
2037
2038 return 0;
2039}
2040
2041/**
2042 * Unbinds an object from the GTT aperture.
2043 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002044int
Eric Anholt673a3942008-07-30 12:06:12 -07002045i915_gem_object_unbind(struct drm_gem_object *obj)
2046{
2047 struct drm_device *dev = obj->dev;
Daniel Vetter4a87b8c2010-02-19 11:51:57 +01002048 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002049 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002050 int ret = 0;
2051
2052#if WATCH_BUF
2053 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
2054 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
2055#endif
2056 if (obj_priv->gtt_space == NULL)
2057 return 0;
2058
2059 if (obj_priv->pin_count != 0) {
2060 DRM_ERROR("Attempting to unbind pinned buffer\n");
2061 return -EINVAL;
2062 }
2063
Eric Anholt5323fd02009-09-09 11:50:45 -07002064 /* blow away mappings if mapped through GTT */
2065 i915_gem_release_mmap(obj);
2066
Eric Anholt673a3942008-07-30 12:06:12 -07002067 /* Move the object to the CPU domain to ensure that
2068 * any possible CPU writes while it's not in the GTT
2069 * are flushed when we go to remap it. This will
2070 * also ensure that all pending GPU writes are finished
2071 * before we unbind.
2072 */
Eric Anholte47c68e2008-11-14 13:35:19 -08002073 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07002074 if (ret) {
Eric Anholte47c68e2008-11-14 13:35:19 -08002075 if (ret != -ERESTARTSYS)
2076 DRM_ERROR("set_domain failed: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07002077 return ret;
2078 }
2079
Eric Anholt5323fd02009-09-09 11:50:45 -07002080 BUG_ON(obj_priv->active);
2081
Daniel Vetter96b47b62009-12-15 17:50:00 +01002082 /* release the fence reg _after_ flushing */
2083 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2084 i915_gem_clear_fence_reg(obj);
2085
Eric Anholt673a3942008-07-30 12:06:12 -07002086 if (obj_priv->agp_mem != NULL) {
2087 drm_unbind_agp(obj_priv->agp_mem);
2088 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2089 obj_priv->agp_mem = NULL;
2090 }
2091
Eric Anholt856fa192009-03-19 14:10:50 -07002092 i915_gem_object_put_pages(obj);
Chris Wilsona32808c2009-09-20 21:29:47 +01002093 BUG_ON(obj_priv->pages_refcount);
Eric Anholt673a3942008-07-30 12:06:12 -07002094
2095 if (obj_priv->gtt_space) {
2096 atomic_dec(&dev->gtt_count);
2097 atomic_sub(obj->size, &dev->gtt_memory);
2098
2099 drm_mm_put_block(obj_priv->gtt_space);
2100 obj_priv->gtt_space = NULL;
2101 }
2102
2103 /* Remove ourselves from the LRU list if present. */
Daniel Vetter4a87b8c2010-02-19 11:51:57 +01002104 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002105 if (!list_empty(&obj_priv->list))
2106 list_del_init(&obj_priv->list);
Daniel Vetter4a87b8c2010-02-19 11:51:57 +01002107 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002108
Chris Wilson963b4832009-09-20 23:03:54 +01002109 if (i915_gem_object_is_purgeable(obj_priv))
2110 i915_gem_object_truncate(obj);
2111
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002112 trace_i915_gem_object_unbind(obj);
2113
Eric Anholt673a3942008-07-30 12:06:12 -07002114 return 0;
2115}
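/*
 * Summary added for clarity: the teardown order above is mmap revocation,
 * CPU-domain flush (which also waits for outstanding rendering), fence
 * release, AGP unbind, page release, GTT node free and, for DONTNEED
 * objects, immediate truncation of the backing store.
 */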
2116
Chris Wilson07f73f62009-09-14 16:50:30 +01002117static struct drm_gem_object *
2118i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2119{
2120 drm_i915_private_t *dev_priv = dev->dev_private;
2121 struct drm_i915_gem_object *obj_priv;
2122 struct drm_gem_object *best = NULL;
2123 struct drm_gem_object *first = NULL;
2124
2125 /* Try to find the smallest clean object */
2126 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
Daniel Vettera8089e82010-04-09 19:05:09 +00002127 struct drm_gem_object *obj = &obj_priv->base;
Chris Wilson07f73f62009-09-14 16:50:30 +01002128 if (obj->size >= min_size) {
Chris Wilson963b4832009-09-20 23:03:54 +01002129 if ((!obj_priv->dirty ||
2130 i915_gem_object_is_purgeable(obj_priv)) &&
Chris Wilson07f73f62009-09-14 16:50:30 +01002131 (!best || obj->size < best->size)) {
2132 best = obj;
2133 if (best->size == min_size)
2134 return best;
2135 }
2136 if (!first)
2137 first = obj;
2138 }
2139 }
2140
2141 return best ? best : first;
2142}
2143
Eric Anholt673a3942008-07-30 12:06:12 -07002144static int
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002145i915_gpu_idle(struct drm_device *dev)
2146{
2147 drm_i915_private_t *dev_priv = dev->dev_private;
2148 bool lists_empty;
2149 uint32_t seqno;
2150
2151 spin_lock(&dev_priv->mm.active_list_lock);
2152 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
2153 list_empty(&dev_priv->mm.active_list);
2154 spin_unlock(&dev_priv->mm.active_list_lock);
2155
2156 if (lists_empty)
2157 return 0;
2158
2159 /* Flush everything onto the inactive list. */
2160 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2161 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2162 if (seqno == 0)
2163 return -ENOMEM;
2164
2165 return i915_wait_request(dev, seqno);
2166}
2167
2168static int
Chris Wilson07f73f62009-09-14 16:50:30 +01002169i915_gem_evict_everything(struct drm_device *dev)
2170{
2171 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson07f73f62009-09-14 16:50:30 +01002172 int ret;
2173 bool lists_empty;
2174
Chris Wilson07f73f62009-09-14 16:50:30 +01002175 spin_lock(&dev_priv->mm.active_list_lock);
2176 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2177 list_empty(&dev_priv->mm.flushing_list) &&
2178 list_empty(&dev_priv->mm.active_list));
2179 spin_unlock(&dev_priv->mm.active_list_lock);
2180
Chris Wilson97311292009-09-21 00:22:34 +01002181 if (lists_empty)
Chris Wilson07f73f62009-09-14 16:50:30 +01002182 return -ENOSPC;
Chris Wilson07f73f62009-09-14 16:50:30 +01002183
 2184 	/* Flush everything (onto the inactive lists) and evict */
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002185 ret = i915_gpu_idle(dev);
Chris Wilson07f73f62009-09-14 16:50:30 +01002186 if (ret)
2187 return ret;
2188
Daniel Vetter99fcb762010-02-07 16:20:18 +01002189 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2190
Chris Wilsonab5ee572009-09-20 19:25:47 +01002191 ret = i915_gem_evict_from_inactive_list(dev);
Chris Wilson07f73f62009-09-14 16:50:30 +01002192 if (ret)
2193 return ret;
2194
2195 spin_lock(&dev_priv->mm.active_list_lock);
2196 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2197 list_empty(&dev_priv->mm.flushing_list) &&
2198 list_empty(&dev_priv->mm.active_list));
2199 spin_unlock(&dev_priv->mm.active_list_lock);
2200 BUG_ON(!lists_empty);
2201
Eric Anholt673a3942008-07-30 12:06:12 -07002202 return 0;
2203}
2204
2205static int
Chris Wilson07f73f62009-09-14 16:50:30 +01002206i915_gem_evict_something(struct drm_device *dev, int min_size)
Eric Anholt673a3942008-07-30 12:06:12 -07002207{
2208 drm_i915_private_t *dev_priv = dev->dev_private;
2209 struct drm_gem_object *obj;
Chris Wilson07f73f62009-09-14 16:50:30 +01002210 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002211
2212 for (;;) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002213 i915_gem_retire_requests(dev);
2214
Eric Anholt673a3942008-07-30 12:06:12 -07002215 /* If there's an inactive buffer available now, grab it
2216 * and be done.
2217 */
Chris Wilson07f73f62009-09-14 16:50:30 +01002218 obj = i915_gem_find_inactive_object(dev, min_size);
2219 if (obj) {
2220 struct drm_i915_gem_object *obj_priv;
2221
Eric Anholt673a3942008-07-30 12:06:12 -07002222#if WATCH_LRU
2223 DRM_INFO("%s: evicting %p\n", __func__, obj);
2224#endif
Daniel Vetter23010e42010-03-08 13:35:02 +01002225 obj_priv = to_intel_bo(obj);
Chris Wilson07f73f62009-09-14 16:50:30 +01002226 BUG_ON(obj_priv->pin_count != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07002227 BUG_ON(obj_priv->active);
2228
2229 /* Wait on the rendering and unbind the buffer. */
Chris Wilson07f73f62009-09-14 16:50:30 +01002230 return i915_gem_object_unbind(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002231 }
2232
2233 /* If we didn't get anything, but the ring is still processing
Chris Wilson07f73f62009-09-14 16:50:30 +01002234	 * requests, wait for the oldest outstanding one to finish and hopefully leave us
2235 * a buffer to evict.
Eric Anholt673a3942008-07-30 12:06:12 -07002236 */
2237 if (!list_empty(&dev_priv->mm.request_list)) {
2238 struct drm_i915_gem_request *request;
2239
2240 request = list_first_entry(&dev_priv->mm.request_list,
2241 struct drm_i915_gem_request,
2242 list);
2243
2244 ret = i915_wait_request(dev, request->seqno);
2245 if (ret)
Chris Wilson07f73f62009-09-14 16:50:30 +01002246 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002247
Chris Wilson07f73f62009-09-14 16:50:30 +01002248 continue;
Eric Anholt673a3942008-07-30 12:06:12 -07002249 }
2250
2251 /* If we didn't have anything on the request list but there
2252 * are buffers awaiting a flush, emit one and try again.
2253 * When we wait on it, those buffers waiting for that flush
2254 * will get moved to inactive.
2255 */
2256 if (!list_empty(&dev_priv->mm.flushing_list)) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002257 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07002258
Chris Wilson9a1e2582009-09-20 20:16:50 +01002259 /* Find an object that we can immediately reuse */
2260 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
Daniel Vettera8089e82010-04-09 19:05:09 +00002261 obj = &obj_priv->base;
Chris Wilson9a1e2582009-09-20 20:16:50 +01002262 if (obj->size >= min_size)
2263 break;
Eric Anholt673a3942008-07-30 12:06:12 -07002264
Chris Wilson9a1e2582009-09-20 20:16:50 +01002265 obj = NULL;
2266 }
Eric Anholt673a3942008-07-30 12:06:12 -07002267
Chris Wilson9a1e2582009-09-20 20:16:50 +01002268 if (obj != NULL) {
2269 uint32_t seqno;
Chris Wilson07f73f62009-09-14 16:50:30 +01002270
Chris Wilson9a1e2582009-09-20 20:16:50 +01002271 i915_gem_flush(dev,
2272 obj->write_domain,
2273 obj->write_domain);
2274 seqno = i915_add_request(dev, NULL, obj->write_domain);
2275 if (seqno == 0)
2276 return -ENOMEM;
Chris Wilson9a1e2582009-09-20 20:16:50 +01002277 continue;
2278 }
Eric Anholt673a3942008-07-30 12:06:12 -07002279 }
2280
Chris Wilson07f73f62009-09-14 16:50:30 +01002281 /* If we didn't do any of the above, there's no single buffer
2282 * large enough to swap out for the new one, so just evict
2283 * everything and start again. (This should be rare.)
Eric Anholt673a3942008-07-30 12:06:12 -07002284 */
Chris Wilson97311292009-09-21 00:22:34 +01002285 if (!list_empty (&dev_priv->mm.inactive_list))
Chris Wilsonab5ee572009-09-20 19:25:47 +01002286 return i915_gem_evict_from_inactive_list(dev);
Chris Wilson97311292009-09-21 00:22:34 +01002287 else
Chris Wilson07f73f62009-09-14 16:50:30 +01002288 return i915_gem_evict_everything(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07002289 }
Keith Packardac94a962008-11-20 23:30:27 -08002290}
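/*
 * Summary added for clarity: each pass of the loop above tries the cheapest
 * option first - reuse a clean (or purgeable) inactive buffer, then wait on
 * the oldest outstanding request, then flush a large-enough buffer off the
 * flushing list, and only as a last resort evict the whole inactive list or
 * everything.
 */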
2291
Ben Gamari6911a9b2009-04-02 11:24:54 -07002292int
Chris Wilson4bdadb92010-01-27 13:36:32 +00002293i915_gem_object_get_pages(struct drm_gem_object *obj,
2294 gfp_t gfpmask)
Eric Anholt673a3942008-07-30 12:06:12 -07002295{
Daniel Vetter23010e42010-03-08 13:35:02 +01002296 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002297 int page_count, i;
2298 struct address_space *mapping;
2299 struct inode *inode;
2300 struct page *page;
Eric Anholt673a3942008-07-30 12:06:12 -07002301
Eric Anholt856fa192009-03-19 14:10:50 -07002302 if (obj_priv->pages_refcount++ != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002303 return 0;
2304
2305 /* Get the list of pages out of our struct file. They'll be pinned
2306 * at this point until we release them.
2307 */
2308 page_count = obj->size / PAGE_SIZE;
Eric Anholt856fa192009-03-19 14:10:50 -07002309 BUG_ON(obj_priv->pages != NULL);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07002310 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
Eric Anholt856fa192009-03-19 14:10:50 -07002311 if (obj_priv->pages == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002312 obj_priv->pages_refcount--;
Eric Anholt673a3942008-07-30 12:06:12 -07002313 return -ENOMEM;
2314 }
2315
2316 inode = obj->filp->f_path.dentry->d_inode;
2317 mapping = inode->i_mapping;
2318 for (i = 0; i < page_count; i++) {
Chris Wilson4bdadb92010-01-27 13:36:32 +00002319 page = read_cache_page_gfp(mapping, i,
2320 mapping_gfp_mask (mapping) |
2321 __GFP_COLD |
2322 gfpmask);
Chris Wilson1f2b1012010-03-12 19:52:55 +00002323 if (IS_ERR(page))
2324 goto err_pages;
2325
Eric Anholt856fa192009-03-19 14:10:50 -07002326 obj_priv->pages[i] = page;
Eric Anholt673a3942008-07-30 12:06:12 -07002327 }
Eric Anholt280b7132009-03-12 16:56:27 -07002328
2329 if (obj_priv->tiling_mode != I915_TILING_NONE)
2330 i915_gem_object_do_bit_17_swizzle(obj);
2331
Eric Anholt673a3942008-07-30 12:06:12 -07002332 return 0;
Chris Wilson1f2b1012010-03-12 19:52:55 +00002333
2334err_pages:
2335 while (i--)
2336 page_cache_release(obj_priv->pages[i]);
2337
2338 drm_free_large(obj_priv->pages);
2339 obj_priv->pages = NULL;
2340 obj_priv->pages_refcount--;
2341 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07002342}
2343
Eric Anholt4e901fd2009-10-26 16:44:17 -07002344static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2345{
2346 struct drm_gem_object *obj = reg->obj;
2347 struct drm_device *dev = obj->dev;
2348 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002349 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt4e901fd2009-10-26 16:44:17 -07002350 int regnum = obj_priv->fence_reg;
2351 uint64_t val;
2352
2353 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2354 0xfffff000) << 32;
2355 val |= obj_priv->gtt_offset & 0xfffff000;
2356 val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2357 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2358
2359 if (obj_priv->tiling_mode == I915_TILING_Y)
2360 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2361 val |= I965_FENCE_REG_VALID;
2362
2363 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2364}
2365
Jesse Barnesde151cf2008-11-12 10:03:55 -08002366static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2367{
2368 struct drm_gem_object *obj = reg->obj;
2369 struct drm_device *dev = obj->dev;
2370 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002371 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002372 int regnum = obj_priv->fence_reg;
2373 uint64_t val;
2374
2375 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2376 0xfffff000) << 32;
2377 val |= obj_priv->gtt_offset & 0xfffff000;
2378 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2379 if (obj_priv->tiling_mode == I915_TILING_Y)
2380 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2381 val |= I965_FENCE_REG_VALID;
2382
2383 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2384}
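/*
 * Worked example (illustrative, not from the source): for an X-tiled object
 * at gtt_offset 0x00100000 of size 0x00080000 with a 4096-byte stride, the
 * upper-bound field above is (0x00100000 + 0x00080000 - 4096) & 0xfffff000 =
 * 0x0017f000, the lower bound is 0x00100000 and the encoded pitch is
 * 4096 / 128 - 1 = 31.
 */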
2385
2386static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2387{
2388 struct drm_gem_object *obj = reg->obj;
2389 struct drm_device *dev = obj->dev;
2390 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002391 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002392 int regnum = obj_priv->fence_reg;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002393 int tile_width;
Eric Anholtdc529a42009-03-10 22:34:49 -07002394 uint32_t fence_reg, val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002395 uint32_t pitch_val;
2396
2397 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2398 (obj_priv->gtt_offset & (obj->size - 1))) {
Linus Torvaldsf06da262009-02-09 08:57:29 -08002399 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002400 __func__, obj_priv->gtt_offset, obj->size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002401 return;
2402 }
2403
Jesse Barnes0f973f22009-01-26 17:10:45 -08002404 if (obj_priv->tiling_mode == I915_TILING_Y &&
2405 HAS_128_BYTE_Y_TILING(dev))
2406 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002407 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002408 tile_width = 512;
2409
2410 /* Note: pitch better be a power of two tile widths */
2411 pitch_val = obj_priv->stride / tile_width;
2412 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002413
Daniel Vetterc36a2a62010-04-17 15:12:03 +02002414 if (obj_priv->tiling_mode == I915_TILING_Y &&
2415 HAS_128_BYTE_Y_TILING(dev))
2416 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2417 else
2418 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2419
Jesse Barnesde151cf2008-11-12 10:03:55 -08002420 val = obj_priv->gtt_offset;
2421 if (obj_priv->tiling_mode == I915_TILING_Y)
2422 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2423 val |= I915_FENCE_SIZE_BITS(obj->size);
2424 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2425 val |= I830_FENCE_REG_VALID;
2426
Eric Anholtdc529a42009-03-10 22:34:49 -07002427 if (regnum < 8)
2428 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2429 else
2430 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2431 I915_WRITE(fence_reg, val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002432}
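/*
 * Worked example (illustrative, not from the source): a 2048-byte stride on
 * an X-tiled object (512-byte tile width) gives pitch_val = ffs(2048 / 512)
 * - 1 = ffs(4) - 1 = 2, which is the value shifted into the
 * I830_FENCE_PITCH_SHIFT field above.
 */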
2433
2434static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2435{
2436 struct drm_gem_object *obj = reg->obj;
2437 struct drm_device *dev = obj->dev;
2438 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002439 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002440 int regnum = obj_priv->fence_reg;
2441 uint32_t val;
2442 uint32_t pitch_val;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002443 uint32_t fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002444
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002445 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
Jesse Barnesde151cf2008-11-12 10:03:55 -08002446 (obj_priv->gtt_offset & (obj->size - 1))) {
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002447 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002448 __func__, obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002449 return;
2450 }
2451
Eric Anholte76a16d2009-05-26 17:44:56 -07002452 pitch_val = obj_priv->stride / 128;
2453 pitch_val = ffs(pitch_val) - 1;
2454 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2455
Jesse Barnesde151cf2008-11-12 10:03:55 -08002456 val = obj_priv->gtt_offset;
2457 if (obj_priv->tiling_mode == I915_TILING_Y)
2458 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002459 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2460 WARN_ON(fence_size_bits & ~0x00000f00);
2461 val |= fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002462 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2463 val |= I830_FENCE_REG_VALID;
2464
2465 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002466}
2467
Daniel Vetterae3db242010-02-19 11:51:58 +01002468static int i915_find_fence_reg(struct drm_device *dev)
2469{
2470 struct drm_i915_fence_reg *reg = NULL;
2471 struct drm_i915_gem_object *obj_priv = NULL;
2472 struct drm_i915_private *dev_priv = dev->dev_private;
2473 struct drm_gem_object *obj = NULL;
2474 int i, avail, ret;
2475
2476 /* First try to find a free reg */
2477 avail = 0;
2478 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2479 reg = &dev_priv->fence_regs[i];
2480 if (!reg->obj)
2481 return i;
2482
Daniel Vetter23010e42010-03-08 13:35:02 +01002483 obj_priv = to_intel_bo(reg->obj);
Daniel Vetterae3db242010-02-19 11:51:58 +01002484 if (!obj_priv->pin_count)
2485 avail++;
2486 }
2487
2488 if (avail == 0)
2489 return -ENOSPC;
2490
2491 /* None available, try to steal one or wait for a user to finish */
2492 i = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002493 list_for_each_entry(reg, &dev_priv->mm.fence_list,
2494 lru_list) {
2495 obj = reg->obj;
2496 obj_priv = to_intel_bo(obj);
Daniel Vetterae3db242010-02-19 11:51:58 +01002497
2498 if (obj_priv->pin_count)
2499 continue;
2500
2501 /* found one! */
2502 i = obj_priv->fence_reg;
2503 break;
2504 }
2505
2506 BUG_ON(i == I915_FENCE_REG_NONE);
2507
2508 /* We only have a reference on obj from the active list. put_fence_reg
2509 * might drop that one, causing a use-after-free in it. So hold a
2510 * private reference to obj like the other callers of put_fence_reg
2511 * (set_tiling ioctl) do. */
2512 drm_gem_object_reference(obj);
2513 ret = i915_gem_object_put_fence_reg(obj);
2514 drm_gem_object_unreference(obj);
2515 if (ret != 0)
2516 return ret;
2517
2518 return i;
2519}
2520
Jesse Barnesde151cf2008-11-12 10:03:55 -08002521/**
2522 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2523 * @obj: object to map through a fence reg
2524 *
2525 * When mapping objects through the GTT, userspace wants to be able to write
2526 * to them without having to worry about swizzling if the object is tiled.
2527 *
2528 * This function walks the fence regs looking for a free one for @obj,
2529 * stealing one if it can't find any.
2530 *
2531 * It then sets up the reg based on the object's properties: address, pitch
2532 * and tiling format.
2533 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002534int
2535i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002536{
2537 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002538 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002539 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002540 struct drm_i915_fence_reg *reg = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002541 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002542
Eric Anholta09ba7f2009-08-29 12:49:51 -07002543 /* Just update our place in the LRU if our fence is getting used. */
2544 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002545 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2546 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002547 return 0;
2548 }
2549
Jesse Barnesde151cf2008-11-12 10:03:55 -08002550 switch (obj_priv->tiling_mode) {
2551 case I915_TILING_NONE:
2552 WARN(1, "allocating a fence for non-tiled object?\n");
2553 break;
2554 case I915_TILING_X:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002555 if (!obj_priv->stride)
2556 return -EINVAL;
2557 WARN((obj_priv->stride & (512 - 1)),
2558 "object 0x%08x is X tiled but has non-512B pitch\n",
2559 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002560 break;
2561 case I915_TILING_Y:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002562 if (!obj_priv->stride)
2563 return -EINVAL;
2564 WARN((obj_priv->stride & (128 - 1)),
2565 "object 0x%08x is Y tiled but has non-128B pitch\n",
2566 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002567 break;
2568 }
2569
Daniel Vetterae3db242010-02-19 11:51:58 +01002570 ret = i915_find_fence_reg(dev);
2571 if (ret < 0)
2572 return ret;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002573
Daniel Vetterae3db242010-02-19 11:51:58 +01002574 obj_priv->fence_reg = ret;
2575 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002576 list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002577
Jesse Barnesde151cf2008-11-12 10:03:55 -08002578 reg->obj = obj;
2579
Eric Anholt4e901fd2009-10-26 16:44:17 -07002580 if (IS_GEN6(dev))
2581 sandybridge_write_fence_reg(reg);
2582 else if (IS_I965G(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08002583 i965_write_fence_reg(reg);
2584 else if (IS_I9XX(dev))
2585 i915_write_fence_reg(reg);
2586 else
2587 i830_write_fence_reg(reg);
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002588
Daniel Vetterae3db242010-02-19 11:51:58 +01002589 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2590 obj_priv->tiling_mode);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002591
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002592 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002593}
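/*
 * Usage note (added): callers hold dev->struct_mutex and must already have
 * the object bound into the GTT with its tiling mode and stride set;
 * requesting a fence for an untiled object is treated as a caller bug,
 * hence the WARN above.
 */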
2594
2595/**
2596 * i915_gem_clear_fence_reg - clear out fence register info
2597 * @obj: object to clear
2598 *
2599 * Zeroes out the fence register itself and clears out the associated
2600 * data structures in dev_priv and obj_priv.
2601 */
2602static void
2603i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2604{
2605 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002606 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002607 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002608 struct drm_i915_fence_reg *reg =
2609 &dev_priv->fence_regs[obj_priv->fence_reg];
Jesse Barnesde151cf2008-11-12 10:03:55 -08002610
Eric Anholt4e901fd2009-10-26 16:44:17 -07002611 if (IS_GEN6(dev)) {
2612 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2613 (obj_priv->fence_reg * 8), 0);
2614 } else if (IS_I965G(dev)) {
Jesse Barnesde151cf2008-11-12 10:03:55 -08002615 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
Eric Anholt4e901fd2009-10-26 16:44:17 -07002616 } else {
Eric Anholtdc529a42009-03-10 22:34:49 -07002617 uint32_t fence_reg;
2618
2619 if (obj_priv->fence_reg < 8)
2620 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2621 else
2622 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2623 8) * 4;
2624
2625 I915_WRITE(fence_reg, 0);
2626 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002627
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002628 reg->obj = NULL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002629 obj_priv->fence_reg = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002630 list_del_init(&reg->lru_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002631}
2632
Eric Anholt673a3942008-07-30 12:06:12 -07002633/**
Chris Wilson52dc7d32009-06-06 09:46:01 +01002634 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2635 * to the buffer to finish, and then resets the fence register.
2636 * @obj: tiled object holding a fence register.
2637 *
2638 * Zeroes out the fence register itself and clears out the associated
2639 * data structures in dev_priv and obj_priv.
2640 */
2641int
2642i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2643{
2644 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002645 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson52dc7d32009-06-06 09:46:01 +01002646
2647 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2648 return 0;
2649
Daniel Vetter10ae9bd2010-02-01 13:59:17 +01002650 /* If we've changed tiling, GTT-mappings of the object
2651 * need to re-fault to ensure that the correct fence register
2652 * setup is in place.
2653 */
2654 i915_gem_release_mmap(obj);
2655
Chris Wilson52dc7d32009-06-06 09:46:01 +01002656 /* On the i915, GPU access to tiled buffers is via a fence,
2657 * therefore we must wait for any outstanding access to complete
2658 * before clearing the fence.
2659 */
2660 if (!IS_I965G(dev)) {
2661 int ret;
2662
2663 i915_gem_object_flush_gpu_write_domain(obj);
Chris Wilson52dc7d32009-06-06 09:46:01 +01002664 ret = i915_gem_object_wait_rendering(obj);
2665 if (ret != 0)
2666 return ret;
2667 }
2668
Daniel Vetter4a726612010-02-01 13:59:16 +01002669 i915_gem_object_flush_gtt_write_domain(obj);
Chris Wilson52dc7d32009-06-06 09:46:01 +01002670	i915_gem_clear_fence_reg(obj);
2671
2672 return 0;
2673}
2674
2675/**
Eric Anholt673a3942008-07-30 12:06:12 -07002676 * Finds free space in the GTT aperture and binds the object there.
2677 */
2678static int
2679i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2680{
2681 struct drm_device *dev = obj->dev;
2682 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002683 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002684 struct drm_mm_node *free_space;
Chris Wilson4bdadb92010-01-27 13:36:32 +00002685 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Chris Wilson07f73f62009-09-14 16:50:30 +01002686 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002687
Chris Wilsonbb6baf72009-09-22 14:24:13 +01002688 if (obj_priv->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002689 DRM_ERROR("Attempting to bind a purgeable object\n");
2690 return -EINVAL;
2691 }
2692
Eric Anholt673a3942008-07-30 12:06:12 -07002693 if (alignment == 0)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002694 alignment = i915_gem_get_gtt_alignment(obj);
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002695 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002696 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2697 return -EINVAL;
2698 }
2699
2700 search_free:
2701 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2702 obj->size, alignment, 0);
2703 if (free_space != NULL) {
2704 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2705 alignment);
2706 if (obj_priv->gtt_space != NULL) {
2707 obj_priv->gtt_space->private = obj;
2708 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2709 }
2710 }
2711 if (obj_priv->gtt_space == NULL) {
2712 /* If the gtt is empty and we're still having trouble
2713 * fitting our object in, we're out of memory.
2714 */
2715#if WATCH_LRU
2716 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2717#endif
Chris Wilson07f73f62009-09-14 16:50:30 +01002718 ret = i915_gem_evict_something(dev, obj->size);
Chris Wilson97311292009-09-21 00:22:34 +01002719 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002720 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002721
Eric Anholt673a3942008-07-30 12:06:12 -07002722 goto search_free;
2723 }
2724
2725#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02002726 DRM_INFO("Binding object of size %zd at 0x%08x\n",
Eric Anholt673a3942008-07-30 12:06:12 -07002727 obj->size, obj_priv->gtt_offset);
2728#endif
Chris Wilson4bdadb92010-01-27 13:36:32 +00002729 ret = i915_gem_object_get_pages(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002730 if (ret) {
2731 drm_mm_put_block(obj_priv->gtt_space);
2732 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002733
2734 if (ret == -ENOMEM) {
2735 /* first try to clear up some space from the GTT */
2736 ret = i915_gem_evict_something(dev, obj->size);
2737 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002738 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002739 if (gfpmask) {
2740 gfpmask = 0;
2741 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002742 }
2743
2744 return ret;
2745 }
2746
2747 goto search_free;
2748 }
2749
Eric Anholt673a3942008-07-30 12:06:12 -07002750 return ret;
2751 }
2752
Eric Anholt673a3942008-07-30 12:06:12 -07002753 /* Create an AGP memory structure pointing at our pages, and bind it
2754 * into the GTT.
2755 */
2756 obj_priv->agp_mem = drm_agp_bind_pages(dev,
Eric Anholt856fa192009-03-19 14:10:50 -07002757 obj_priv->pages,
Chris Wilson07f73f62009-09-14 16:50:30 +01002758 obj->size >> PAGE_SHIFT,
Keith Packardba1eb1d2008-10-14 19:55:10 -07002759 obj_priv->gtt_offset,
2760 obj_priv->agp_type);
Eric Anholt673a3942008-07-30 12:06:12 -07002761 if (obj_priv->agp_mem == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002762 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002763 drm_mm_put_block(obj_priv->gtt_space);
2764 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002765
2766 ret = i915_gem_evict_something(dev, obj->size);
Chris Wilson97311292009-09-21 00:22:34 +01002767 if (ret)
Chris Wilson07f73f62009-09-14 16:50:30 +01002768 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002769
2770 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002771 }
2772 atomic_inc(&dev->gtt_count);
2773 atomic_add(obj->size, &dev->gtt_memory);
2774
2775 /* Assert that the object is not currently in any GPU domain. As it
2776 * wasn't in the GTT, there shouldn't be any way it could have been in
2777 * a GPU cache
2778 */
Chris Wilson21d509e2009-06-06 09:46:02 +01002779 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2780 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002781
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002782 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2783
Eric Anholt673a3942008-07-30 12:06:12 -07002784 return 0;
2785}
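/*
 * Summary added for clarity: binding retries in two ways - if no GTT node of
 * the right size and alignment is free, something is evicted and the search
 * restarts; if page allocation fails with -ENOMEM, space is evicted and, as
 * a last resort, the allocation is retried with gfpmask cleared to 0 (i.e.
 * without __GFP_NORETRY | __GFP_NOWARN).
 */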
2786
2787void
2788i915_gem_clflush_object(struct drm_gem_object *obj)
2789{
Daniel Vetter23010e42010-03-08 13:35:02 +01002790 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002791
2792 /* If we don't have a page list set up, then we're not pinned
 2793 * to the GPU, and we can ignore the cache flush because it'll happen
2794 * again at bind time.
2795 */
Eric Anholt856fa192009-03-19 14:10:50 -07002796 if (obj_priv->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002797 return;
2798
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002799 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002800
Eric Anholt856fa192009-03-19 14:10:50 -07002801 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002802}
2803
Eric Anholte47c68e2008-11-14 13:35:19 -08002804/** Flushes any GPU write domain for the object if it's dirty. */
2805static void
2806i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2807{
2808 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002809 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002810
2811 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2812 return;
2813
2814 /* Queue the GPU write cache flushing we need. */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002815 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002816 i915_gem_flush(dev, 0, obj->write_domain);
Daniel Vetter922a2ef2010-02-19 11:52:01 +01002817 (void) i915_add_request(dev, NULL, obj->write_domain);
Daniel Vetter99fcb762010-02-07 16:20:18 +01002818 BUG_ON(obj->write_domain);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002819
2820 trace_i915_gem_object_change_domain(obj,
2821 obj->read_domains,
2822 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002823}
2824
2825/** Flushes the GTT write domain for the object if it's dirty. */
2826static void
2827i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2828{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002829 uint32_t old_write_domain;
2830
Eric Anholte47c68e2008-11-14 13:35:19 -08002831 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2832 return;
2833
2834 /* No actual flushing is required for the GTT write domain. Writes
2835 * to it immediately go to main memory as far as we know, so there's
2836 * no chipset flush. It also doesn't land in render cache.
2837 */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002838 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002839 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002840
2841 trace_i915_gem_object_change_domain(obj,
2842 obj->read_domains,
2843 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002844}
2845
2846/** Flushes the CPU write domain for the object if it's dirty. */
2847static void
2848i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2849{
2850 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002851 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002852
2853 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2854 return;
2855
2856 i915_gem_clflush_object(obj);
2857 drm_agp_chipset_flush(dev);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002858 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002859 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002860
2861 trace_i915_gem_object_change_domain(obj,
2862 obj->read_domains,
2863 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002864}
2865
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05002866void
2867i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2868{
2869 switch (obj->write_domain) {
2870 case I915_GEM_DOMAIN_GTT:
2871 i915_gem_object_flush_gtt_write_domain(obj);
2872 break;
2873 case I915_GEM_DOMAIN_CPU:
2874 i915_gem_object_flush_cpu_write_domain(obj);
2875 break;
2876 default:
2877 i915_gem_object_flush_gpu_write_domain(obj);
2878 break;
2879 }
2880}
2881
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002882/**
2883 * Moves a single object to the GTT read, and possibly write domain.
2884 *
2885 * This function returns when the move is complete, including waiting on
2886 * flushes to occur.
2887 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002888int
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002889i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2890{
Daniel Vetter23010e42010-03-08 13:35:02 +01002891 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002892 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002893 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002894
Eric Anholt02354392008-11-26 13:58:13 -08002895 /* Not valid to be called on unbound objects. */
2896 if (obj_priv->gtt_space == NULL)
2897 return -EINVAL;
2898
Eric Anholte47c68e2008-11-14 13:35:19 -08002899 i915_gem_object_flush_gpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002900 /* Wait on any GPU rendering and flushing to occur. */
Eric Anholte47c68e2008-11-14 13:35:19 -08002901 ret = i915_gem_object_wait_rendering(obj);
2902 if (ret != 0)
2903 return ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002904
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002905 old_write_domain = obj->write_domain;
2906 old_read_domains = obj->read_domains;
2907
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002908 /* If we're writing through the GTT domain, then CPU and GPU caches
2909 * will need to be invalidated at next use.
2910 */
2911 if (write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002912 obj->read_domains &= I915_GEM_DOMAIN_GTT;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002913
Eric Anholte47c68e2008-11-14 13:35:19 -08002914 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002915
2916 /* It should now be out of any other write domains, and we can update
2917 * the domain values for our changes.
2918 */
2919 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2920 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002921 if (write) {
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002922 obj->write_domain = I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002923 obj_priv->dirty = 1;
2924 }
2925
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002926 trace_i915_gem_object_change_domain(obj,
2927 old_read_domains,
2928 old_write_domain);
2929
Eric Anholte47c68e2008-11-14 13:35:19 -08002930 return 0;
2931}
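
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pattern for the GTT-domain helper above.  The example_* helper name is
 * hypothetical, the 4096-byte alignment is an arbitrary choice, and locking
 * (dev->struct_mutex must be held) plus the actual write through the GTT
 * aperture are elided.
 */
#if 0
static int example_write_through_gtt(struct drm_gem_object *obj)
{
	int ret;

	/* Bind the object into the GTT at page alignment. */
	ret = i915_gem_object_pin(obj, 4096);
	if (ret)
		return ret;

	/* Make the GTT the exclusive write domain before writing through
	 * the aperture mapping. */
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret) {
		i915_gem_object_unpin(obj);
		return ret;
	}

	/* ... write through the GTT mapping here ... */

	i915_gem_object_unpin(obj);
	return 0;
}
#endif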
2932
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002933/*
2934 * Prepare buffer for display plane. Use uninterruptible for possible flush
2935 * wait, as in modesetting process we're not supposed to be interrupted.
2936 */
2937int
2938i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2939{
2940 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002941 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002942 uint32_t old_write_domain, old_read_domains;
2943 int ret;
2944
2945 /* Not valid to be called on unbound objects. */
2946 if (obj_priv->gtt_space == NULL)
2947 return -EINVAL;
2948
2949 i915_gem_object_flush_gpu_write_domain(obj);
2950
2951 /* Wait on any GPU rendering and flushing to occur. */
2952 if (obj_priv->active) {
2953#if WATCH_BUF
2954 DRM_INFO("%s: object %p wait for seqno %08x\n",
2955 __func__, obj, obj_priv->last_rendering_seqno);
2956#endif
2957 ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
2958 if (ret != 0)
2959 return ret;
2960 }
2961
2962 old_write_domain = obj->write_domain;
2963 old_read_domains = obj->read_domains;
2964
2965 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2966
2967 i915_gem_object_flush_cpu_write_domain(obj);
2968
2969 /* It should now be out of any other write domains, and we can update
2970 * the domain values for our changes.
2971 */
2972 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2973 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2974 obj->write_domain = I915_GEM_DOMAIN_GTT;
2975 obj_priv->dirty = 1;
2976
2977 trace_i915_gem_object_change_domain(obj,
2978 old_read_domains,
2979 old_write_domain);
2980
2981 return 0;
2982}
2983
Eric Anholte47c68e2008-11-14 13:35:19 -08002984/**
2985 * Moves a single object to the CPU read, and possibly write domain.
2986 *
2987 * This function returns when the move is complete, including waiting on
2988 * flushes to occur.
2989 */
2990static int
2991i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2992{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002993 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002994 int ret;
2995
2996 i915_gem_object_flush_gpu_write_domain(obj);
2997 /* Wait on any GPU rendering and flushing to occur. */
2998 ret = i915_gem_object_wait_rendering(obj);
2999 if (ret != 0)
3000 return ret;
3001
3002 i915_gem_object_flush_gtt_write_domain(obj);
3003
3004 /* If we have a partially-valid cache of the object in the CPU,
3005 * finish invalidating it and free the per-page flags.
3006 */
3007 i915_gem_object_set_to_full_cpu_read_domain(obj);
3008
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003009 old_write_domain = obj->write_domain;
3010 old_read_domains = obj->read_domains;
3011
Eric Anholte47c68e2008-11-14 13:35:19 -08003012 /* Flush the CPU cache if it's still invalid. */
3013 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3014 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003015
3016 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3017 }
3018
3019 /* It should now be out of any other write domains, and we can update
3020 * the domain values for our changes.
3021 */
3022 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3023
3024 /* If we're writing through the CPU, then the GPU read domains will
3025 * need to be invalidated at next use.
3026 */
3027 if (write) {
3028 obj->read_domains &= I915_GEM_DOMAIN_CPU;
3029 obj->write_domain = I915_GEM_DOMAIN_CPU;
3030 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003031
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003032 trace_i915_gem_object_change_domain(obj,
3033 old_read_domains,
3034 old_write_domain);
3035
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003036 return 0;
3037}
3038
Eric Anholt673a3942008-07-30 12:06:12 -07003039/*
3040 * Set the next domain for the specified object. This
 3041 * may not actually perform the necessary flushing/invalidating though,
3042 * as that may want to be batched with other set_domain operations
3043 *
3044 * This is (we hope) the only really tricky part of gem. The goal
3045 * is fairly simple -- track which caches hold bits of the object
3046 * and make sure they remain coherent. A few concrete examples may
3047 * help to explain how it works. For shorthand, we use the notation
 3048 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 3049 * a pair of read and write domain masks.
3050 *
3051 * Case 1: the batch buffer
3052 *
3053 * 1. Allocated
3054 * 2. Written by CPU
3055 * 3. Mapped to GTT
3056 * 4. Read by GPU
3057 * 5. Unmapped from GTT
3058 * 6. Freed
3059 *
3060 * Let's take these a step at a time
3061 *
3062 * 1. Allocated
3063 * Pages allocated from the kernel may still have
3064 * cache contents, so we set them to (CPU, CPU) always.
3065 * 2. Written by CPU (using pwrite)
3066 * The pwrite function calls set_domain (CPU, CPU) and
3067 * this function does nothing (as nothing changes)
3068 * 3. Mapped by GTT
3069 * This function asserts that the object is not
3070 * currently in any GPU-based read or write domains
3071 * 4. Read by GPU
3072 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
3073 * As write_domain is zero, this function adds in the
3074 * current read domains (CPU+COMMAND, 0).
3075 * flush_domains is set to CPU.
3076 * invalidate_domains is set to COMMAND
3077 * clflush is run to get data out of the CPU caches
3078 * then i915_dev_set_domain calls i915_gem_flush to
3079 * emit an MI_FLUSH and drm_agp_chipset_flush
3080 * 5. Unmapped from GTT
3081 * i915_gem_object_unbind calls set_domain (CPU, CPU)
3082 * flush_domains and invalidate_domains end up both zero
3083 * so no flushing/invalidating happens
3084 * 6. Freed
3085 * yay, done
3086 *
3087 * Case 2: The shared render buffer
3088 *
3089 * 1. Allocated
3090 * 2. Mapped to GTT
3091 * 3. Read/written by GPU
3092 * 4. set_domain to (CPU,CPU)
3093 * 5. Read/written by CPU
3094 * 6. Read/written by GPU
3095 *
3096 * 1. Allocated
3097 * Same as last example, (CPU, CPU)
3098 * 2. Mapped to GTT
3099 * Nothing changes (assertions find that it is not in the GPU)
3100 * 3. Read/written by GPU
3101 * execbuffer calls set_domain (RENDER, RENDER)
3102 * flush_domains gets CPU
3103 * invalidate_domains gets GPU
3104 * clflush (obj)
3105 * MI_FLUSH and drm_agp_chipset_flush
3106 * 4. set_domain (CPU, CPU)
3107 * flush_domains gets GPU
3108 * invalidate_domains gets CPU
3109 * wait_rendering (obj) to make sure all drawing is complete.
3110 * This will include an MI_FLUSH to get the data from GPU
3111 * to memory
3112 * clflush (obj) to invalidate the CPU cache
3113 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3114 * 5. Read/written by CPU
3115 * cache lines are loaded and dirtied
 3116 * 6. Read/written by GPU
3117 * Same as last GPU access
3118 *
3119 * Case 3: The constant buffer
3120 *
3121 * 1. Allocated
3122 * 2. Written by CPU
3123 * 3. Read by GPU
3124 * 4. Updated (written) by CPU again
3125 * 5. Read by GPU
3126 *
3127 * 1. Allocated
3128 * (CPU, CPU)
3129 * 2. Written by CPU
3130 * (CPU, CPU)
3131 * 3. Read by GPU
3132 * (CPU+RENDER, 0)
3133 * flush_domains = CPU
3134 * invalidate_domains = RENDER
3135 * clflush (obj)
3136 * MI_FLUSH
3137 * drm_agp_chipset_flush
3138 * 4. Updated (written) by CPU again
3139 * (CPU, CPU)
3140 * flush_domains = 0 (no previous write domain)
3141 * invalidate_domains = 0 (no new read domains)
3142 * 5. Read by GPU
3143 * (CPU+RENDER, 0)
3144 * flush_domains = CPU
3145 * invalidate_domains = RENDER
3146 * clflush (obj)
3147 * MI_FLUSH
3148 * drm_agp_chipset_flush
3149 */
Keith Packardc0d90822008-11-20 23:11:08 -08003150static void
Eric Anholt8b0e3782009-02-19 14:40:50 -08003151i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003152{
3153 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01003154 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003155 uint32_t invalidate_domains = 0;
3156 uint32_t flush_domains = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003157 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003158
Eric Anholt8b0e3782009-02-19 14:40:50 -08003159 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
3160 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
Eric Anholt673a3942008-07-30 12:06:12 -07003161
Jesse Barnes652c3932009-08-17 13:31:43 -07003162 intel_mark_busy(dev, obj);
3163
Eric Anholt673a3942008-07-30 12:06:12 -07003164#if WATCH_BUF
3165 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
3166 __func__, obj,
Eric Anholt8b0e3782009-02-19 14:40:50 -08003167 obj->read_domains, obj->pending_read_domains,
3168 obj->write_domain, obj->pending_write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003169#endif
3170 /*
3171 * If the object isn't moving to a new write domain,
3172 * let the object stay in multiple read domains
3173 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003174 if (obj->pending_write_domain == 0)
3175 obj->pending_read_domains |= obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003176 else
3177 obj_priv->dirty = 1;
3178
3179 /*
3180 * Flush the current write domain if
3181 * the new read domains don't match. Invalidate
3182 * any read domains which differ from the old
3183 * write domain
3184 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003185 if (obj->write_domain &&
3186 obj->write_domain != obj->pending_read_domains) {
Eric Anholt673a3942008-07-30 12:06:12 -07003187 flush_domains |= obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003188 invalidate_domains |=
3189 obj->pending_read_domains & ~obj->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07003190 }
3191 /*
3192 * Invalidate any read caches which may have
3193 * stale data. That is, any new read domains.
3194 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003195 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003196 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3197#if WATCH_BUF
3198 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3199 __func__, flush_domains, invalidate_domains);
3200#endif
Eric Anholt673a3942008-07-30 12:06:12 -07003201 i915_gem_clflush_object(obj);
3202 }
3203
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003204 old_read_domains = obj->read_domains;
3205
Eric Anholtefbeed92009-02-19 14:54:51 -08003206 /* The actual obj->write_domain will be updated with
3207 * pending_write_domain after we emit the accumulated flush for all
3208 * of our domain changes in execbuffers (which clears objects'
3209 * write_domains). So if we have a current write domain that we
3210 * aren't changing, set pending_write_domain to that.
3211 */
3212 if (flush_domains == 0 && obj->pending_write_domain == 0)
3213 obj->pending_write_domain = obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003214 obj->read_domains = obj->pending_read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003215
3216 dev->invalidate_domains |= invalidate_domains;
3217 dev->flush_domains |= flush_domains;
3218#if WATCH_BUF
3219 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3220 __func__,
3221 obj->read_domains, obj->write_domain,
3222 dev->invalidate_domains, dev->flush_domains);
3223#endif
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003224
3225 trace_i915_gem_object_change_domain(obj,
3226 old_read_domains,
3227 obj->write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003228}
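
/*
 * Illustrative sketch, not part of the original file: the mask arithmetic
 * from the function above applied to "Case 1" of the comment block.  The
 * example_* helper is hypothetical; an object currently in (CPU, CPU) that
 * is requested as (COMMAND, 0) ends up with flush_domains == CPU and
 * invalidate_domains == COMMAND, matching the walk-through in the comment.
 */
#if 0
static void example_domain_mask_math(void)
{
	uint32_t read_domains = I915_GEM_DOMAIN_CPU;
	uint32_t write_domain = I915_GEM_DOMAIN_CPU;
	uint32_t pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	uint32_t pending_write_domain = 0;
	uint32_t invalidate_domains = 0, flush_domains = 0;

	/* No new write domain, so the existing read domains are kept. */
	if (pending_write_domain == 0)
		pending_read_domains |= read_domains;

	/* The old write domain differs from the new read domains: flush it
	 * and invalidate the read domains it does not cover. */
	if (write_domain && write_domain != pending_read_domains) {
		flush_domains |= write_domain;
		invalidate_domains |= pending_read_domains & ~write_domain;
	}

	/* Any brand-new read domain must be invalidated as well. */
	invalidate_domains |= pending_read_domains & ~read_domains;

	/* Here flush_domains == I915_GEM_DOMAIN_CPU and
	 * invalidate_domains == I915_GEM_DOMAIN_COMMAND. */
	(void)invalidate_domains;
	(void)flush_domains;
}
#endif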
3229
3230/**
Eric Anholte47c68e2008-11-14 13:35:19 -08003231 * Moves the object from a partially valid CPU read domain to a fully valid one.
Eric Anholt673a3942008-07-30 12:06:12 -07003232 *
Eric Anholte47c68e2008-11-14 13:35:19 -08003233 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3234 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3235 */
3236static void
3237i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3238{
Daniel Vetter23010e42010-03-08 13:35:02 +01003239 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003240
3241 if (!obj_priv->page_cpu_valid)
3242 return;
3243
3244 /* If we're partially in the CPU read domain, finish moving it in.
3245 */
3246 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3247 int i;
3248
3249 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3250 if (obj_priv->page_cpu_valid[i])
3251 continue;
Eric Anholt856fa192009-03-19 14:10:50 -07003252 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08003253 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003254 }
3255
3256 /* Free the page_cpu_valid mappings which are now stale, whether
3257 * or not we've got I915_GEM_DOMAIN_CPU.
3258 */
Eric Anholt9a298b22009-03-24 12:23:04 -07003259 kfree(obj_priv->page_cpu_valid);
Eric Anholte47c68e2008-11-14 13:35:19 -08003260 obj_priv->page_cpu_valid = NULL;
3261}
3262
3263/**
3264 * Set the CPU read domain on a range of the object.
3265 *
3266 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 3267 * not entirely valid. The page_cpu_valid member of the object tracks which
3268 * pages have been flushed, and will be respected by
3269 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3270 * of the whole object.
3271 *
3272 * This function returns when the move is complete, including waiting on
3273 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07003274 */
3275static int
Eric Anholte47c68e2008-11-14 13:35:19 -08003276i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3277 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07003278{
Daniel Vetter23010e42010-03-08 13:35:02 +01003279 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003280 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003281 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003282
Eric Anholte47c68e2008-11-14 13:35:19 -08003283 if (offset == 0 && size == obj->size)
3284 return i915_gem_object_set_to_cpu_domain(obj, 0);
3285
3286 i915_gem_object_flush_gpu_write_domain(obj);
3287 /* Wait on any GPU rendering and flushing to occur. */
3288 ret = i915_gem_object_wait_rendering(obj);
3289 if (ret != 0)
3290 return ret;
3291 i915_gem_object_flush_gtt_write_domain(obj);
3292
3293 /* If we're already fully in the CPU read domain, we're done. */
3294 if (obj_priv->page_cpu_valid == NULL &&
3295 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003296 return 0;
3297
Eric Anholte47c68e2008-11-14 13:35:19 -08003298 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3299 * newly adding I915_GEM_DOMAIN_CPU
3300 */
Eric Anholt673a3942008-07-30 12:06:12 -07003301 if (obj_priv->page_cpu_valid == NULL) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003302 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3303 GFP_KERNEL);
Eric Anholte47c68e2008-11-14 13:35:19 -08003304 if (obj_priv->page_cpu_valid == NULL)
3305 return -ENOMEM;
3306 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3307 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003308
3309 /* Flush the cache on any pages that are still invalid from the CPU's
3310 * perspective.
3311 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003312 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3313 i++) {
Eric Anholt673a3942008-07-30 12:06:12 -07003314 if (obj_priv->page_cpu_valid[i])
3315 continue;
3316
Eric Anholt856fa192009-03-19 14:10:50 -07003317 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003318
3319 obj_priv->page_cpu_valid[i] = 1;
3320 }
3321
Eric Anholte47c68e2008-11-14 13:35:19 -08003322 /* It should now be out of any other write domains, and we can update
3323 * the domain values for our changes.
3324 */
3325 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3326
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003327 old_read_domains = obj->read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003328 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3329
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003330 trace_i915_gem_object_change_domain(obj,
3331 old_read_domains,
3332 obj->write_domain);
3333
Eric Anholt673a3942008-07-30 12:06:12 -07003334 return 0;
3335}
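
/*
 * Illustrative sketch, not part of the original file: the page range covered
 * by an (offset, size) request in the function above.  Assuming 4 KiB pages,
 * a request for offset 0x1800 and size 0x2000 touches pages 1 through 3
 * inclusive, so exactly those page_cpu_valid[] entries get flushed and
 * marked valid.  The example_* helper is hypothetical.
 */
#if 0
static void example_cpu_read_range_pages(void)
{
	uint64_t offset = 0x1800, size = 0x2000;
	uint64_t first = offset / PAGE_SIZE;			/* 1 */
	uint64_t last = (offset + size - 1) / PAGE_SIZE;	/* 3 */

	(void)first;
	(void)last;
}
#endif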
3336
3337/**
Eric Anholt673a3942008-07-30 12:06:12 -07003338 * Pin an object to the GTT and evaluate the relocations landing in it.
3339 */
3340static int
3341i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3342 struct drm_file *file_priv,
Jesse Barnes76446ca2009-12-17 22:05:42 -05003343 struct drm_i915_gem_exec_object2 *entry,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003344 struct drm_i915_gem_relocation_entry *relocs)
Eric Anholt673a3942008-07-30 12:06:12 -07003345{
3346 struct drm_device *dev = obj->dev;
Keith Packard0839ccb2008-10-30 19:38:48 -07003347 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01003348 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003349 int i, ret;
Keith Packard0839ccb2008-10-30 19:38:48 -07003350 void __iomem *reloc_page;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003351 bool need_fence;
3352
3353 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3354 obj_priv->tiling_mode != I915_TILING_NONE;
3355
3356 /* Check fence reg constraints and rebind if necessary */
Owain Ainsworthf590d272010-02-18 15:33:00 +00003357 if (need_fence && !i915_gem_object_fence_offset_ok(obj,
3358 obj_priv->tiling_mode))
Jesse Barnes76446ca2009-12-17 22:05:42 -05003359 i915_gem_object_unbind(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003360
3361 /* Choose the GTT offset for our buffer and put it there. */
3362 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3363 if (ret)
3364 return ret;
3365
Jesse Barnes76446ca2009-12-17 22:05:42 -05003366 /*
3367 * Pre-965 chips need a fence register set up in order to
3368 * properly handle blits to/from tiled surfaces.
3369 */
3370 if (need_fence) {
3371 ret = i915_gem_object_get_fence_reg(obj);
3372 if (ret != 0) {
3373 if (ret != -EBUSY && ret != -ERESTARTSYS)
3374 DRM_ERROR("Failure to install fence: %d\n",
3375 ret);
3376 i915_gem_object_unpin(obj);
3377 return ret;
3378 }
3379 }
3380
Eric Anholt673a3942008-07-30 12:06:12 -07003381 entry->offset = obj_priv->gtt_offset;
3382
Eric Anholt673a3942008-07-30 12:06:12 -07003383 /* Apply the relocations, using the GTT aperture to avoid cache
3384 * flushing requirements.
3385 */
3386 for (i = 0; i < entry->relocation_count; i++) {
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003387		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003388 struct drm_gem_object *target_obj;
3389 struct drm_i915_gem_object *target_obj_priv;
Eric Anholt3043c602008-10-02 12:24:47 -07003390 uint32_t reloc_val, reloc_offset;
3391 uint32_t __iomem *reloc_entry;
Eric Anholt673a3942008-07-30 12:06:12 -07003392
Eric Anholt673a3942008-07-30 12:06:12 -07003393 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003394 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003395 if (target_obj == NULL) {
3396 i915_gem_object_unpin(obj);
3397 return -EBADF;
3398 }
Daniel Vetter23010e42010-03-08 13:35:02 +01003399 target_obj_priv = to_intel_bo(target_obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003400
Chris Wilson8542a0b2009-09-09 21:15:15 +01003401#if WATCH_RELOC
3402 DRM_INFO("%s: obj %p offset %08x target %d "
3403 "read %08x write %08x gtt %08x "
3404 "presumed %08x delta %08x\n",
3405 __func__,
3406 obj,
3407 (int) reloc->offset,
3408 (int) reloc->target_handle,
3409 (int) reloc->read_domains,
3410 (int) reloc->write_domain,
3411 (int) target_obj_priv->gtt_offset,
3412 (int) reloc->presumed_offset,
3413 reloc->delta);
3414#endif
3415
Eric Anholt673a3942008-07-30 12:06:12 -07003416 /* The target buffer should have appeared before us in the
3417 * exec_object list, so it should have a GTT space bound by now.
3418 */
3419 if (target_obj_priv->gtt_space == NULL) {
3420 DRM_ERROR("No GTT space found for object %d\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003421 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003422 drm_gem_object_unreference(target_obj);
3423 i915_gem_object_unpin(obj);
3424 return -EINVAL;
3425 }
3426
Chris Wilson8542a0b2009-09-09 21:15:15 +01003427 /* Validate that the target is in a valid r/w GPU domain */
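		/* write_domain & (write_domain - 1) is non-zero only when more
		 * than one write domain bit is set. */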
Daniel Vetter16edd552010-02-19 11:52:02 +01003428 if (reloc->write_domain & (reloc->write_domain - 1)) {
3429 DRM_ERROR("reloc with multiple write domains: "
3430 "obj %p target %d offset %d "
3431 "read %08x write %08x",
3432 obj, reloc->target_handle,
3433 (int) reloc->offset,
3434 reloc->read_domains,
3435 reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
 3436			return -EINVAL;
3437 }
Chris Wilson8542a0b2009-09-09 21:15:15 +01003438 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3439 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3440 DRM_ERROR("reloc with read/write CPU domains: "
3441 "obj %p target %d offset %d "
3442 "read %08x write %08x",
3443 obj, reloc->target_handle,
3444 (int) reloc->offset,
3445 reloc->read_domains,
3446 reloc->write_domain);
3447 drm_gem_object_unreference(target_obj);
3448 i915_gem_object_unpin(obj);
3449 return -EINVAL;
3450 }
3451 if (reloc->write_domain && target_obj->pending_write_domain &&
3452 reloc->write_domain != target_obj->pending_write_domain) {
3453 DRM_ERROR("Write domain conflict: "
3454 "obj %p target %d offset %d "
3455 "new %08x old %08x\n",
3456 obj, reloc->target_handle,
3457 (int) reloc->offset,
3458 reloc->write_domain,
3459 target_obj->pending_write_domain);
3460 drm_gem_object_unreference(target_obj);
3461 i915_gem_object_unpin(obj);
3462 return -EINVAL;
3463 }
3464
3465 target_obj->pending_read_domains |= reloc->read_domains;
3466 target_obj->pending_write_domain |= reloc->write_domain;
3467
3468 /* If the relocation already has the right value in it, no
3469 * more work needs to be done.
3470 */
3471 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3472 drm_gem_object_unreference(target_obj);
3473 continue;
3474 }
3475
3476 /* Check that the relocation address is valid... */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003477 if (reloc->offset > obj->size - 4) {
Eric Anholt673a3942008-07-30 12:06:12 -07003478 DRM_ERROR("Relocation beyond object bounds: "
3479 "obj %p target %d offset %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003480 obj, reloc->target_handle,
3481 (int) reloc->offset, (int) obj->size);
Eric Anholt673a3942008-07-30 12:06:12 -07003482 drm_gem_object_unreference(target_obj);
3483 i915_gem_object_unpin(obj);
3484 return -EINVAL;
3485 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003486 if (reloc->offset & 3) {
Eric Anholt673a3942008-07-30 12:06:12 -07003487 DRM_ERROR("Relocation not 4-byte aligned: "
3488 "obj %p target %d offset %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003489 obj, reloc->target_handle,
3490 (int) reloc->offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003491 drm_gem_object_unreference(target_obj);
3492 i915_gem_object_unpin(obj);
3493 return -EINVAL;
3494 }
3495
Chris Wilson8542a0b2009-09-09 21:15:15 +01003496 /* and points to somewhere within the target object. */
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003497 if (reloc->delta >= target_obj->size) {
3498 DRM_ERROR("Relocation beyond target object bounds: "
3499 "obj %p target %d delta %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003500 obj, reloc->target_handle,
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003501 (int) reloc->delta, (int) target_obj->size);
Chris Wilson491152b2009-02-11 14:26:32 +00003502 drm_gem_object_unreference(target_obj);
3503 i915_gem_object_unpin(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003504 return -EINVAL;
3505 }
3506
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003507 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3508 if (ret != 0) {
3509 drm_gem_object_unreference(target_obj);
3510 i915_gem_object_unpin(obj);
3511 return -EINVAL;
Eric Anholt673a3942008-07-30 12:06:12 -07003512 }
3513
3514 /* Map the page containing the relocation we're going to
3515 * perform.
3516 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003517 reloc_offset = obj_priv->gtt_offset + reloc->offset;
Keith Packard0839ccb2008-10-30 19:38:48 -07003518 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3519 (reloc_offset &
3520 ~(PAGE_SIZE - 1)));
Eric Anholt3043c602008-10-02 12:24:47 -07003521 reloc_entry = (uint32_t __iomem *)(reloc_page +
Keith Packard0839ccb2008-10-30 19:38:48 -07003522 (reloc_offset & (PAGE_SIZE - 1)));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003523 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
Eric Anholt673a3942008-07-30 12:06:12 -07003524
3525#if WATCH_BUF
3526 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003527 obj, (unsigned int) reloc->offset,
Eric Anholt673a3942008-07-30 12:06:12 -07003528 readl(reloc_entry), reloc_val);
3529#endif
3530 writel(reloc_val, reloc_entry);
Keith Packard0839ccb2008-10-30 19:38:48 -07003531 io_mapping_unmap_atomic(reloc_page);
Eric Anholt673a3942008-07-30 12:06:12 -07003532
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003533 /* The updated presumed offset for this entry will be
3534 * copied back out to the user.
Eric Anholt673a3942008-07-30 12:06:12 -07003535 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003536 reloc->presumed_offset = target_obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003537
3538 drm_gem_object_unreference(target_obj);
3539 }
3540
Eric Anholt673a3942008-07-30 12:06:12 -07003541#if WATCH_BUF
3542 if (0)
3543 i915_gem_dump_object(obj, 128, __func__, ~0);
3544#endif
3545 return 0;
3546}
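
/*
 * Illustrative sketch, not part of the original file: the relocation
 * arithmetic used in the loop above, with made-up offsets.  The relocation
 * lands at gtt_offset + reloc->offset, which is split into a page base for
 * the atomic GTT mapping and an offset within that page, and the value
 * written is the target's gtt_offset + reloc->delta.  The example_* helper
 * and all numbers here are hypothetical.
 */
#if 0
static void example_reloc_math(void)
{
	uint32_t obj_gtt_offset = 0x00100000;			/* hypothetical */
	uint32_t target_gtt_offset = 0x00200000;		/* hypothetical */
	uint32_t reloc_offset = obj_gtt_offset + 0x1004;	/* reloc->offset */
	uint32_t reloc_val = target_gtt_offset + 0x10;		/* reloc->delta */
	uint32_t page_base = reloc_offset & ~(PAGE_SIZE - 1);	/* 0x00101000 */
	uint32_t page_off = reloc_offset & (PAGE_SIZE - 1);	/* 0x004 */

	/* The dword mapped at page_base + page_off is rewritten to
	 * reloc_val, i.e. 0x00200010 in this example. */
	(void)page_base;
	(void)page_off;
	(void)reloc_val;
}
#endif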
3547
3548/** Dispatch a batchbuffer to the ring
3549 */
3550static int
3551i915_dispatch_gem_execbuffer(struct drm_device *dev,
Jesse Barnes76446ca2009-12-17 22:05:42 -05003552 struct drm_i915_gem_execbuffer2 *exec,
Eric Anholt201361a2009-03-11 12:30:04 -07003553 struct drm_clip_rect *cliprects,
Eric Anholt673a3942008-07-30 12:06:12 -07003554 uint64_t exec_offset)
3555{
3556 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003557 int nbox = exec->num_cliprects;
3558 int i = 0, count;
Chris Wilson83d60792009-06-06 09:45:57 +01003559 uint32_t exec_start, exec_len;
Eric Anholt673a3942008-07-30 12:06:12 -07003560 RING_LOCALS;
3561
3562 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3563 exec_len = (uint32_t) exec->batch_len;
3564
Chris Wilson8f0dc5b2009-09-24 00:43:17 +01003565 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003566
Eric Anholt673a3942008-07-30 12:06:12 -07003567 count = nbox ? nbox : 1;
3568
3569 for (i = 0; i < count; i++) {
3570 if (i < nbox) {
Eric Anholt201361a2009-03-11 12:30:04 -07003571 int ret = i915_emit_box(dev, cliprects, i,
Eric Anholt673a3942008-07-30 12:06:12 -07003572 exec->DR1, exec->DR4);
3573 if (ret)
3574 return ret;
3575 }
3576
3577 if (IS_I830(dev) || IS_845G(dev)) {
3578 BEGIN_LP_RING(4);
3579 OUT_RING(MI_BATCH_BUFFER);
3580 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3581 OUT_RING(exec_start + exec_len - 4);
3582 OUT_RING(0);
3583 ADVANCE_LP_RING();
3584 } else {
3585 BEGIN_LP_RING(2);
3586 if (IS_I965G(dev)) {
3587 OUT_RING(MI_BATCH_BUFFER_START |
3588 (2 << 6) |
3589 MI_BATCH_NON_SECURE_I965);
3590 OUT_RING(exec_start);
3591 } else {
3592 OUT_RING(MI_BATCH_BUFFER_START |
3593 (2 << 6));
3594 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3595 }
3596 ADVANCE_LP_RING();
3597 }
3598 }
3599
3600 /* XXX breadcrumb */
3601 return 0;
3602}
3603
3604/* Throttle our rendering by waiting until the ring has completed our requests
3605 * emitted over 20 msec ago.
3606 *
Eric Anholtb9624422009-06-03 07:27:35 +00003607 * Note that if we were to use the current jiffies each time around the loop,
3608 * we wouldn't escape the function with any frames outstanding if the time to
3609 * render a frame was over 20ms.
3610 *
Eric Anholt673a3942008-07-30 12:06:12 -07003611 * This should get us reasonable parallelism between CPU and GPU but also
3612 * relatively low latency when blocking on a particular request to finish.
3613 */
3614static int
3615i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3616{
3617 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3618 int ret = 0;
Eric Anholtb9624422009-06-03 07:27:35 +00003619 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Eric Anholt673a3942008-07-30 12:06:12 -07003620
3621 mutex_lock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003622 while (!list_empty(&i915_file_priv->mm.request_list)) {
3623 struct drm_i915_gem_request *request;
3624
3625 request = list_first_entry(&i915_file_priv->mm.request_list,
3626 struct drm_i915_gem_request,
3627 client_list);
3628
3629 if (time_after_eq(request->emitted_jiffies, recent_enough))
3630 break;
3631
3632 ret = i915_wait_request(dev, request->seqno);
3633 if (ret != 0)
3634 break;
3635 }
Eric Anholt673a3942008-07-30 12:06:12 -07003636 mutex_unlock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003637
Eric Anholt673a3942008-07-30 12:06:12 -07003638 return ret;
3639}
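
/*
 * Illustrative sketch, not part of the original file: the 20 ms window used
 * by the throttle above.  A request is "recent enough" to leave outstanding
 * when it was emitted within the last 20 ms; anything older is waited on.
 * The example_* helper is hypothetical.
 */
#if 0
static bool example_request_is_recent(unsigned long emitted_jiffies)
{
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

	return time_after_eq(emitted_jiffies, recent_enough);
}
#endif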
3640
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003641static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003642i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003643 uint32_t buffer_count,
3644 struct drm_i915_gem_relocation_entry **relocs)
3645{
3646 uint32_t reloc_count = 0, reloc_index = 0, i;
3647 int ret;
3648
3649 *relocs = NULL;
3650 for (i = 0; i < buffer_count; i++) {
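		/* An unsigned sum that wraps around comes out smaller than
		 * either addend. */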
3651 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3652 return -EINVAL;
3653 reloc_count += exec_list[i].relocation_count;
3654 }
3655
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003656 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
Jesse Barnes76446ca2009-12-17 22:05:42 -05003657 if (*relocs == NULL) {
3658 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003659 return -ENOMEM;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003660 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003661
3662 for (i = 0; i < buffer_count; i++) {
3663 struct drm_i915_gem_relocation_entry __user *user_relocs;
3664
3665 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3666
3667 ret = copy_from_user(&(*relocs)[reloc_index],
3668 user_relocs,
3669 exec_list[i].relocation_count *
3670 sizeof(**relocs));
3671 if (ret != 0) {
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003672 drm_free_large(*relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003673 *relocs = NULL;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003674 return -EFAULT;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003675 }
3676
3677 reloc_index += exec_list[i].relocation_count;
3678 }
3679
Florian Mickler2bc43b52009-04-06 22:55:41 +02003680 return 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003681}
3682
3683static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003684i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003685 uint32_t buffer_count,
3686 struct drm_i915_gem_relocation_entry *relocs)
3687{
3688 uint32_t reloc_count = 0, i;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003689 int ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003690
Chris Wilson93533c22010-01-31 10:40:48 +00003691 if (relocs == NULL)
3692 return 0;
3693
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003694 for (i = 0; i < buffer_count; i++) {
3695 struct drm_i915_gem_relocation_entry __user *user_relocs;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003696 int unwritten;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003697
3698 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3699
Florian Mickler2bc43b52009-04-06 22:55:41 +02003700 unwritten = copy_to_user(user_relocs,
3701 &relocs[reloc_count],
3702 exec_list[i].relocation_count *
3703 sizeof(*relocs));
3704
3705 if (unwritten) {
3706 ret = -EFAULT;
3707 goto err;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003708 }
3709
3710 reloc_count += exec_list[i].relocation_count;
3711 }
3712
Florian Mickler2bc43b52009-04-06 22:55:41 +02003713err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003714 drm_free_large(relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003715
3716 return ret;
3717}
3718
Chris Wilson83d60792009-06-06 09:45:57 +01003719static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003720i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
Chris Wilson83d60792009-06-06 09:45:57 +01003721 uint64_t exec_offset)
3722{
3723 uint32_t exec_start, exec_len;
3724
3725 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3726 exec_len = (uint32_t) exec->batch_len;
3727
3728 if ((exec_start | exec_len) & 0x7)
3729 return -EINVAL;
3730
3731 if (!exec_start)
3732 return -EINVAL;
3733
3734 return 0;
3735}
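
/*
 * Illustrative note, not part of the original file: the check above relies
 * on (exec_start | exec_len) & 0x7 being non-zero whenever either value is
 * not 8-byte aligned.  For example, exec_start = 0x1000 with exec_len =
 * 0x104 gives (0x1000 | 0x104) & 0x7 = 0x4, so the execbuffer is rejected.
 */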
3736
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003737static int
3738i915_gem_wait_for_pending_flip(struct drm_device *dev,
3739 struct drm_gem_object **object_list,
3740 int count)
3741{
3742 drm_i915_private_t *dev_priv = dev->dev_private;
3743 struct drm_i915_gem_object *obj_priv;
3744 DEFINE_WAIT(wait);
3745 int i, ret = 0;
3746
3747 for (;;) {
3748 prepare_to_wait(&dev_priv->pending_flip_queue,
3749 &wait, TASK_INTERRUPTIBLE);
3750 for (i = 0; i < count; i++) {
Daniel Vetter23010e42010-03-08 13:35:02 +01003751 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003752 if (atomic_read(&obj_priv->pending_flip) > 0)
3753 break;
3754 }
3755 if (i == count)
3756 break;
3757
3758 if (!signal_pending(current)) {
3759 mutex_unlock(&dev->struct_mutex);
3760 schedule();
3761 mutex_lock(&dev->struct_mutex);
3762 continue;
3763 }
3764 ret = -ERESTARTSYS;
3765 break;
3766 }
3767 finish_wait(&dev_priv->pending_flip_queue, &wait);
3768
3769 return ret;
3770}
3771
Eric Anholt673a3942008-07-30 12:06:12 -07003772int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003773i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3774 struct drm_file *file_priv,
3775 struct drm_i915_gem_execbuffer2 *args,
3776 struct drm_i915_gem_exec_object2 *exec_list)
Eric Anholt673a3942008-07-30 12:06:12 -07003777{
3778 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003779 struct drm_gem_object **object_list = NULL;
3780 struct drm_gem_object *batch_obj;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003781 struct drm_i915_gem_object *obj_priv;
Eric Anholt201361a2009-03-11 12:30:04 -07003782 struct drm_clip_rect *cliprects = NULL;
Chris Wilson93533c22010-01-31 10:40:48 +00003783 struct drm_i915_gem_relocation_entry *relocs = NULL;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003784 int ret = 0, ret2, i, pinned = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003785 uint64_t exec_offset;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003786 uint32_t seqno, flush_domains, reloc_index;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003787 int pin_tries, flips;
Eric Anholt673a3942008-07-30 12:06:12 -07003788
3789#if WATCH_EXEC
3790 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3791 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3792#endif
3793
Eric Anholt4f481ed2008-09-10 14:22:49 -07003794 if (args->buffer_count < 1) {
3795 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3796 return -EINVAL;
3797 }
Eric Anholtc8e0f932009-11-22 03:49:37 +01003798 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
Jesse Barnes76446ca2009-12-17 22:05:42 -05003799 if (object_list == NULL) {
3800 DRM_ERROR("Failed to allocate object list for %d buffers\n",
Eric Anholt673a3942008-07-30 12:06:12 -07003801 args->buffer_count);
3802 ret = -ENOMEM;
3803 goto pre_mutex_err;
3804 }
Eric Anholt673a3942008-07-30 12:06:12 -07003805
Eric Anholt201361a2009-03-11 12:30:04 -07003806 if (args->num_cliprects != 0) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003807 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3808 GFP_KERNEL);
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003809 if (cliprects == NULL) {
3810 ret = -ENOMEM;
Eric Anholt201361a2009-03-11 12:30:04 -07003811 goto pre_mutex_err;
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003812 }
Eric Anholt201361a2009-03-11 12:30:04 -07003813
3814 ret = copy_from_user(cliprects,
3815 (struct drm_clip_rect __user *)
3816 (uintptr_t) args->cliprects_ptr,
3817 sizeof(*cliprects) * args->num_cliprects);
3818 if (ret != 0) {
3819 DRM_ERROR("copy %d cliprects failed: %d\n",
3820 args->num_cliprects, ret);
3821 goto pre_mutex_err;
3822 }
3823 }
3824
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003825 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3826 &relocs);
3827 if (ret != 0)
3828 goto pre_mutex_err;
3829
Eric Anholt673a3942008-07-30 12:06:12 -07003830 mutex_lock(&dev->struct_mutex);
3831
3832 i915_verify_inactive(dev, __FILE__, __LINE__);
3833
Ben Gamariba1234d2009-09-14 17:48:47 -04003834 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003835 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003836 ret = -EIO;
3837 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003838 }
3839
3840 if (dev_priv->mm.suspended) {
Eric Anholt673a3942008-07-30 12:06:12 -07003841 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003842 ret = -EBUSY;
3843 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003844 }
3845
Keith Packardac94a962008-11-20 23:30:27 -08003846 /* Look up object handles */
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003847 flips = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003848 for (i = 0; i < args->buffer_count; i++) {
3849 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3850 exec_list[i].handle);
3851 if (object_list[i] == NULL) {
3852 DRM_ERROR("Invalid object handle %d at index %d\n",
3853 exec_list[i].handle, i);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003854 /* prevent error path from reading uninitialized data */
3855 args->buffer_count = i + 1;
Eric Anholt673a3942008-07-30 12:06:12 -07003856 ret = -EBADF;
3857 goto err;
3858 }
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003859
Daniel Vetter23010e42010-03-08 13:35:02 +01003860 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003861 if (obj_priv->in_execbuffer) {
3862 DRM_ERROR("Object %p appears more than once in object list\n",
3863 object_list[i]);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003864 /* prevent error path from reading uninitialized data */
3865 args->buffer_count = i + 1;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003866 ret = -EBADF;
3867 goto err;
3868 }
3869 obj_priv->in_execbuffer = true;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003870 flips += atomic_read(&obj_priv->pending_flip);
3871 }
3872
3873 if (flips > 0) {
3874 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3875 args->buffer_count);
3876 if (ret)
3877 goto err;
Keith Packardac94a962008-11-20 23:30:27 -08003878 }
Eric Anholt673a3942008-07-30 12:06:12 -07003879
Keith Packardac94a962008-11-20 23:30:27 -08003880 /* Pin and relocate */
3881 for (pin_tries = 0; ; pin_tries++) {
3882 ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003883 reloc_index = 0;
3884
Keith Packardac94a962008-11-20 23:30:27 -08003885 for (i = 0; i < args->buffer_count; i++) {
3886 object_list[i]->pending_read_domains = 0;
3887 object_list[i]->pending_write_domain = 0;
3888 ret = i915_gem_object_pin_and_relocate(object_list[i],
3889 file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003890 &exec_list[i],
3891 &relocs[reloc_index]);
Keith Packardac94a962008-11-20 23:30:27 -08003892 if (ret)
3893 break;
3894 pinned = i + 1;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003895 reloc_index += exec_list[i].relocation_count;
Keith Packardac94a962008-11-20 23:30:27 -08003896 }
3897 /* success */
3898 if (ret == 0)
3899 break;
3900
3901 /* error other than GTT full, or we've already tried again */
Chris Wilson2939e1f2009-06-06 09:46:03 +01003902 if (ret != -ENOSPC || pin_tries >= 1) {
Chris Wilson07f73f62009-09-14 16:50:30 +01003903 if (ret != -ERESTARTSYS) {
3904 unsigned long long total_size = 0;
3905 for (i = 0; i < args->buffer_count; i++)
3906 total_size += object_list[i]->size;
3907 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
3908 pinned+1, args->buffer_count,
3909 total_size, ret);
3910 DRM_ERROR("%d objects [%d pinned], "
3911 "%d object bytes [%d pinned], "
3912 "%d/%d gtt bytes\n",
3913 atomic_read(&dev->object_count),
3914 atomic_read(&dev->pin_count),
3915 atomic_read(&dev->object_memory),
3916 atomic_read(&dev->pin_memory),
3917 atomic_read(&dev->gtt_memory),
3918 dev->gtt_total);
3919 }
Eric Anholt673a3942008-07-30 12:06:12 -07003920 goto err;
3921 }
Keith Packardac94a962008-11-20 23:30:27 -08003922
3923 /* unpin all of our buffers */
3924 for (i = 0; i < pinned; i++)
3925 i915_gem_object_unpin(object_list[i]);
Eric Anholtb1177632008-12-10 10:09:41 -08003926 pinned = 0;
Keith Packardac94a962008-11-20 23:30:27 -08003927
3928 /* evict everyone we can from the aperture */
3929 ret = i915_gem_evict_everything(dev);
Chris Wilson07f73f62009-09-14 16:50:30 +01003930 if (ret && ret != -ENOSPC)
Keith Packardac94a962008-11-20 23:30:27 -08003931 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003932 }
3933
3934 /* Set the pending read domains for the batch buffer to COMMAND */
3935 batch_obj = object_list[args->buffer_count-1];
Chris Wilson5f26a2c2009-06-06 09:45:58 +01003936 if (batch_obj->pending_write_domain) {
3937 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3938 ret = -EINVAL;
3939 goto err;
3940 }
3941 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
Eric Anholt673a3942008-07-30 12:06:12 -07003942
Chris Wilson83d60792009-06-06 09:45:57 +01003943 /* Sanity check the batch buffer, prior to moving objects */
3944 exec_offset = exec_list[args->buffer_count - 1].offset;
 3945	ret = i915_gem_check_execbuffer(args, exec_offset);
3946 if (ret != 0) {
3947 DRM_ERROR("execbuf with invalid offset/length\n");
3948 goto err;
3949 }
3950
Eric Anholt673a3942008-07-30 12:06:12 -07003951 i915_verify_inactive(dev, __FILE__, __LINE__);
3952
Keith Packard646f0f62008-11-20 23:23:03 -08003953 /* Zero the global flush/invalidate flags. These
3954 * will be modified as new domains are computed
3955 * for each object
3956 */
3957 dev->invalidate_domains = 0;
3958 dev->flush_domains = 0;
3959
Eric Anholt673a3942008-07-30 12:06:12 -07003960 for (i = 0; i < args->buffer_count; i++) {
3961 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003962
Keith Packard646f0f62008-11-20 23:23:03 -08003963 /* Compute new gpu domains and update invalidate/flush */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003964 i915_gem_object_set_to_gpu_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003965 }
3966
3967 i915_verify_inactive(dev, __FILE__, __LINE__);
3968
Keith Packard646f0f62008-11-20 23:23:03 -08003969 if (dev->invalidate_domains | dev->flush_domains) {
3970#if WATCH_EXEC
3971 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3972 __func__,
3973 dev->invalidate_domains,
3974 dev->flush_domains);
3975#endif
3976 i915_gem_flush(dev,
3977 dev->invalidate_domains,
3978 dev->flush_domains);
Daniel Vetter99fcb762010-02-07 16:20:18 +01003979 if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
Eric Anholtb9624422009-06-03 07:27:35 +00003980 (void)i915_add_request(dev, file_priv,
3981 dev->flush_domains);
Keith Packard646f0f62008-11-20 23:23:03 -08003982 }
Eric Anholt673a3942008-07-30 12:06:12 -07003983
Eric Anholtefbeed92009-02-19 14:54:51 -08003984 for (i = 0; i < args->buffer_count; i++) {
3985 struct drm_gem_object *obj = object_list[i];
Daniel Vetter23010e42010-03-08 13:35:02 +01003986 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003987 uint32_t old_write_domain = obj->write_domain;
Eric Anholtefbeed92009-02-19 14:54:51 -08003988
3989 obj->write_domain = obj->pending_write_domain;
Daniel Vetter99fcb762010-02-07 16:20:18 +01003990 if (obj->write_domain)
3991 list_move_tail(&obj_priv->gpu_write_list,
3992 &dev_priv->mm.gpu_write_list);
3993 else
3994 list_del_init(&obj_priv->gpu_write_list);
3995
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003996 trace_i915_gem_object_change_domain(obj,
3997 obj->read_domains,
3998 old_write_domain);
Eric Anholtefbeed92009-02-19 14:54:51 -08003999 }
4000
Eric Anholt673a3942008-07-30 12:06:12 -07004001 i915_verify_inactive(dev, __FILE__, __LINE__);
4002
4003#if WATCH_COHERENCY
4004 for (i = 0; i < args->buffer_count; i++) {
4005 i915_gem_object_check_coherency(object_list[i],
4006 exec_list[i].handle);
4007 }
4008#endif
4009
Eric Anholt673a3942008-07-30 12:06:12 -07004010#if WATCH_EXEC
Ben Gamari6911a9b2009-04-02 11:24:54 -07004011 i915_gem_dump_object(batch_obj,
Eric Anholt673a3942008-07-30 12:06:12 -07004012 args->batch_len,
4013 __func__,
4014 ~0);
4015#endif
4016
Eric Anholt673a3942008-07-30 12:06:12 -07004017 /* Exec the batchbuffer */
Eric Anholt201361a2009-03-11 12:30:04 -07004018 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
Eric Anholt673a3942008-07-30 12:06:12 -07004019 if (ret) {
4020 DRM_ERROR("dispatch failed %d\n", ret);
4021 goto err;
4022 }
4023
4024 /*
4025 * Ensure that the commands in the batch buffer are
4026 * finished before the interrupt fires
4027 */
4028 flush_domains = i915_retire_commands(dev);
4029
4030 i915_verify_inactive(dev, __FILE__, __LINE__);
4031
4032 /*
4033 * Get a seqno representing the execution of the current buffer,
4034 * which we can wait on. We would like to mitigate these interrupts,
4035 * likely by only creating seqnos occasionally (so that we have
4036 * *some* interrupts representing completion of buffers that we can
4037 * wait on when trying to clear up gtt space).
4038 */
Eric Anholtb9624422009-06-03 07:27:35 +00004039 seqno = i915_add_request(dev, file_priv, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07004040 BUG_ON(seqno == 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004041 for (i = 0; i < args->buffer_count; i++) {
4042 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07004043
Eric Anholtce44b0e2008-11-06 16:00:31 -08004044 i915_gem_object_move_to_active(obj, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07004045#if WATCH_LRU
4046 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
4047#endif
4048 }
4049#if WATCH_LRU
4050 i915_dump_lru(dev, __func__);
4051#endif
4052
4053 i915_verify_inactive(dev, __FILE__, __LINE__);
4054
Eric Anholt673a3942008-07-30 12:06:12 -07004055err:
Julia Lawallaad87df2008-12-21 16:28:47 +01004056 for (i = 0; i < pinned; i++)
4057 i915_gem_object_unpin(object_list[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07004058
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05004059 for (i = 0; i < args->buffer_count; i++) {
4060 if (object_list[i]) {
Daniel Vetter23010e42010-03-08 13:35:02 +01004061 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05004062 obj_priv->in_execbuffer = false;
4063 }
Julia Lawallaad87df2008-12-21 16:28:47 +01004064 drm_gem_object_unreference(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05004065 }
Julia Lawallaad87df2008-12-21 16:28:47 +01004066
Eric Anholt673a3942008-07-30 12:06:12 -07004067 mutex_unlock(&dev->struct_mutex);
4068
Chris Wilson93533c22010-01-31 10:40:48 +00004069pre_mutex_err:
Eric Anholt40a5f0d2009-03-12 11:23:52 -07004070 /* Copy the updated relocations out regardless of current error
4071 * state. Failure to update the relocs would mean that the next
4072 * time userland calls execbuf, it would do so with presumed offset
4073 * state that didn't match the actual object state.
4074 */
4075 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
4076 relocs);
4077 if (ret2 != 0) {
4078 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
4079
4080 if (ret == 0)
4081 ret = ret2;
4082 }
4083
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07004084 drm_free_large(object_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07004085 kfree(cliprects);
Eric Anholt673a3942008-07-30 12:06:12 -07004086
4087 return ret;
4088}
4089
Jesse Barnes76446ca2009-12-17 22:05:42 -05004090/*
4091 * Legacy execbuffer just creates an exec2 list from the original exec object
4092 * list array and passes it to the real function.
4093 */
4094int
4095i915_gem_execbuffer(struct drm_device *dev, void *data,
4096 struct drm_file *file_priv)
4097{
4098 struct drm_i915_gem_execbuffer *args = data;
4099 struct drm_i915_gem_execbuffer2 exec2;
4100 struct drm_i915_gem_exec_object *exec_list = NULL;
4101 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4102 int ret, i;
4103
4104#if WATCH_EXEC
4105 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4106 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4107#endif
4108
4109 if (args->buffer_count < 1) {
4110 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
4111 return -EINVAL;
4112 }
4113
4114 /* Copy in the exec list from userland */
4115 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
4116 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4117 if (exec_list == NULL || exec2_list == NULL) {
4118 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4119 args->buffer_count);
4120 drm_free_large(exec_list);
4121 drm_free_large(exec2_list);
4122 return -ENOMEM;
4123 }
4124 ret = copy_from_user(exec_list,
4125 (struct drm_i915_relocation_entry __user *)
4126 (uintptr_t) args->buffers_ptr,
4127 sizeof(*exec_list) * args->buffer_count);
4128 if (ret != 0) {
4129 DRM_ERROR("copy %d exec entries failed %d\n",
4130 args->buffer_count, ret);
4131 drm_free_large(exec_list);
4132 drm_free_large(exec2_list);
4133 return -EFAULT;
4134 }
4135
4136 for (i = 0; i < args->buffer_count; i++) {
4137 exec2_list[i].handle = exec_list[i].handle;
4138 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4139 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4140 exec2_list[i].alignment = exec_list[i].alignment;
4141 exec2_list[i].offset = exec_list[i].offset;
4142 if (!IS_I965G(dev))
4143 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4144 else
4145 exec2_list[i].flags = 0;
4146 }
4147
4148 exec2.buffers_ptr = args->buffers_ptr;
4149 exec2.buffer_count = args->buffer_count;
4150 exec2.batch_start_offset = args->batch_start_offset;
4151 exec2.batch_len = args->batch_len;
4152 exec2.DR1 = args->DR1;
4153 exec2.DR4 = args->DR4;
4154 exec2.num_cliprects = args->num_cliprects;
4155 exec2.cliprects_ptr = args->cliprects_ptr;
4156 exec2.flags = 0;
4157
4158 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
4159 if (!ret) {
4160 /* Copy the new buffer offsets back to the user's exec list. */
4161 for (i = 0; i < args->buffer_count; i++)
4162 exec_list[i].offset = exec2_list[i].offset;
4163 /* ... and back out to userspace */
4164		ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
4165 (uintptr_t) args->buffers_ptr,
4166 exec_list,
4167 sizeof(*exec_list) * args->buffer_count);
4168 if (ret) {
4169 ret = -EFAULT;
4170 DRM_ERROR("failed to copy %d exec entries "
4171 "back to user (%d)\n",
4172 args->buffer_count, ret);
4173 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05004174 }
4175
4176 drm_free_large(exec_list);
4177 drm_free_large(exec2_list);
4178 return ret;
4179}
4180
4181int
4182i915_gem_execbuffer2(struct drm_device *dev, void *data,
4183 struct drm_file *file_priv)
4184{
4185 struct drm_i915_gem_execbuffer2 *args = data;
4186 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4187 int ret;
4188
4189#if WATCH_EXEC
4190 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4191 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4192#endif
4193
4194 if (args->buffer_count < 1) {
4195 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4196 return -EINVAL;
4197 }
4198
4199 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4200 if (exec2_list == NULL) {
4201 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4202 args->buffer_count);
4203 return -ENOMEM;
4204 }
4205 ret = copy_from_user(exec2_list,
4206			     (struct drm_i915_gem_exec_object2 __user *)
4207 (uintptr_t) args->buffers_ptr,
4208 sizeof(*exec2_list) * args->buffer_count);
4209 if (ret != 0) {
4210 DRM_ERROR("copy %d exec entries failed %d\n",
4211 args->buffer_count, ret);
4212 drm_free_large(exec2_list);
4213 return -EFAULT;
4214 }
4215
4216 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4217 if (!ret) {
4218 /* Copy the new buffer offsets back to the user's exec list. */
4219		ret = copy_to_user((struct drm_i915_gem_exec_object2 __user *)
4220 (uintptr_t) args->buffers_ptr,
4221 exec2_list,
4222 sizeof(*exec2_list) * args->buffer_count);
4223 if (ret) {
4224 ret = -EFAULT;
4225 DRM_ERROR("failed to copy %d exec entries "
4226 "back to user (%d)\n",
4227 args->buffer_count, ret);
4228 }
4229 }
4230
4231 drm_free_large(exec2_list);
4232 return ret;
4233}
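
/*
 * Illustrative sketch, compiled out: not driver code, but an example of how
 * a userspace client might drive the execbuffer2 path handled above.  The
 * file descriptor and the batch handle are assumed to exist already (e.g.
 * from DRM_IOCTL_I915_GEM_CREATE); submit_batch() is a hypothetical helper
 * name and error handling is trimmed to the essentials.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_len)
{
	struct drm_i915_gem_exec_object2 exec_obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	/* The last entry in the buffer list is the batch buffer itself. */
	memset(&exec_obj, 0, sizeof(exec_obj));
	exec_obj.handle = batch_handle;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&exec_obj;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
#endif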
4234
Eric Anholt673a3942008-07-30 12:06:12 -07004235int
4236i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4237{
4238 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01004239 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004240 int ret;
4241
4242 i915_verify_inactive(dev, __FILE__, __LINE__);
4243 if (obj_priv->gtt_space == NULL) {
4244 ret = i915_gem_object_bind_to_gtt(obj, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01004245 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07004246 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00004247 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05004248
Eric Anholt673a3942008-07-30 12:06:12 -07004249 obj_priv->pin_count++;
4250
4251 /* If the object is not active and not pending a flush,
4252 * remove it from the inactive list
4253 */
4254 if (obj_priv->pin_count == 1) {
4255 atomic_inc(&dev->pin_count);
4256 atomic_add(obj->size, &dev->pin_memory);
4257 if (!obj_priv->active &&
Chris Wilson21d509e2009-06-06 09:46:02 +01004258 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
Eric Anholt673a3942008-07-30 12:06:12 -07004259 !list_empty(&obj_priv->list))
4260 list_del_init(&obj_priv->list);
4261 }
4262 i915_verify_inactive(dev, __FILE__, __LINE__);
4263
4264 return 0;
4265}
4266
4267void
4268i915_gem_object_unpin(struct drm_gem_object *obj)
4269{
4270 struct drm_device *dev = obj->dev;
4271 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01004272 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004273
4274 i915_verify_inactive(dev, __FILE__, __LINE__);
4275 obj_priv->pin_count--;
4276 BUG_ON(obj_priv->pin_count < 0);
4277 BUG_ON(obj_priv->gtt_space == NULL);
4278
4279 /* If the object is no longer pinned, and is
4280 * neither active nor being flushed, then stick it on
4281 * the inactive list
4282 */
4283 if (obj_priv->pin_count == 0) {
4284 if (!obj_priv->active &&
Chris Wilson21d509e2009-06-06 09:46:02 +01004285 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Eric Anholt673a3942008-07-30 12:06:12 -07004286 list_move_tail(&obj_priv->list,
4287 &dev_priv->mm.inactive_list);
4288 atomic_dec(&dev->pin_count);
4289 atomic_sub(obj->size, &dev->pin_memory);
4290 }
4291 i915_verify_inactive(dev, __FILE__, __LINE__);
4292}
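
/*
 * Illustrative sketch, compiled out: the usual in-kernel pattern for the
 * pin/unpin pair above.  example_use_pinned() is a hypothetical helper;
 * the caller is assumed to hold dev->struct_mutex, as the ioctls below do.
 * The object must stay pinned for as long as the hardware references the
 * GTT offset obtained here.
 */
#if 0
static int example_use_pinned(struct drm_gem_object *obj, uint32_t *gtt_offset)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret)
		return ret;

	/* Stable from here until the matching unpin. */
	*gtt_offset = obj_priv->gtt_offset;

	/* ... program the hardware, wait until it stops using the offset ... */

	i915_gem_object_unpin(obj);
	return 0;
}
#endif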
4293
4294int
4295i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4296 struct drm_file *file_priv)
4297{
4298 struct drm_i915_gem_pin *args = data;
4299 struct drm_gem_object *obj;
4300 struct drm_i915_gem_object *obj_priv;
4301 int ret;
4302
4303 mutex_lock(&dev->struct_mutex);
4304
4305 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4306 if (obj == NULL) {
4307 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
4308 args->handle);
4309 mutex_unlock(&dev->struct_mutex);
4310 return -EBADF;
4311 }
Daniel Vetter23010e42010-03-08 13:35:02 +01004312 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004313
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004314 if (obj_priv->madv != I915_MADV_WILLNEED) {
4315 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson3ef94da2009-09-14 16:50:29 +01004316 drm_gem_object_unreference(obj);
4317 mutex_unlock(&dev->struct_mutex);
4318 return -EINVAL;
4319 }
4320
Jesse Barnes79e53942008-11-07 14:24:08 -08004321 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4322 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4323 args->handle);
Chris Wilson96dec612009-02-08 19:08:04 +00004324 drm_gem_object_unreference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004325 mutex_unlock(&dev->struct_mutex);
Jesse Barnes79e53942008-11-07 14:24:08 -08004326 return -EINVAL;
4327 }
4328
4329 if (obj_priv->user_pin_count == 0) {
4330 ret = i915_gem_object_pin(obj, args->alignment);
4331 if (ret != 0) {
4332 drm_gem_object_unreference(obj);
4333 mutex_unlock(&dev->struct_mutex);
4334 return ret;
4335 }
4336 }
4337 obj_priv->user_pin_count++;
4338 obj_priv->pin_filp = file_priv;
4339
4340 /* XXX - flush the CPU caches for pinned objects
4341 * as the X server doesn't manage domains yet
4342 */
Eric Anholte47c68e2008-11-14 13:35:19 -08004343 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004344 args->offset = obj_priv->gtt_offset;
4345 drm_gem_object_unreference(obj);
4346 mutex_unlock(&dev->struct_mutex);
4347
4348 return 0;
4349}
4350
4351int
4352i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4353 struct drm_file *file_priv)
4354{
4355 struct drm_i915_gem_pin *args = data;
4356 struct drm_gem_object *obj;
Jesse Barnes79e53942008-11-07 14:24:08 -08004357 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07004358
4359 mutex_lock(&dev->struct_mutex);
4360
4361 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4362 if (obj == NULL) {
4363 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
4364 args->handle);
4365 mutex_unlock(&dev->struct_mutex);
4366 return -EBADF;
4367 }
4368
Daniel Vetter23010e42010-03-08 13:35:02 +01004369 obj_priv = to_intel_bo(obj);
Jesse Barnes79e53942008-11-07 14:24:08 -08004370 if (obj_priv->pin_filp != file_priv) {
4371 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4372 args->handle);
4373 drm_gem_object_unreference(obj);
4374 mutex_unlock(&dev->struct_mutex);
4375 return -EINVAL;
4376 }
4377 obj_priv->user_pin_count--;
4378 if (obj_priv->user_pin_count == 0) {
4379 obj_priv->pin_filp = NULL;
4380 i915_gem_object_unpin(obj);
4381 }
Eric Anholt673a3942008-07-30 12:06:12 -07004382
4383 drm_gem_object_unreference(obj);
4384 mutex_unlock(&dev->struct_mutex);
4385 return 0;
4386}
4387
4388int
4389i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4390 struct drm_file *file_priv)
4391{
4392 struct drm_i915_gem_busy *args = data;
4393 struct drm_gem_object *obj;
4394 struct drm_i915_gem_object *obj_priv;
4395
Eric Anholt673a3942008-07-30 12:06:12 -07004396 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4397 if (obj == NULL) {
4398 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
4399 args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07004400 return -EBADF;
4401 }
4402
Chris Wilsonb1ce7862009-06-06 09:46:00 +01004403 mutex_lock(&dev->struct_mutex);
Eric Anholtf21289b2009-02-18 09:44:56 -08004404 /* Update the active list for the hardware's current position.
4405 * Otherwise this only updates on a delayed timer or when irqs are
4406 * actually unmasked, and our working set ends up being larger than
4407 * required.
4408 */
4409 i915_gem_retire_requests(dev);
4410
Daniel Vetter23010e42010-03-08 13:35:02 +01004411 obj_priv = to_intel_bo(obj);
Eric Anholtc4de0a52008-12-14 19:05:04 -08004412 /* Don't count being on the flushing list against the object being
4413 * done. Otherwise, a buffer left on the flushing list but not getting
4414 * flushed (because nobody's flushing that domain) won't ever return
4415 * unbusy and get reused by libdrm's bo cache. The other expected
4416 * consumer of this interface, OpenGL's occlusion queries, also specs
4417 * that the objects get unbusy "eventually" without any interference.
4418 */
4419 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004420
4421 drm_gem_object_unreference(obj);
4422 mutex_unlock(&dev->struct_mutex);
4423 return 0;
4424}
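
/*
 * Illustrative sketch, compiled out: a userspace view of the busy ioctl
 * handled above.  "fd" and "handle" are assumed to come from the caller;
 * bo_is_busy() is a hypothetical helper.  Returns 1 if the buffer is still
 * on the active list, 0 when it has become idle, -1 on error.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int bo_is_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy;

	memset(&busy, 0, sizeof(busy));
	busy.handle = handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
		return -1;	/* e.g. bad handle */
	return busy.busy != 0;
}
#endif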
4425
4426int
4427i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4428 struct drm_file *file_priv)
4429{
4430 return i915_gem_ring_throttle(dev, file_priv);
4431}
4432
Chris Wilson3ef94da2009-09-14 16:50:29 +01004433int
4434i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4435 struct drm_file *file_priv)
4436{
4437 struct drm_i915_gem_madvise *args = data;
4438 struct drm_gem_object *obj;
4439 struct drm_i915_gem_object *obj_priv;
4440
4441 switch (args->madv) {
4442 case I915_MADV_DONTNEED:
4443 case I915_MADV_WILLNEED:
4444 break;
4445 default:
4446 return -EINVAL;
4447 }
4448
4449 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4450 if (obj == NULL) {
4451 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4452 args->handle);
4453 return -EBADF;
4454 }
4455
4456 mutex_lock(&dev->struct_mutex);
Daniel Vetter23010e42010-03-08 13:35:02 +01004457 obj_priv = to_intel_bo(obj);
Chris Wilson3ef94da2009-09-14 16:50:29 +01004458
4459 if (obj_priv->pin_count) {
4460 drm_gem_object_unreference(obj);
4461 mutex_unlock(&dev->struct_mutex);
4462
4463 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4464 return -EINVAL;
4465 }
4466
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004467 if (obj_priv->madv != __I915_MADV_PURGED)
4468 obj_priv->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004469
Chris Wilson2d7ef392009-09-20 23:13:10 +01004470 /* if the object is no longer bound, discard its backing storage */
4471 if (i915_gem_object_is_purgeable(obj_priv) &&
4472 obj_priv->gtt_space == NULL)
4473 i915_gem_object_truncate(obj);
4474
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004475 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4476
Chris Wilson3ef94da2009-09-14 16:50:29 +01004477 drm_gem_object_unreference(obj);
4478 mutex_unlock(&dev->struct_mutex);
4479
4480 return 0;
4481}
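
/*
 * Illustrative sketch, compiled out: the madvise protocol implemented
 * above.  A buffer cache marks an idle buffer DONTNEED; before reusing it,
 * it marks the buffer WILLNEED and checks "retained" to learn whether the
 * backing pages survived a shrinker purge.  Field names follow
 * struct drm_i915_gem_madvise; bo_madvise() itself is a hypothetical helper.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int bo_madvise(int fd, uint32_t handle, uint32_t madv, int *retained)
{
	struct drm_i915_gem_madvise arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.madv = madv;	/* I915_MADV_WILLNEED or I915_MADV_DONTNEED */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg) != 0)
		return -1;
	if (retained)
		*retained = arg.retained;
	return 0;
}
#endif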
4482
Daniel Vetterac52bc52010-04-09 19:05:06 +00004483struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4484 size_t size)
4485{
Daniel Vetterc397b902010-04-09 19:05:07 +00004486 struct drm_i915_gem_object *obj;
4487
4488 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4489 if (obj == NULL)
4490 return NULL;
4491
4492 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4493 kfree(obj);
4494 return NULL;
4495 }
4496
4497 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4498 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4499
4500 obj->agp_type = AGP_USER_MEMORY;
Daniel Vetter62b8b212010-04-09 19:05:08 +00004501 obj->base.driver_private = NULL;
Daniel Vetterc397b902010-04-09 19:05:07 +00004502 obj->fence_reg = I915_FENCE_REG_NONE;
4503 INIT_LIST_HEAD(&obj->list);
4504 INIT_LIST_HEAD(&obj->gpu_write_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00004505 obj->madv = I915_MADV_WILLNEED;
4506
4507 trace_i915_gem_object_create(&obj->base);
4508
4509 return &obj->base;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004510}
4511
Eric Anholt673a3942008-07-30 12:06:12 -07004512int i915_gem_init_object(struct drm_gem_object *obj)
4513{
Daniel Vetterc397b902010-04-09 19:05:07 +00004514 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08004515
Eric Anholt673a3942008-07-30 12:06:12 -07004516 return 0;
4517}
4518
4519void i915_gem_free_object(struct drm_gem_object *obj)
4520{
Jesse Barnesde151cf2008-11-12 10:03:55 -08004521 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01004522 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004523
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004524 trace_i915_gem_object_destroy(obj);
4525
Eric Anholt673a3942008-07-30 12:06:12 -07004526 while (obj_priv->pin_count > 0)
4527 i915_gem_object_unpin(obj);
4528
Dave Airlie71acb5e2008-12-30 20:31:46 +10004529 if (obj_priv->phys_obj)
4530 i915_gem_detach_phys_object(dev, obj);
4531
Eric Anholt673a3942008-07-30 12:06:12 -07004532 i915_gem_object_unbind(obj);
4533
Chris Wilson7e616152009-09-10 08:53:04 +01004534 if (obj_priv->mmap_offset)
4535 i915_gem_free_mmap_offset(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08004536
Daniel Vetterc397b902010-04-09 19:05:07 +00004537 drm_gem_object_release(obj);
4538
Eric Anholt9a298b22009-03-24 12:23:04 -07004539 kfree(obj_priv->page_cpu_valid);
Eric Anholt280b7132009-03-12 16:56:27 -07004540 kfree(obj_priv->bit_17);
Daniel Vetterc397b902010-04-09 19:05:07 +00004541 kfree(obj_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07004542}
4543
Chris Wilsonab5ee572009-09-20 19:25:47 +01004544/** Unbinds all inactive objects. */
Eric Anholt673a3942008-07-30 12:06:12 -07004545static int
Chris Wilsonab5ee572009-09-20 19:25:47 +01004546i915_gem_evict_from_inactive_list(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07004547{
Chris Wilsonab5ee572009-09-20 19:25:47 +01004548 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07004549
Chris Wilsonab5ee572009-09-20 19:25:47 +01004550 while (!list_empty(&dev_priv->mm.inactive_list)) {
4551 struct drm_gem_object *obj;
4552 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004553
Daniel Vettera8089e82010-04-09 19:05:09 +00004554 obj = &list_first_entry(&dev_priv->mm.inactive_list,
4555 struct drm_i915_gem_object,
4556 list)->base;
Eric Anholt673a3942008-07-30 12:06:12 -07004557
4558 ret = i915_gem_object_unbind(obj);
4559 if (ret != 0) {
Chris Wilsonab5ee572009-09-20 19:25:47 +01004560 DRM_ERROR("Error unbinding object: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004561 return ret;
4562 }
4563 }
4564
Eric Anholt673a3942008-07-30 12:06:12 -07004565 return 0;
4566}
4567
Jesse Barnes5669fca2009-02-17 15:13:31 -08004568int
Eric Anholt673a3942008-07-30 12:06:12 -07004569i915_gem_idle(struct drm_device *dev)
4570{
4571 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00004572 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004573
Keith Packard6dbe2772008-10-14 21:41:13 -07004574 mutex_lock(&dev->struct_mutex);
4575
4576 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
4577 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004578 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07004579 }
Eric Anholt673a3942008-07-30 12:06:12 -07004580
Chris Wilson29105cc2010-01-07 10:39:13 +00004581 ret = i915_gpu_idle(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004582 if (ret) {
4583 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004584 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07004585 }
Eric Anholt673a3942008-07-30 12:06:12 -07004586
Chris Wilson29105cc2010-01-07 10:39:13 +00004587 /* Under UMS, be paranoid and evict. */
4588 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4589 ret = i915_gem_evict_from_inactive_list(dev);
4590 if (ret) {
4591 mutex_unlock(&dev->struct_mutex);
4592 return ret;
4593 }
4594 }
4595
4596 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4597 * We need to replace this with a semaphore, or something.
4598 * And not confound mm.suspended!
4599 */
4600 dev_priv->mm.suspended = 1;
4601 del_timer(&dev_priv->hangcheck_timer);
4602
4603 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004604 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004605
Keith Packard6dbe2772008-10-14 21:41:13 -07004606 mutex_unlock(&dev->struct_mutex);
4607
Chris Wilson29105cc2010-01-07 10:39:13 +00004608 /* Cancel the retire work handler, which should be idle now. */
4609 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4610
Eric Anholt673a3942008-07-30 12:06:12 -07004611 return 0;
4612}
4613
Jesse Barnese552eb72010-04-21 11:39:23 -07004614/*
4615 * 965+ parts support PIPE_CONTROL commands, which provide finer-grained
4616 * control over cache flushing.
4617 */
4618static int
4619i915_gem_init_pipe_control(struct drm_device *dev)
4620{
4621 drm_i915_private_t *dev_priv = dev->dev_private;
4622 struct drm_gem_object *obj;
4623 struct drm_i915_gem_object *obj_priv;
4624 int ret;
4625
Eric Anholt34dc4d42010-05-07 14:30:03 -07004626 obj = i915_gem_alloc_object(dev, 4096);
Jesse Barnese552eb72010-04-21 11:39:23 -07004627 if (obj == NULL) {
4628 DRM_ERROR("Failed to allocate seqno page\n");
4629 ret = -ENOMEM;
4630 goto err;
4631 }
4632 obj_priv = to_intel_bo(obj);
4633 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4634
4635 ret = i915_gem_object_pin(obj, 4096);
4636 if (ret)
4637 goto err_unref;
4638
4639 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4640 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4641 if (dev_priv->seqno_page == NULL)
4642 goto err_unpin;
4643
4644 dev_priv->seqno_obj = obj;
4645 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4646
4647 return 0;
4648
4649err_unpin:
4650 i915_gem_object_unpin(obj);
4651err_unref:
4652 drm_gem_object_unreference(obj);
4653err:
4654 return ret;
4655}
4656
Eric Anholt673a3942008-07-30 12:06:12 -07004657static int
4658i915_gem_init_hws(struct drm_device *dev)
4659{
4660 drm_i915_private_t *dev_priv = dev->dev_private;
4661 struct drm_gem_object *obj;
4662 struct drm_i915_gem_object *obj_priv;
4663 int ret;
4664
4665 /* If we need a physical address for the status page, it's already
4666 * initialized at driver load time.
4667 */
4668 if (!I915_NEED_GFX_HWS(dev))
4669 return 0;
4670
Daniel Vetterac52bc52010-04-09 19:05:06 +00004671 obj = i915_gem_alloc_object(dev, 4096);
Eric Anholt673a3942008-07-30 12:06:12 -07004672 if (obj == NULL) {
4673 DRM_ERROR("Failed to allocate status page\n");
Jesse Barnese552eb72010-04-21 11:39:23 -07004674 ret = -ENOMEM;
4675 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07004676 }
Daniel Vetter23010e42010-03-08 13:35:02 +01004677 obj_priv = to_intel_bo(obj);
Keith Packardba1eb1d2008-10-14 19:55:10 -07004678 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
Eric Anholt673a3942008-07-30 12:06:12 -07004679
4680 ret = i915_gem_object_pin(obj, 4096);
4681 if (ret != 0) {
4682 drm_gem_object_unreference(obj);
Jesse Barnese552eb72010-04-21 11:39:23 -07004683 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07004684 }
4685
4686 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07004687
Eric Anholt856fa192009-03-19 14:10:50 -07004688 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
Keith Packardba1eb1d2008-10-14 19:55:10 -07004689 if (dev_priv->hw_status_page == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07004690 DRM_ERROR("Failed to map status page.\n");
4691 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
Jesse Barnese552eb72010-04-21 11:39:23 -07004692 ret = -EINVAL;
4693 goto err_unpin;
Eric Anholt673a3942008-07-30 12:06:12 -07004694 }
Jesse Barnese552eb72010-04-21 11:39:23 -07004695
4696 if (HAS_PIPE_CONTROL(dev)) {
4697 ret = i915_gem_init_pipe_control(dev);
4698 if (ret)
4699 goto err_unpin;
4700 }
4701
Eric Anholt673a3942008-07-30 12:06:12 -07004702 dev_priv->hws_obj = obj;
Eric Anholt673a3942008-07-30 12:06:12 -07004703 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
Eric Anholtf6e450a2009-11-02 12:08:22 -08004704 if (IS_GEN6(dev)) {
4705 I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
4706 I915_READ(HWS_PGA_GEN6); /* posting read */
4707 } else {
4708 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4709 I915_READ(HWS_PGA); /* posting read */
4710 }
Zhao Yakui44d98a62009-10-09 11:39:40 +08004711 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
Eric Anholt673a3942008-07-30 12:06:12 -07004712
4713 return 0;
Jesse Barnese552eb72010-04-21 11:39:23 -07004714
4715err_unpin:
4716 i915_gem_object_unpin(obj);
4717err_unref:
4718 drm_gem_object_unreference(obj);
4719err:
4720 return ret;
4721}
4722
4723static void
4724i915_gem_cleanup_pipe_control(struct drm_device *dev)
4725{
4726 drm_i915_private_t *dev_priv = dev->dev_private;
4727 struct drm_gem_object *obj;
4728 struct drm_i915_gem_object *obj_priv;
4729
4730 obj = dev_priv->seqno_obj;
4731 obj_priv = to_intel_bo(obj);
4732 kunmap(obj_priv->pages[0]);
4733 i915_gem_object_unpin(obj);
4734 drm_gem_object_unreference(obj);
4735 dev_priv->seqno_obj = NULL;
4736
4737 dev_priv->seqno_page = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07004738}
4739
Chris Wilson85a7bb92009-02-11 14:52:44 +00004740static void
4741i915_gem_cleanup_hws(struct drm_device *dev)
4742{
4743 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00004744 struct drm_gem_object *obj;
4745 struct drm_i915_gem_object *obj_priv;
Chris Wilson85a7bb92009-02-11 14:52:44 +00004746
4747 if (dev_priv->hws_obj == NULL)
4748 return;
4749
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00004750 obj = dev_priv->hws_obj;
Daniel Vetter23010e42010-03-08 13:35:02 +01004751 obj_priv = to_intel_bo(obj);
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00004752
Eric Anholt856fa192009-03-19 14:10:50 -07004753 kunmap(obj_priv->pages[0]);
Chris Wilson85a7bb92009-02-11 14:52:44 +00004754 i915_gem_object_unpin(obj);
4755 drm_gem_object_unreference(obj);
4756 dev_priv->hws_obj = NULL;
Chris Wilsonbab2d1f2009-02-20 17:52:20 +00004757
Chris Wilson85a7bb92009-02-11 14:52:44 +00004758 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4759 dev_priv->hw_status_page = NULL;
4760
Jesse Barnese552eb72010-04-21 11:39:23 -07004761 if (HAS_PIPE_CONTROL(dev))
4762 i915_gem_cleanup_pipe_control(dev);
4763
Chris Wilson85a7bb92009-02-11 14:52:44 +00004764 /* Write high address into HWS_PGA when disabling. */
4765 I915_WRITE(HWS_PGA, 0x1ffff000);
4766}
4767
Jesse Barnes79e53942008-11-07 14:24:08 -08004768int
Eric Anholt673a3942008-07-30 12:06:12 -07004769i915_gem_init_ringbuffer(struct drm_device *dev)
4770{
4771 drm_i915_private_t *dev_priv = dev->dev_private;
4772 struct drm_gem_object *obj;
4773 struct drm_i915_gem_object *obj_priv;
Jesse Barnes79e53942008-11-07 14:24:08 -08004774 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
Eric Anholt673a3942008-07-30 12:06:12 -07004775 int ret;
Keith Packard50aa2532008-10-14 17:20:35 -07004776 u32 head;
Eric Anholt673a3942008-07-30 12:06:12 -07004777
4778 ret = i915_gem_init_hws(dev);
4779 if (ret != 0)
4780 return ret;
4781
Daniel Vetterac52bc52010-04-09 19:05:06 +00004782 obj = i915_gem_alloc_object(dev, 128 * 1024);
Eric Anholt673a3942008-07-30 12:06:12 -07004783 if (obj == NULL) {
4784 DRM_ERROR("Failed to allocate ringbuffer\n");
Chris Wilson85a7bb92009-02-11 14:52:44 +00004785 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004786 return -ENOMEM;
4787 }
Daniel Vetter23010e42010-03-08 13:35:02 +01004788 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004789
4790 ret = i915_gem_object_pin(obj, 4096);
4791 if (ret != 0) {
4792 drm_gem_object_unreference(obj);
Chris Wilson85a7bb92009-02-11 14:52:44 +00004793 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004794 return ret;
4795 }
4796
4797 /* Set up the kernel mapping for the ring. */
Jesse Barnes79e53942008-11-07 14:24:08 -08004798 ring->Size = obj->size;
Eric Anholt673a3942008-07-30 12:06:12 -07004799
Jesse Barnes79e53942008-11-07 14:24:08 -08004800 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
4801 ring->map.size = obj->size;
4802 ring->map.type = 0;
4803 ring->map.flags = 0;
4804 ring->map.mtrr = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07004805
Jesse Barnes79e53942008-11-07 14:24:08 -08004806 drm_core_ioremap_wc(&ring->map, dev);
4807 if (ring->map.handle == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07004808 DRM_ERROR("Failed to map ringbuffer.\n");
4809 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
Chris Wilson47ed1852009-02-11 14:26:33 +00004810 i915_gem_object_unpin(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004811 drm_gem_object_unreference(obj);
Chris Wilson85a7bb92009-02-11 14:52:44 +00004812 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004813 return -EINVAL;
4814 }
Jesse Barnes79e53942008-11-07 14:24:08 -08004815 ring->ring_obj = obj;
4816 ring->virtual_start = ring->map.handle;
Eric Anholt673a3942008-07-30 12:06:12 -07004817
4818 /* Stop the ring if it's running. */
4819 I915_WRITE(PRB0_CTL, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004820 I915_WRITE(PRB0_TAIL, 0);
Keith Packard50aa2532008-10-14 17:20:35 -07004821 I915_WRITE(PRB0_HEAD, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004822
4823 /* Initialize the ring. */
4824 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
Keith Packard50aa2532008-10-14 17:20:35 -07004825 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4826
4827 /* G45 ring initialization fails to reset head to zero */
4828 if (head != 0) {
4829 DRM_ERROR("Ring head not reset to zero "
4830 "ctl %08x head %08x tail %08x start %08x\n",
4831 I915_READ(PRB0_CTL),
4832 I915_READ(PRB0_HEAD),
4833 I915_READ(PRB0_TAIL),
4834 I915_READ(PRB0_START));
4835 I915_WRITE(PRB0_HEAD, 0);
4836
4837 DRM_ERROR("Ring head forced to zero "
4838 "ctl %08x head %08x tail %08x start %08x\n",
4839 I915_READ(PRB0_CTL),
4840 I915_READ(PRB0_HEAD),
4841 I915_READ(PRB0_TAIL),
4842 I915_READ(PRB0_START));
4843 }
4844
Eric Anholt673a3942008-07-30 12:06:12 -07004845 I915_WRITE(PRB0_CTL,
4846 ((obj->size - 4096) & RING_NR_PAGES) |
4847 RING_NO_REPORT |
4848 RING_VALID);
4849
Keith Packard50aa2532008-10-14 17:20:35 -07004850 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4851
4852 /* If the head is still not zero, the ring is dead */
4853 if (head != 0) {
4854 DRM_ERROR("Ring initialization failed "
4855 "ctl %08x head %08x tail %08x start %08x\n",
4856 I915_READ(PRB0_CTL),
4857 I915_READ(PRB0_HEAD),
4858 I915_READ(PRB0_TAIL),
4859 I915_READ(PRB0_START));
4860 return -EIO;
4861 }
4862
Eric Anholt673a3942008-07-30 12:06:12 -07004863 /* Update our cache of the ring state */
Jesse Barnes79e53942008-11-07 14:24:08 -08004864 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4865 i915_kernel_lost_context(dev);
4866 else {
4867 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4868 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
4869 ring->space = ring->head - (ring->tail + 8);
4870 if (ring->space < 0)
4871 ring->space += ring->Size;
4872 }
Eric Anholt673a3942008-07-30 12:06:12 -07004873
Eric Anholt71cf39b2010-03-08 23:41:55 -08004874 if (IS_I9XX(dev) && !IS_GEN3(dev)) {
4875 I915_WRITE(MI_MODE,
4876 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
4877 }
4878
Eric Anholt673a3942008-07-30 12:06:12 -07004879 return 0;
4880}
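
/*
 * Illustrative sketch, compiled out: the circular-buffer arithmetic behind
 * the ring->space update above.  Free space is the gap from tail forward to
 * head, with 8 bytes held back so a completely full ring never ends up with
 * tail == head (which would read back as empty); a negative result simply
 * means the gap wraps past the end of the ring.
 */
#if 0
static int example_ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += size;
	return space;
}
#endif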
4881
Jesse Barnes79e53942008-11-07 14:24:08 -08004882void
Eric Anholt673a3942008-07-30 12:06:12 -07004883i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4884{
4885 drm_i915_private_t *dev_priv = dev->dev_private;
4886
4887 if (dev_priv->ring.ring_obj == NULL)
4888 return;
4889
4890 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4891
4892 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4893 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4894 dev_priv->ring.ring_obj = NULL;
4895 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4896
Chris Wilson85a7bb92009-02-11 14:52:44 +00004897 i915_gem_cleanup_hws(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004898}
4899
4900int
4901i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4902 struct drm_file *file_priv)
4903{
4904 drm_i915_private_t *dev_priv = dev->dev_private;
4905 int ret;
4906
Jesse Barnes79e53942008-11-07 14:24:08 -08004907 if (drm_core_check_feature(dev, DRIVER_MODESET))
4908 return 0;
4909
Ben Gamariba1234d2009-09-14 17:48:47 -04004910 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004911 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04004912 atomic_set(&dev_priv->mm.wedged, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004913 }
4914
Eric Anholt673a3942008-07-30 12:06:12 -07004915 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004916 dev_priv->mm.suspended = 0;
4917
4918 ret = i915_gem_init_ringbuffer(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004919 if (ret != 0) {
4920 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004921 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004922 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004923
Carl Worth5e118f42009-03-20 11:54:25 -07004924 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07004925 BUG_ON(!list_empty(&dev_priv->mm.active_list));
Carl Worth5e118f42009-03-20 11:54:25 -07004926 spin_unlock(&dev_priv->mm.active_list_lock);
4927
Eric Anholt673a3942008-07-30 12:06:12 -07004928 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4929 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4930 BUG_ON(!list_empty(&dev_priv->mm.request_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004931 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004932
4933 drm_irq_install(dev);
4934
Eric Anholt673a3942008-07-30 12:06:12 -07004935 return 0;
4936}
4937
4938int
4939i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4940 struct drm_file *file_priv)
4941{
Jesse Barnes79e53942008-11-07 14:24:08 -08004942 if (drm_core_check_feature(dev, DRIVER_MODESET))
4943 return 0;
4944
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004945 drm_irq_uninstall(dev);
Linus Torvaldse6890f62009-09-08 17:09:24 -07004946 return i915_gem_idle(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004947}
4948
4949void
4950i915_gem_lastclose(struct drm_device *dev)
4951{
4952 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004953
Eric Anholte806b492009-01-22 09:56:58 -08004954 if (drm_core_check_feature(dev, DRIVER_MODESET))
4955 return;
4956
Keith Packard6dbe2772008-10-14 21:41:13 -07004957 ret = i915_gem_idle(dev);
4958 if (ret)
4959 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004960}
4961
4962void
4963i915_gem_load(struct drm_device *dev)
4964{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004965 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07004966 drm_i915_private_t *dev_priv = dev->dev_private;
4967
Carl Worth5e118f42009-03-20 11:54:25 -07004968 spin_lock_init(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07004969 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4970 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
Daniel Vetter99fcb762010-02-07 16:20:18 +01004971 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004972 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4973 INIT_LIST_HEAD(&dev_priv->mm.request_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004974 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02004975 for (i = 0; i < 16; i++)
4976 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004977 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4978 i915_gem_retire_work_handler);
Eric Anholt673a3942008-07-30 12:06:12 -07004979 dev_priv->mm.next_gem_seqno = 1;
4980
Chris Wilson31169712009-09-14 16:50:28 +01004981 spin_lock(&shrink_list_lock);
4982 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4983 spin_unlock(&shrink_list_lock);
4984
Jesse Barnesde151cf2008-11-12 10:03:55 -08004985 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08004986 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4987 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004988
Jesse Barnes0f973f22009-01-26 17:10:45 -08004989 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004990 dev_priv->num_fence_regs = 16;
4991 else
4992 dev_priv->num_fence_regs = 8;
4993
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004994 /* Initialize fence registers to zero */
4995 if (IS_I965G(dev)) {
4996 for (i = 0; i < 16; i++)
4997 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4998 } else {
4999 for (i = 0; i < 8; i++)
5000 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
5001 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5002 for (i = 0; i < 8; i++)
5003 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
5004 }
Eric Anholt673a3942008-07-30 12:06:12 -07005005 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05005006 init_waitqueue_head(&dev_priv->pending_flip_queue);
Eric Anholt673a3942008-07-30 12:06:12 -07005007}
Dave Airlie71acb5e2008-12-30 20:31:46 +10005008
5009/*
5010 * Create a physically contiguous memory object for the given id and size,
5011 * e.g. for cursor + overlay regs
5012 */
5013int i915_gem_init_phys_object(struct drm_device *dev,
5014 int id, int size)
5015{
5016 drm_i915_private_t *dev_priv = dev->dev_private;
5017 struct drm_i915_gem_phys_object *phys_obj;
5018 int ret;
5019
5020 if (dev_priv->mm.phys_objs[id - 1] || !size)
5021 return 0;
5022
Eric Anholt9a298b22009-03-24 12:23:04 -07005023 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005024 if (!phys_obj)
5025 return -ENOMEM;
5026
5027 phys_obj->id = id;
5028
Zhenyu Wange6be8d92010-01-05 11:25:05 +08005029 phys_obj->handle = drm_pci_alloc(dev, size, 0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005030 if (!phys_obj->handle) {
5031 ret = -ENOMEM;
5032 goto kfree_obj;
5033 }
5034#ifdef CONFIG_X86
5035 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
5036#endif
5037
5038 dev_priv->mm.phys_objs[id - 1] = phys_obj;
5039
5040 return 0;
5041kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07005042 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005043 return ret;
5044}
5045
5046void i915_gem_free_phys_object(struct drm_device *dev, int id)
5047{
5048 drm_i915_private_t *dev_priv = dev->dev_private;
5049 struct drm_i915_gem_phys_object *phys_obj;
5050
5051 if (!dev_priv->mm.phys_objs[id - 1])
5052 return;
5053
5054 phys_obj = dev_priv->mm.phys_objs[id - 1];
5055 if (phys_obj->cur_obj) {
5056 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
5057 }
5058
5059#ifdef CONFIG_X86
5060 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
5061#endif
5062 drm_pci_free(dev, phys_obj->handle);
5063 kfree(phys_obj);
5064 dev_priv->mm.phys_objs[id - 1] = NULL;
5065}
5066
5067void i915_gem_free_all_phys_object(struct drm_device *dev)
5068{
5069 int i;
5070
Dave Airlie260883c2009-01-22 17:58:49 +10005071 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10005072 i915_gem_free_phys_object(dev, i);
5073}
5074
5075void i915_gem_detach_phys_object(struct drm_device *dev,
5076 struct drm_gem_object *obj)
5077{
5078 struct drm_i915_gem_object *obj_priv;
5079 int i;
5080 int ret;
5081 int page_count;
5082
Daniel Vetter23010e42010-03-08 13:35:02 +01005083 obj_priv = to_intel_bo(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005084 if (!obj_priv->phys_obj)
5085 return;
5086
Chris Wilson4bdadb92010-01-27 13:36:32 +00005087 ret = i915_gem_object_get_pages(obj, 0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005088 if (ret)
5089 goto out;
5090
5091 page_count = obj->size / PAGE_SIZE;
5092
5093 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07005094 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005095 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
5096
5097 memcpy(dst, src, PAGE_SIZE);
5098 kunmap_atomic(dst, KM_USER0);
5099 }
Eric Anholt856fa192009-03-19 14:10:50 -07005100 drm_clflush_pages(obj_priv->pages, page_count);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005101 drm_agp_chipset_flush(dev);
Chris Wilsond78b47b2009-06-17 21:52:49 +01005102
5103 i915_gem_object_put_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005104out:
5105 obj_priv->phys_obj->cur_obj = NULL;
5106 obj_priv->phys_obj = NULL;
5107}
5108
5109int
5110i915_gem_attach_phys_object(struct drm_device *dev,
5111 struct drm_gem_object *obj, int id)
5112{
5113 drm_i915_private_t *dev_priv = dev->dev_private;
5114 struct drm_i915_gem_object *obj_priv;
5115 int ret = 0;
5116 int page_count;
5117 int i;
5118
5119 if (id > I915_MAX_PHYS_OBJECT)
5120 return -EINVAL;
5121
Daniel Vetter23010e42010-03-08 13:35:02 +01005122 obj_priv = to_intel_bo(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005123
5124 if (obj_priv->phys_obj) {
5125 if (obj_priv->phys_obj->id == id)
5126 return 0;
5127 i915_gem_detach_phys_object(dev, obj);
5128 }
5129
5130
5131 /* create a new object */
5132 if (!dev_priv->mm.phys_objs[id - 1]) {
5133 ret = i915_gem_init_phys_object(dev, id,
5134 obj->size);
5135 if (ret) {
Linus Torvaldsaeb565d2009-01-26 10:01:53 -08005136 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005137 goto out;
5138 }
5139 }
5140
5141 /* bind to the object */
5142 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
5143 obj_priv->phys_obj->cur_obj = obj;
5144
Chris Wilson4bdadb92010-01-27 13:36:32 +00005145 ret = i915_gem_object_get_pages(obj, 0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005146 if (ret) {
5147 DRM_ERROR("failed to get page list\n");
5148 goto out;
5149 }
5150
5151 page_count = obj->size / PAGE_SIZE;
5152
5153 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07005154 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005155 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
5156
5157 memcpy(dst, src, PAGE_SIZE);
5158 kunmap_atomic(src, KM_USER0);
5159 }
5160
Chris Wilsond78b47b2009-06-17 21:52:49 +01005161 i915_gem_object_put_pages(obj);
5162
Dave Airlie71acb5e2008-12-30 20:31:46 +10005163 return 0;
5164out:
5165 return ret;
5166}
5167
5168static int
5169i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
5170 struct drm_i915_gem_pwrite *args,
5171 struct drm_file *file_priv)
5172{
Daniel Vetter23010e42010-03-08 13:35:02 +01005173 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005174 void *obj_addr;
5175 int ret;
5176 char __user *user_data;
5177
5178 user_data = (char __user *) (uintptr_t) args->data_ptr;
5179 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
5180
Zhao Yakui44d98a62009-10-09 11:39:40 +08005181 DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10005182 ret = copy_from_user(obj_addr, user_data, args->size);
5183 if (ret)
5184 return -EFAULT;
5185
5186 drm_agp_chipset_flush(dev);
5187 return 0;
5188}
Eric Anholtb9624422009-06-03 07:27:35 +00005189
5190void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
5191{
5192 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
5193
5194 /* Clean up our request list when the client is going away, so that
5195 * later retire_requests won't dereference our soon-to-be-gone
5196 * file_priv.
5197 */
5198 mutex_lock(&dev->struct_mutex);
5199 while (!list_empty(&i915_file_priv->mm.request_list))
5200 list_del_init(i915_file_priv->mm.request_list.next);
5201 mutex_unlock(&dev->struct_mutex);
5202}
Chris Wilson31169712009-09-14 16:50:28 +01005203
Chris Wilson31169712009-09-14 16:50:28 +01005204static int
Chris Wilson1637ef42010-04-20 17:10:35 +01005205i915_gpu_is_active(struct drm_device *dev)
5206{
5207 drm_i915_private_t *dev_priv = dev->dev_private;
5208 int lists_empty;
5209
5210 spin_lock(&dev_priv->mm.active_list_lock);
5211 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
5212 list_empty(&dev_priv->mm.active_list);
5213 spin_unlock(&dev_priv->mm.active_list_lock);
5214
5215 return !lists_empty;
5216}
5217
5218static int
Chris Wilson31169712009-09-14 16:50:28 +01005219i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
5220{
5221 drm_i915_private_t *dev_priv, *next_dev;
5222 struct drm_i915_gem_object *obj_priv, *next_obj;
5223 int cnt = 0;
5224 int would_deadlock = 1;
5225
5226 /* "fast-path" to count number of available objects */
5227 if (nr_to_scan == 0) {
5228 spin_lock(&shrink_list_lock);
5229 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5230 struct drm_device *dev = dev_priv->dev;
5231
5232 if (mutex_trylock(&dev->struct_mutex)) {
5233 list_for_each_entry(obj_priv,
5234 &dev_priv->mm.inactive_list,
5235 list)
5236 cnt++;
5237 mutex_unlock(&dev->struct_mutex);
5238 }
5239 }
5240 spin_unlock(&shrink_list_lock);
5241
5242 return (cnt / 100) * sysctl_vfs_cache_pressure;
5243 }
5244
5245 spin_lock(&shrink_list_lock);
5246
Chris Wilson1637ef42010-04-20 17:10:35 +01005247rescan:
Chris Wilson31169712009-09-14 16:50:28 +01005248 /* first scan for clean buffers */
5249 list_for_each_entry_safe(dev_priv, next_dev,
5250 &shrink_list, mm.shrink_list) {
5251 struct drm_device *dev = dev_priv->dev;
5252
5253 if (! mutex_trylock(&dev->struct_mutex))
5254 continue;
5255
5256 spin_unlock(&shrink_list_lock);
5257
5258 i915_gem_retire_requests(dev);
5259
5260 list_for_each_entry_safe(obj_priv, next_obj,
5261 &dev_priv->mm.inactive_list,
5262 list) {
5263 if (i915_gem_object_is_purgeable(obj_priv)) {
Daniel Vettera8089e82010-04-09 19:05:09 +00005264 i915_gem_object_unbind(&obj_priv->base);
Chris Wilson31169712009-09-14 16:50:28 +01005265 if (--nr_to_scan <= 0)
5266 break;
5267 }
5268 }
5269
5270 spin_lock(&shrink_list_lock);
5271 mutex_unlock(&dev->struct_mutex);
5272
Chris Wilson963b4832009-09-20 23:03:54 +01005273 would_deadlock = 0;
5274
Chris Wilson31169712009-09-14 16:50:28 +01005275 if (nr_to_scan <= 0)
5276 break;
5277 }
5278
5279 /* second pass, evict/count anything still on the inactive list */
5280 list_for_each_entry_safe(dev_priv, next_dev,
5281 &shrink_list, mm.shrink_list) {
5282 struct drm_device *dev = dev_priv->dev;
5283
5284 if (! mutex_trylock(&dev->struct_mutex))
5285 continue;
5286
5287 spin_unlock(&shrink_list_lock);
5288
5289 list_for_each_entry_safe(obj_priv, next_obj,
5290 &dev_priv->mm.inactive_list,
5291 list) {
5292 if (nr_to_scan > 0) {
Daniel Vettera8089e82010-04-09 19:05:09 +00005293 i915_gem_object_unbind(&obj_priv->base);
Chris Wilson31169712009-09-14 16:50:28 +01005294 nr_to_scan--;
5295 } else
5296 cnt++;
5297 }
5298
5299 spin_lock(&shrink_list_lock);
5300 mutex_unlock(&dev->struct_mutex);
5301
5302 would_deadlock = 0;
5303 }
5304
Chris Wilson1637ef42010-04-20 17:10:35 +01005305 if (nr_to_scan) {
5306 int active = 0;
5307
5308 /*
5309 * We are desperate for pages, so as a last resort, wait
5310 * for the GPU to finish and discard whatever we can.
5311 * This dramatically reduces the number of OOM-killer
5312 * events seen whilst running the GPU aggressively.
5313 */
5314 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5315 struct drm_device *dev = dev_priv->dev;
5316
5317 if (!mutex_trylock(&dev->struct_mutex))
5318 continue;
5319
5320 spin_unlock(&shrink_list_lock);
5321
5322 if (i915_gpu_is_active(dev)) {
5323 i915_gpu_idle(dev);
5324 active++;
5325 }
5326
5327 spin_lock(&shrink_list_lock);
5328 mutex_unlock(&dev->struct_mutex);
5329 }
5330
5331 if (active)
5332 goto rescan;
5333 }
5334
Chris Wilson31169712009-09-14 16:50:28 +01005335 spin_unlock(&shrink_list_lock);
5336
5337 if (would_deadlock)
5338 return -1;
5339 else if (cnt > 0)
5340 return (cnt / 100) * sysctl_vfs_cache_pressure;
5341 else
5342 return 0;
5343}
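
/*
 * Illustrative sketch, compiled out: the shrinker contract that
 * i915_gem_shrink() above follows (the .shrink callback API of this kernel
 * generation).  nr_to_scan == 0 only asks for a population estimate, a
 * positive value asks the callback to free roughly that many objects, and
 * -1 reports "would deadlock, try again later".  The object pool below is
 * a hypothetical stand-in.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);
static int example_object_count;	/* hypothetical reclaimable pool */

static int example_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	int cnt;

	if (!spin_trylock(&example_lock))
		return -1;		/* would deadlock, retry later */

	while (nr_to_scan-- > 0 && example_object_count > 0)
		example_object_count--;	/* stands in for freeing one object */

	cnt = example_object_count;
	spin_unlock(&example_lock);

	/* Scaled the same way as the driver's own return value above. */
	return (cnt / 100) * sysctl_vfs_cache_pressure;
}
#endif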
5344
5345static struct shrinker shrinker = {
5346 .shrink = i915_gem_shrink,
5347 .seeks = DEFAULT_SEEKS,
5348};
5349
5350__init void
5351i915_gem_shrinker_init(void)
5352{
5353 register_shrinker(&shrinker);
5354}
5355
5356__exit void
5357i915_gem_shrinker_exit(void)
5358{
5359 unregister_shrinker(&shrinker);
5360}