blob: 7749e78a7300d9bebdd0f36a6ae7fbb4d97e6db0 [file] [log] [blame]
Eric Anholt673a3942008-07-30 12:06:12 -07001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
Chris Wilson1c5d22f2009-08-25 11:15:50 +010032#include "i915_trace.h"
Jesse Barnes652c3932009-08-17 13:31:43 -070033#include "intel_drv.h"
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Eric Anholt673a3942008-07-30 12:06:12 -070035#include <linux/swap.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080036#include <linux/pci.h>
Zhenyu Wangf8f235e2010-08-27 11:08:57 +080037#include <linux/intel-gtt.h>
Eric Anholt673a3942008-07-30 12:06:12 -070038
Daniel Vetter0108a3e2010-08-07 11:01:21 +010039static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
Chris Wilson2dafb1e2010-06-07 14:03:05 +010040static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
Eric Anholte47c68e2008-11-14 13:35:19 -080041static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
42static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
Eric Anholte47c68e2008-11-14 13:35:19 -080043static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
44 int write);
45static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
46 uint64_t offset,
47 uint64_t size);
48static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
Eric Anholt673a3942008-07-30 12:06:12 -070049static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -080050static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
51 unsigned alignment);
Jesse Barnesde151cf2008-11-12 10:03:55 -080052static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +100053static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
54 struct drm_i915_gem_pwrite *args,
55 struct drm_file *file_priv);
Chris Wilsonbe726152010-07-23 23:18:50 +010056static void i915_gem_free_object_tail(struct drm_gem_object *obj);
Eric Anholt673a3942008-07-30 12:06:12 -070057
Chris Wilson31169712009-09-14 16:50:28 +010058static LIST_HEAD(shrink_list);
59static DEFINE_SPINLOCK(shrink_list_lock);
60
Chris Wilson7d1c4802010-08-07 21:45:03 +010061static inline bool
62i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
63{
64 return obj_priv->gtt_space &&
65 !obj_priv->active &&
66 obj_priv->pin_count == 0;
67}
68
Jesse Barnes79e53942008-11-07 14:24:08 -080069int i915_gem_do_init(struct drm_device *dev, unsigned long start,
70 unsigned long end)
71{
72 drm_i915_private_t *dev_priv = dev->dev_private;
73
74 if (start >= end ||
75 (start & (PAGE_SIZE - 1)) != 0 ||
76 (end & (PAGE_SIZE - 1)) != 0) {
77 return -EINVAL;
78 }
79
80 drm_mm_init(&dev_priv->mm.gtt_space, start,
81 end - start);
82
83 dev->gtt_total = (uint32_t) (end - start);
84
85 return 0;
86}
Keith Packard6dbe2772008-10-14 21:41:13 -070087
Eric Anholt673a3942008-07-30 12:06:12 -070088int
89i915_gem_init_ioctl(struct drm_device *dev, void *data,
90 struct drm_file *file_priv)
91{
Eric Anholt673a3942008-07-30 12:06:12 -070092 struct drm_i915_gem_init *args = data;
Jesse Barnes79e53942008-11-07 14:24:08 -080093 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -070094
95 mutex_lock(&dev->struct_mutex);
Jesse Barnes79e53942008-11-07 14:24:08 -080096 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
Eric Anholt673a3942008-07-30 12:06:12 -070097 mutex_unlock(&dev->struct_mutex);
98
Jesse Barnes79e53942008-11-07 14:24:08 -080099 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700100}
101
Eric Anholt5a125c32008-10-22 21:40:13 -0700102int
103i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
104 struct drm_file *file_priv)
105{
Eric Anholt5a125c32008-10-22 21:40:13 -0700106 struct drm_i915_gem_get_aperture *args = data;
Eric Anholt5a125c32008-10-22 21:40:13 -0700107
108 if (!(dev->driver->driver_features & DRIVER_GEM))
109 return -ENODEV;
110
111 args->aper_size = dev->gtt_total;
Keith Packard2678d9d2008-11-20 22:54:54 -0800112 args->aper_available_size = (args->aper_size -
113 atomic_read(&dev->pin_memory));
Eric Anholt5a125c32008-10-22 21:40:13 -0700114
115 return 0;
116}
117
Eric Anholt673a3942008-07-30 12:06:12 -0700118
119/**
120 * Creates a new mm object and returns a handle to it.
121 */
122int
123i915_gem_create_ioctl(struct drm_device *dev, void *data,
124 struct drm_file *file_priv)
125{
126 struct drm_i915_gem_create *args = data;
127 struct drm_gem_object *obj;
Pekka Paalanena1a2d1d2009-08-23 12:40:55 +0300128 int ret;
129 u32 handle;
Eric Anholt673a3942008-07-30 12:06:12 -0700130
131 args->size = roundup(args->size, PAGE_SIZE);
132
133 /* Allocate the new object */
Daniel Vetterac52bc52010-04-09 19:05:06 +0000134 obj = i915_gem_alloc_object(dev, args->size);
Eric Anholt673a3942008-07-30 12:06:12 -0700135 if (obj == NULL)
136 return -ENOMEM;
137
138 ret = drm_gem_handle_create(file_priv, obj, &handle);
Chris Wilson1dfd9752010-09-06 14:44:14 +0100139 if (ret) {
140 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700141 return ret;
Chris Wilson1dfd9752010-09-06 14:44:14 +0100142 }
143
144 /* Sink the floating reference from kref_init(handlecount) */
145 drm_gem_object_handle_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700146
147 args->handle = handle;
Eric Anholt673a3942008-07-30 12:06:12 -0700148 return 0;
149}
150
Eric Anholt40123c12009-03-09 13:42:30 -0700151static inline int
Eric Anholteb014592009-03-10 11:44:52 -0700152fast_shmem_read(struct page **pages,
153 loff_t page_base, int page_offset,
154 char __user *data,
155 int length)
156{
157 char __iomem *vaddr;
Florian Mickler2bc43b52009-04-06 22:55:41 +0200158 int unwritten;
Eric Anholteb014592009-03-10 11:44:52 -0700159
160 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
161 if (vaddr == NULL)
162 return -ENOMEM;
Florian Mickler2bc43b52009-04-06 22:55:41 +0200163 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
Eric Anholteb014592009-03-10 11:44:52 -0700164 kunmap_atomic(vaddr, KM_USER0);
165
Florian Mickler2bc43b52009-04-06 22:55:41 +0200166 if (unwritten)
167 return -EFAULT;
168
169 return 0;
Eric Anholteb014592009-03-10 11:44:52 -0700170}
171
Eric Anholt280b7132009-03-12 16:56:27 -0700172static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
173{
174 drm_i915_private_t *dev_priv = obj->dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +0100175 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt280b7132009-03-12 16:56:27 -0700176
177 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
178 obj_priv->tiling_mode != I915_TILING_NONE;
179}
180
Chris Wilson99a03df2010-05-27 14:15:34 +0100181static inline void
Eric Anholt40123c12009-03-09 13:42:30 -0700182slow_shmem_copy(struct page *dst_page,
183 int dst_offset,
184 struct page *src_page,
185 int src_offset,
186 int length)
187{
188 char *dst_vaddr, *src_vaddr;
189
Chris Wilson99a03df2010-05-27 14:15:34 +0100190 dst_vaddr = kmap(dst_page);
191 src_vaddr = kmap(src_page);
Eric Anholt40123c12009-03-09 13:42:30 -0700192
193 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
194
Chris Wilson99a03df2010-05-27 14:15:34 +0100195 kunmap(src_page);
196 kunmap(dst_page);
Eric Anholt40123c12009-03-09 13:42:30 -0700197}
198
Chris Wilson99a03df2010-05-27 14:15:34 +0100199static inline void
Eric Anholt280b7132009-03-12 16:56:27 -0700200slow_shmem_bit17_copy(struct page *gpu_page,
201 int gpu_offset,
202 struct page *cpu_page,
203 int cpu_offset,
204 int length,
205 int is_read)
206{
207 char *gpu_vaddr, *cpu_vaddr;
208
209 /* Use the unswizzled path if this page isn't affected. */
210 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
211 if (is_read)
212 return slow_shmem_copy(cpu_page, cpu_offset,
213 gpu_page, gpu_offset, length);
214 else
215 return slow_shmem_copy(gpu_page, gpu_offset,
216 cpu_page, cpu_offset, length);
217 }
218
Chris Wilson99a03df2010-05-27 14:15:34 +0100219 gpu_vaddr = kmap(gpu_page);
220 cpu_vaddr = kmap(cpu_page);
Eric Anholt280b7132009-03-12 16:56:27 -0700221
222 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
223 * XORing with the other bits (A9 for Y, A9 and A10 for X)
224 */
225 while (length > 0) {
226 int cacheline_end = ALIGN(gpu_offset + 1, 64);
227 int this_length = min(cacheline_end - gpu_offset, length);
228 int swizzled_gpu_offset = gpu_offset ^ 64;
229
230 if (is_read) {
231 memcpy(cpu_vaddr + cpu_offset,
232 gpu_vaddr + swizzled_gpu_offset,
233 this_length);
234 } else {
235 memcpy(gpu_vaddr + swizzled_gpu_offset,
236 cpu_vaddr + cpu_offset,
237 this_length);
238 }
239 cpu_offset += this_length;
240 gpu_offset += this_length;
241 length -= this_length;
242 }
243
Chris Wilson99a03df2010-05-27 14:15:34 +0100244 kunmap(cpu_page);
245 kunmap(gpu_page);
Eric Anholt280b7132009-03-12 16:56:27 -0700246}
247
Eric Anholt673a3942008-07-30 12:06:12 -0700248/**
Eric Anholteb014592009-03-10 11:44:52 -0700249 * This is the fast shmem pread path, which attempts to copy_from_user directly
250 * from the backing pages of the object to the user's address space. On a
251 * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
252 */
253static int
254i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
255 struct drm_i915_gem_pread *args,
256 struct drm_file *file_priv)
257{
Daniel Vetter23010e42010-03-08 13:35:02 +0100258 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700259 ssize_t remain;
260 loff_t offset, page_base;
261 char __user *user_data;
262 int page_offset, page_length;
263 int ret;
264
265 user_data = (char __user *) (uintptr_t) args->data_ptr;
266 remain = args->size;
267
268 mutex_lock(&dev->struct_mutex);
269
Chris Wilson4bdadb92010-01-27 13:36:32 +0000270 ret = i915_gem_object_get_pages(obj, 0);
Eric Anholteb014592009-03-10 11:44:52 -0700271 if (ret != 0)
272 goto fail_unlock;
273
274 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
275 args->size);
276 if (ret != 0)
277 goto fail_put_pages;
278
Daniel Vetter23010e42010-03-08 13:35:02 +0100279 obj_priv = to_intel_bo(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700280 offset = args->offset;
281
282 while (remain > 0) {
283 /* Operation in this page
284 *
285 * page_base = page offset within aperture
286 * page_offset = offset within page
287 * page_length = bytes to copy for this page
288 */
289 page_base = (offset & ~(PAGE_SIZE-1));
290 page_offset = offset & (PAGE_SIZE-1);
291 page_length = remain;
292 if ((page_offset + remain) > PAGE_SIZE)
293 page_length = PAGE_SIZE - page_offset;
294
295 ret = fast_shmem_read(obj_priv->pages,
296 page_base, page_offset,
297 user_data, page_length);
298 if (ret)
299 goto fail_put_pages;
300
301 remain -= page_length;
302 user_data += page_length;
303 offset += page_length;
304 }
305
306fail_put_pages:
307 i915_gem_object_put_pages(obj);
308fail_unlock:
309 mutex_unlock(&dev->struct_mutex);
310
311 return ret;
312}
313
Chris Wilson07f73f62009-09-14 16:50:30 +0100314static int
315i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
316{
317 int ret;
318
Chris Wilson4bdadb92010-01-27 13:36:32 +0000319 ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
Chris Wilson07f73f62009-09-14 16:50:30 +0100320
321 /* If we've insufficient memory to map in the pages, attempt
322 * to make some space by throwing out some old buffers.
323 */
324 if (ret == -ENOMEM) {
325 struct drm_device *dev = obj->dev;
Chris Wilson07f73f62009-09-14 16:50:30 +0100326
Daniel Vetter0108a3e2010-08-07 11:01:21 +0100327 ret = i915_gem_evict_something(dev, obj->size,
328 i915_gem_get_gtt_alignment(obj));
Chris Wilson07f73f62009-09-14 16:50:30 +0100329 if (ret)
330 return ret;
331
Chris Wilson4bdadb92010-01-27 13:36:32 +0000332 ret = i915_gem_object_get_pages(obj, 0);
Chris Wilson07f73f62009-09-14 16:50:30 +0100333 }
334
335 return ret;
336}
337
Eric Anholteb014592009-03-10 11:44:52 -0700338/**
339 * This is the fallback shmem pread path, which allocates temporary storage
340 * in kernel space to copy_to_user into outside of the struct_mutex, so we
341 * can copy out of the object's backing pages while holding the struct mutex
342 * and not take page faults.
343 */
344static int
345i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
346 struct drm_i915_gem_pread *args,
347 struct drm_file *file_priv)
348{
Daniel Vetter23010e42010-03-08 13:35:02 +0100349 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700350 struct mm_struct *mm = current->mm;
351 struct page **user_pages;
352 ssize_t remain;
353 loff_t offset, pinned_pages, i;
354 loff_t first_data_page, last_data_page, num_pages;
355 int shmem_page_index, shmem_page_offset;
356 int data_page_index, data_page_offset;
357 int page_length;
358 int ret;
359 uint64_t data_ptr = args->data_ptr;
Eric Anholt280b7132009-03-12 16:56:27 -0700360 int do_bit17_swizzling;
Eric Anholteb014592009-03-10 11:44:52 -0700361
362 remain = args->size;
363
364 /* Pin the user pages containing the data. We can't fault while
365 * holding the struct mutex, yet we want to hold it while
366 * dereferencing the user data.
367 */
368 first_data_page = data_ptr / PAGE_SIZE;
369 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
370 num_pages = last_data_page - first_data_page + 1;
371
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700372 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
Eric Anholteb014592009-03-10 11:44:52 -0700373 if (user_pages == NULL)
374 return -ENOMEM;
375
376 down_read(&mm->mmap_sem);
377 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
Eric Anholte5e9ecd2009-04-07 16:01:22 -0700378 num_pages, 1, 0, user_pages, NULL);
Eric Anholteb014592009-03-10 11:44:52 -0700379 up_read(&mm->mmap_sem);
380 if (pinned_pages < num_pages) {
381 ret = -EFAULT;
382 goto fail_put_user_pages;
383 }
384
Eric Anholt280b7132009-03-12 16:56:27 -0700385 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
386
Eric Anholteb014592009-03-10 11:44:52 -0700387 mutex_lock(&dev->struct_mutex);
388
Chris Wilson07f73f62009-09-14 16:50:30 +0100389 ret = i915_gem_object_get_pages_or_evict(obj);
390 if (ret)
Eric Anholteb014592009-03-10 11:44:52 -0700391 goto fail_unlock;
392
393 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
394 args->size);
395 if (ret != 0)
396 goto fail_put_pages;
397
Daniel Vetter23010e42010-03-08 13:35:02 +0100398 obj_priv = to_intel_bo(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700399 offset = args->offset;
400
401 while (remain > 0) {
402 /* Operation in this page
403 *
404 * shmem_page_index = page number within shmem file
405 * shmem_page_offset = offset within page in shmem file
406 * data_page_index = page number in get_user_pages return
407 * data_page_offset = offset with data_page_index page.
408 * page_length = bytes to copy for this page
409 */
410 shmem_page_index = offset / PAGE_SIZE;
411 shmem_page_offset = offset & ~PAGE_MASK;
412 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
413 data_page_offset = data_ptr & ~PAGE_MASK;
414
415 page_length = remain;
416 if ((shmem_page_offset + page_length) > PAGE_SIZE)
417 page_length = PAGE_SIZE - shmem_page_offset;
418 if ((data_page_offset + page_length) > PAGE_SIZE)
419 page_length = PAGE_SIZE - data_page_offset;
420
Eric Anholt280b7132009-03-12 16:56:27 -0700421 if (do_bit17_swizzling) {
Chris Wilson99a03df2010-05-27 14:15:34 +0100422 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
Eric Anholt280b7132009-03-12 16:56:27 -0700423 shmem_page_offset,
Chris Wilson99a03df2010-05-27 14:15:34 +0100424 user_pages[data_page_index],
425 data_page_offset,
426 page_length,
427 1);
428 } else {
429 slow_shmem_copy(user_pages[data_page_index],
430 data_page_offset,
431 obj_priv->pages[shmem_page_index],
432 shmem_page_offset,
433 page_length);
Eric Anholt280b7132009-03-12 16:56:27 -0700434 }
Eric Anholteb014592009-03-10 11:44:52 -0700435
436 remain -= page_length;
437 data_ptr += page_length;
438 offset += page_length;
439 }
440
441fail_put_pages:
442 i915_gem_object_put_pages(obj);
443fail_unlock:
444 mutex_unlock(&dev->struct_mutex);
445fail_put_user_pages:
446 for (i = 0; i < pinned_pages; i++) {
447 SetPageDirty(user_pages[i]);
448 page_cache_release(user_pages[i]);
449 }
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700450 drm_free_large(user_pages);
Eric Anholteb014592009-03-10 11:44:52 -0700451
452 return ret;
453}
454
Eric Anholt673a3942008-07-30 12:06:12 -0700455/**
456 * Reads data from the object referenced by handle.
457 *
458 * On error, the contents of *data are undefined.
459 */
460int
461i915_gem_pread_ioctl(struct drm_device *dev, void *data,
462 struct drm_file *file_priv)
463{
464 struct drm_i915_gem_pread *args = data;
465 struct drm_gem_object *obj;
466 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -0700467 int ret;
468
469 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
470 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +0100471 return -ENOENT;
Daniel Vetter23010e42010-03-08 13:35:02 +0100472 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700473
474 /* Bounds check source.
475 *
476 * XXX: This could use review for overflow issues...
477 */
478 if (args->offset > obj->size || args->size > obj->size ||
479 args->offset + args->size > obj->size) {
Chris Wilsonce9d4192010-09-26 20:50:05 +0100480 ret = -EINVAL;
481 goto err;
482 }
483
484 if (!access_ok(VERIFY_WRITE,
485 (char __user *)(uintptr_t)args->data_ptr,
486 args->size)) {
487 ret = -EFAULT;
488 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -0700489 }
490
Eric Anholt280b7132009-03-12 16:56:27 -0700491 if (i915_gem_object_needs_bit17_swizzle(obj)) {
Eric Anholteb014592009-03-10 11:44:52 -0700492 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
Eric Anholt280b7132009-03-12 16:56:27 -0700493 } else {
494 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
495 if (ret != 0)
496 ret = i915_gem_shmem_pread_slow(dev, obj, args,
497 file_priv);
498 }
Eric Anholt673a3942008-07-30 12:06:12 -0700499
Chris Wilsonce9d4192010-09-26 20:50:05 +0100500err:
Luca Barbieribc9025b2010-02-09 05:49:12 +0000501 drm_gem_object_unreference_unlocked(obj);
Eric Anholteb014592009-03-10 11:44:52 -0700502 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700503}
504
Keith Packard0839ccb2008-10-30 19:38:48 -0700505/* This is the fast write path which cannot handle
506 * page faults in the source data
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700507 */
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700508
Keith Packard0839ccb2008-10-30 19:38:48 -0700509static inline int
510fast_user_write(struct io_mapping *mapping,
511 loff_t page_base, int page_offset,
512 char __user *user_data,
513 int length)
514{
515 char *vaddr_atomic;
516 unsigned long unwritten;
517
Chris Wilsonfca3ec02010-08-04 14:34:24 +0100518 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
Keith Packard0839ccb2008-10-30 19:38:48 -0700519 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
520 user_data, length);
Chris Wilsonfca3ec02010-08-04 14:34:24 +0100521 io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
Keith Packard0839ccb2008-10-30 19:38:48 -0700522 if (unwritten)
523 return -EFAULT;
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700524 return 0;
Keith Packard0839ccb2008-10-30 19:38:48 -0700525}
526
527/* Here's the write path which can sleep for
528 * page faults
529 */
530
Chris Wilsonab34c222010-05-27 14:15:35 +0100531static inline void
Eric Anholt3de09aa2009-03-09 09:42:23 -0700532slow_kernel_write(struct io_mapping *mapping,
533 loff_t gtt_base, int gtt_offset,
534 struct page *user_page, int user_offset,
535 int length)
Keith Packard0839ccb2008-10-30 19:38:48 -0700536{
Chris Wilsonab34c222010-05-27 14:15:35 +0100537 char __iomem *dst_vaddr;
538 char *src_vaddr;
Keith Packard0839ccb2008-10-30 19:38:48 -0700539
Chris Wilsonab34c222010-05-27 14:15:35 +0100540 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
541 src_vaddr = kmap(user_page);
542
543 memcpy_toio(dst_vaddr + gtt_offset,
544 src_vaddr + user_offset,
545 length);
546
547 kunmap(user_page);
548 io_mapping_unmap(dst_vaddr);
Linus Torvalds9b7530cc2008-10-20 14:16:43 -0700549}
550
Eric Anholt40123c12009-03-09 13:42:30 -0700551static inline int
552fast_shmem_write(struct page **pages,
553 loff_t page_base, int page_offset,
554 char __user *data,
555 int length)
556{
557 char __iomem *vaddr;
Dave Airlied0088772009-03-28 20:29:48 -0400558 unsigned long unwritten;
Eric Anholt40123c12009-03-09 13:42:30 -0700559
560 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
561 if (vaddr == NULL)
562 return -ENOMEM;
Dave Airlied0088772009-03-28 20:29:48 -0400563 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
Eric Anholt40123c12009-03-09 13:42:30 -0700564 kunmap_atomic(vaddr, KM_USER0);
565
Dave Airlied0088772009-03-28 20:29:48 -0400566 if (unwritten)
567 return -EFAULT;
Eric Anholt40123c12009-03-09 13:42:30 -0700568 return 0;
569}
570
Eric Anholt3de09aa2009-03-09 09:42:23 -0700571/**
572 * This is the fast pwrite path, where we copy the data directly from the
573 * user into the GTT, uncached.
574 */
Eric Anholt673a3942008-07-30 12:06:12 -0700575static int
Eric Anholt3de09aa2009-03-09 09:42:23 -0700576i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
577 struct drm_i915_gem_pwrite *args,
578 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700579{
Daniel Vetter23010e42010-03-08 13:35:02 +0100580 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Keith Packard0839ccb2008-10-30 19:38:48 -0700581 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -0700582 ssize_t remain;
Keith Packard0839ccb2008-10-30 19:38:48 -0700583 loff_t offset, page_base;
Eric Anholt673a3942008-07-30 12:06:12 -0700584 char __user *user_data;
Keith Packard0839ccb2008-10-30 19:38:48 -0700585 int page_offset, page_length;
586 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700587
588 user_data = (char __user *) (uintptr_t) args->data_ptr;
589 remain = args->size;
Eric Anholt673a3942008-07-30 12:06:12 -0700590
591
592 mutex_lock(&dev->struct_mutex);
593 ret = i915_gem_object_pin(obj, 0);
594 if (ret) {
595 mutex_unlock(&dev->struct_mutex);
596 return ret;
597 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800598 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
Eric Anholt673a3942008-07-30 12:06:12 -0700599 if (ret)
600 goto fail;
601
Daniel Vetter23010e42010-03-08 13:35:02 +0100602 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700603 offset = obj_priv->gtt_offset + args->offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700604
605 while (remain > 0) {
606 /* Operation in this page
607 *
Keith Packard0839ccb2008-10-30 19:38:48 -0700608 * page_base = page offset within aperture
609 * page_offset = offset within page
610 * page_length = bytes to copy for this page
Eric Anholt673a3942008-07-30 12:06:12 -0700611 */
Keith Packard0839ccb2008-10-30 19:38:48 -0700612 page_base = (offset & ~(PAGE_SIZE-1));
613 page_offset = offset & (PAGE_SIZE-1);
614 page_length = remain;
615 if ((page_offset + remain) > PAGE_SIZE)
616 page_length = PAGE_SIZE - page_offset;
Eric Anholt673a3942008-07-30 12:06:12 -0700617
Keith Packard0839ccb2008-10-30 19:38:48 -0700618 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
619 page_offset, user_data, page_length);
Eric Anholt673a3942008-07-30 12:06:12 -0700620
Keith Packard0839ccb2008-10-30 19:38:48 -0700621 /* If we get a fault while copying data, then (presumably) our
Eric Anholt3de09aa2009-03-09 09:42:23 -0700622 * source page isn't available. Return the error and we'll
623 * retry in the slow path.
Keith Packard0839ccb2008-10-30 19:38:48 -0700624 */
Eric Anholt3de09aa2009-03-09 09:42:23 -0700625 if (ret)
626 goto fail;
Eric Anholt673a3942008-07-30 12:06:12 -0700627
Keith Packard0839ccb2008-10-30 19:38:48 -0700628 remain -= page_length;
629 user_data += page_length;
630 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700631 }
Eric Anholt673a3942008-07-30 12:06:12 -0700632
633fail:
634 i915_gem_object_unpin(obj);
635 mutex_unlock(&dev->struct_mutex);
636
637 return ret;
638}
639
Eric Anholt3de09aa2009-03-09 09:42:23 -0700640/**
641 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
642 * the memory and maps it using kmap_atomic for copying.
643 *
644 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
645 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
646 */
Eric Anholt3043c602008-10-02 12:24:47 -0700647static int
Eric Anholt3de09aa2009-03-09 09:42:23 -0700648i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
649 struct drm_i915_gem_pwrite *args,
650 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700651{
Daniel Vetter23010e42010-03-08 13:35:02 +0100652 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700653 drm_i915_private_t *dev_priv = dev->dev_private;
654 ssize_t remain;
655 loff_t gtt_page_base, offset;
656 loff_t first_data_page, last_data_page, num_pages;
657 loff_t pinned_pages, i;
658 struct page **user_pages;
659 struct mm_struct *mm = current->mm;
660 int gtt_page_offset, data_page_offset, data_page_index, page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700661 int ret;
Eric Anholt3de09aa2009-03-09 09:42:23 -0700662 uint64_t data_ptr = args->data_ptr;
663
664 remain = args->size;
665
666 /* Pin the user pages containing the data. We can't fault while
667 * holding the struct mutex, and all of the pwrite implementations
668 * want to hold it while dereferencing the user data.
669 */
670 first_data_page = data_ptr / PAGE_SIZE;
671 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
672 num_pages = last_data_page - first_data_page + 1;
673
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700674 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
Eric Anholt3de09aa2009-03-09 09:42:23 -0700675 if (user_pages == NULL)
676 return -ENOMEM;
677
678 down_read(&mm->mmap_sem);
679 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
680 num_pages, 0, 0, user_pages, NULL);
681 up_read(&mm->mmap_sem);
682 if (pinned_pages < num_pages) {
683 ret = -EFAULT;
684 goto out_unpin_pages;
685 }
686
687 mutex_lock(&dev->struct_mutex);
688 ret = i915_gem_object_pin(obj, 0);
689 if (ret)
690 goto out_unlock;
691
692 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
693 if (ret)
694 goto out_unpin_object;
695
Daniel Vetter23010e42010-03-08 13:35:02 +0100696 obj_priv = to_intel_bo(obj);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700697 offset = obj_priv->gtt_offset + args->offset;
698
699 while (remain > 0) {
700 /* Operation in this page
701 *
702 * gtt_page_base = page offset within aperture
703 * gtt_page_offset = offset within page in aperture
704 * data_page_index = page number in get_user_pages return
705 * data_page_offset = offset with data_page_index page.
706 * page_length = bytes to copy for this page
707 */
708 gtt_page_base = offset & PAGE_MASK;
709 gtt_page_offset = offset & ~PAGE_MASK;
710 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
711 data_page_offset = data_ptr & ~PAGE_MASK;
712
713 page_length = remain;
714 if ((gtt_page_offset + page_length) > PAGE_SIZE)
715 page_length = PAGE_SIZE - gtt_page_offset;
716 if ((data_page_offset + page_length) > PAGE_SIZE)
717 page_length = PAGE_SIZE - data_page_offset;
718
Chris Wilsonab34c222010-05-27 14:15:35 +0100719 slow_kernel_write(dev_priv->mm.gtt_mapping,
720 gtt_page_base, gtt_page_offset,
721 user_pages[data_page_index],
722 data_page_offset,
723 page_length);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700724
725 remain -= page_length;
726 offset += page_length;
727 data_ptr += page_length;
728 }
729
730out_unpin_object:
731 i915_gem_object_unpin(obj);
732out_unlock:
733 mutex_unlock(&dev->struct_mutex);
734out_unpin_pages:
735 for (i = 0; i < pinned_pages; i++)
736 page_cache_release(user_pages[i]);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700737 drm_free_large(user_pages);
Eric Anholt3de09aa2009-03-09 09:42:23 -0700738
739 return ret;
740}
741
Eric Anholt40123c12009-03-09 13:42:30 -0700742/**
743 * This is the fast shmem pwrite path, which attempts to directly
744 * copy_from_user into the kmapped pages backing the object.
745 */
Eric Anholt673a3942008-07-30 12:06:12 -0700746static int
Eric Anholt40123c12009-03-09 13:42:30 -0700747i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
748 struct drm_i915_gem_pwrite *args,
749 struct drm_file *file_priv)
Eric Anholt673a3942008-07-30 12:06:12 -0700750{
Daniel Vetter23010e42010-03-08 13:35:02 +0100751 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt40123c12009-03-09 13:42:30 -0700752 ssize_t remain;
753 loff_t offset, page_base;
754 char __user *user_data;
755 int page_offset, page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700756 int ret;
Eric Anholt40123c12009-03-09 13:42:30 -0700757
758 user_data = (char __user *) (uintptr_t) args->data_ptr;
759 remain = args->size;
Eric Anholt673a3942008-07-30 12:06:12 -0700760
761 mutex_lock(&dev->struct_mutex);
762
Chris Wilson4bdadb92010-01-27 13:36:32 +0000763 ret = i915_gem_object_get_pages(obj, 0);
Eric Anholt40123c12009-03-09 13:42:30 -0700764 if (ret != 0)
765 goto fail_unlock;
766
Eric Anholte47c68e2008-11-14 13:35:19 -0800767 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Eric Anholt40123c12009-03-09 13:42:30 -0700768 if (ret != 0)
769 goto fail_put_pages;
Eric Anholt673a3942008-07-30 12:06:12 -0700770
Daniel Vetter23010e42010-03-08 13:35:02 +0100771 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700772 offset = args->offset;
Eric Anholt40123c12009-03-09 13:42:30 -0700773 obj_priv->dirty = 1;
Eric Anholt673a3942008-07-30 12:06:12 -0700774
Eric Anholt40123c12009-03-09 13:42:30 -0700775 while (remain > 0) {
776 /* Operation in this page
777 *
778 * page_base = page offset within aperture
779 * page_offset = offset within page
780 * page_length = bytes to copy for this page
781 */
782 page_base = (offset & ~(PAGE_SIZE-1));
783 page_offset = offset & (PAGE_SIZE-1);
784 page_length = remain;
785 if ((page_offset + remain) > PAGE_SIZE)
786 page_length = PAGE_SIZE - page_offset;
787
788 ret = fast_shmem_write(obj_priv->pages,
789 page_base, page_offset,
790 user_data, page_length);
791 if (ret)
792 goto fail_put_pages;
793
794 remain -= page_length;
795 user_data += page_length;
796 offset += page_length;
Eric Anholt673a3942008-07-30 12:06:12 -0700797 }
798
Eric Anholt40123c12009-03-09 13:42:30 -0700799fail_put_pages:
800 i915_gem_object_put_pages(obj);
801fail_unlock:
Eric Anholt673a3942008-07-30 12:06:12 -0700802 mutex_unlock(&dev->struct_mutex);
803
Eric Anholt40123c12009-03-09 13:42:30 -0700804 return ret;
805}
806
807/**
808 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
809 * the memory and maps it using kmap_atomic for copying.
810 *
811 * This avoids taking mmap_sem for faulting on the user's address while the
812 * struct_mutex is held.
813 */
814static int
815i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
816 struct drm_i915_gem_pwrite *args,
817 struct drm_file *file_priv)
818{
Daniel Vetter23010e42010-03-08 13:35:02 +0100819 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt40123c12009-03-09 13:42:30 -0700820 struct mm_struct *mm = current->mm;
821 struct page **user_pages;
822 ssize_t remain;
823 loff_t offset, pinned_pages, i;
824 loff_t first_data_page, last_data_page, num_pages;
825 int shmem_page_index, shmem_page_offset;
826 int data_page_index, data_page_offset;
827 int page_length;
828 int ret;
829 uint64_t data_ptr = args->data_ptr;
Eric Anholt280b7132009-03-12 16:56:27 -0700830 int do_bit17_swizzling;
Eric Anholt40123c12009-03-09 13:42:30 -0700831
832 remain = args->size;
833
834 /* Pin the user pages containing the data. We can't fault while
835 * holding the struct mutex, and all of the pwrite implementations
836 * want to hold it while dereferencing the user data.
837 */
838 first_data_page = data_ptr / PAGE_SIZE;
839 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
840 num_pages = last_data_page - first_data_page + 1;
841
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700842 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
Eric Anholt40123c12009-03-09 13:42:30 -0700843 if (user_pages == NULL)
844 return -ENOMEM;
845
846 down_read(&mm->mmap_sem);
847 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
848 num_pages, 0, 0, user_pages, NULL);
849 up_read(&mm->mmap_sem);
850 if (pinned_pages < num_pages) {
851 ret = -EFAULT;
852 goto fail_put_user_pages;
853 }
854
Eric Anholt280b7132009-03-12 16:56:27 -0700855 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
856
Eric Anholt40123c12009-03-09 13:42:30 -0700857 mutex_lock(&dev->struct_mutex);
858
Chris Wilson07f73f62009-09-14 16:50:30 +0100859 ret = i915_gem_object_get_pages_or_evict(obj);
860 if (ret)
Eric Anholt40123c12009-03-09 13:42:30 -0700861 goto fail_unlock;
862
863 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
864 if (ret != 0)
865 goto fail_put_pages;
866
Daniel Vetter23010e42010-03-08 13:35:02 +0100867 obj_priv = to_intel_bo(obj);
Eric Anholt40123c12009-03-09 13:42:30 -0700868 offset = args->offset;
869 obj_priv->dirty = 1;
870
871 while (remain > 0) {
872 /* Operation in this page
873 *
874 * shmem_page_index = page number within shmem file
875 * shmem_page_offset = offset within page in shmem file
876 * data_page_index = page number in get_user_pages return
877 * data_page_offset = offset with data_page_index page.
878 * page_length = bytes to copy for this page
879 */
880 shmem_page_index = offset / PAGE_SIZE;
881 shmem_page_offset = offset & ~PAGE_MASK;
882 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
883 data_page_offset = data_ptr & ~PAGE_MASK;
884
885 page_length = remain;
886 if ((shmem_page_offset + page_length) > PAGE_SIZE)
887 page_length = PAGE_SIZE - shmem_page_offset;
888 if ((data_page_offset + page_length) > PAGE_SIZE)
889 page_length = PAGE_SIZE - data_page_offset;
890
Eric Anholt280b7132009-03-12 16:56:27 -0700891 if (do_bit17_swizzling) {
Chris Wilson99a03df2010-05-27 14:15:34 +0100892 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
Eric Anholt280b7132009-03-12 16:56:27 -0700893 shmem_page_offset,
894 user_pages[data_page_index],
895 data_page_offset,
Chris Wilson99a03df2010-05-27 14:15:34 +0100896 page_length,
897 0);
898 } else {
899 slow_shmem_copy(obj_priv->pages[shmem_page_index],
900 shmem_page_offset,
901 user_pages[data_page_index],
902 data_page_offset,
903 page_length);
Eric Anholt280b7132009-03-12 16:56:27 -0700904 }
Eric Anholt40123c12009-03-09 13:42:30 -0700905
906 remain -= page_length;
907 data_ptr += page_length;
908 offset += page_length;
909 }
910
911fail_put_pages:
912 i915_gem_object_put_pages(obj);
913fail_unlock:
914 mutex_unlock(&dev->struct_mutex);
915fail_put_user_pages:
916 for (i = 0; i < pinned_pages; i++)
917 page_cache_release(user_pages[i]);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -0700918 drm_free_large(user_pages);
Eric Anholt40123c12009-03-09 13:42:30 -0700919
920 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700921}
922
923/**
924 * Writes data to the object referenced by handle.
925 *
926 * On error, the contents of the buffer that were to be modified are undefined.
927 */
928int
929i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
930 struct drm_file *file_priv)
931{
932 struct drm_i915_gem_pwrite *args = data;
933 struct drm_gem_object *obj;
934 struct drm_i915_gem_object *obj_priv;
935 int ret = 0;
936
937 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
938 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +0100939 return -ENOENT;
Daniel Vetter23010e42010-03-08 13:35:02 +0100940 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700941
942 /* Bounds check destination.
943 *
944 * XXX: This could use review for overflow issues...
945 */
946 if (args->offset > obj->size || args->size > obj->size ||
947 args->offset + args->size > obj->size) {
Chris Wilsonce9d4192010-09-26 20:50:05 +0100948 ret = -EINVAL;
949 goto err;
950 }
951
952 if (!access_ok(VERIFY_READ,
953 (char __user *)(uintptr_t)args->data_ptr,
954 args->size)) {
955 ret = -EFAULT;
956 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -0700957 }
958
959 /* We can only do the GTT pwrite on untiled buffers, as otherwise
960 * it would end up going through the fenced access, and we'll get
961 * different detiling behavior between reading and writing.
962 * pread/pwrite currently are reading and writing from the CPU
963 * perspective, requiring manual detiling by the client.
964 */
Dave Airlie71acb5e2008-12-30 20:31:46 +1000965 if (obj_priv->phys_obj)
966 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
967 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
Chris Wilson9b8c4a02010-05-27 14:21:01 +0100968 dev->gtt_total != 0 &&
969 obj->write_domain != I915_GEM_DOMAIN_CPU) {
Eric Anholt3de09aa2009-03-09 09:42:23 -0700970 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
971 if (ret == -EFAULT) {
972 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
973 file_priv);
974 }
Eric Anholt280b7132009-03-12 16:56:27 -0700975 } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
976 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
Eric Anholt40123c12009-03-09 13:42:30 -0700977 } else {
978 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
979 if (ret == -EFAULT) {
980 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
981 file_priv);
982 }
983 }
Eric Anholt673a3942008-07-30 12:06:12 -0700984
985#if WATCH_PWRITE
986 if (ret)
987 DRM_INFO("pwrite failed %d\n", ret);
988#endif
989
Chris Wilsonce9d4192010-09-26 20:50:05 +0100990err:
Luca Barbieribc9025b2010-02-09 05:49:12 +0000991 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700992 return ret;
993}
994
995/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800996 * Called when user space prepares to use an object with the CPU, either
997 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -0700998 */
999int
1000i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1001 struct drm_file *file_priv)
1002{
Eric Anholta09ba7f2009-08-29 12:49:51 -07001003 struct drm_i915_private *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001004 struct drm_i915_gem_set_domain *args = data;
1005 struct drm_gem_object *obj;
Jesse Barnes652c3932009-08-17 13:31:43 -07001006 struct drm_i915_gem_object *obj_priv;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001007 uint32_t read_domains = args->read_domains;
1008 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07001009 int ret;
1010
1011 if (!(dev->driver->driver_features & DRIVER_GEM))
1012 return -ENODEV;
1013
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001014 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +01001015 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001016 return -EINVAL;
1017
Chris Wilson21d509e2009-06-06 09:46:02 +01001018 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001019 return -EINVAL;
1020
1021 /* Having something in the write domain implies it's in the read
1022 * domain, and only that read domain. Enforce that in the request.
1023 */
1024 if (write_domain != 0 && read_domains != write_domain)
1025 return -EINVAL;
1026
Eric Anholt673a3942008-07-30 12:06:12 -07001027 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1028 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001029 return -ENOENT;
Daniel Vetter23010e42010-03-08 13:35:02 +01001030 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001031
1032 mutex_lock(&dev->struct_mutex);
Jesse Barnes652c3932009-08-17 13:31:43 -07001033
1034 intel_mark_busy(dev, obj);
1035
Eric Anholt673a3942008-07-30 12:06:12 -07001036#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02001037 DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001038 obj, obj->size, read_domains, write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07001039#endif
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001040 if (read_domains & I915_GEM_DOMAIN_GTT) {
1041 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001042
Eric Anholta09ba7f2009-08-29 12:49:51 -07001043 /* Update the LRU on the fence for the CPU access that's
1044 * about to occur.
1045 */
1046 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001047 struct drm_i915_fence_reg *reg =
1048 &dev_priv->fence_regs[obj_priv->fence_reg];
1049 list_move_tail(&reg->lru_list,
Eric Anholta09ba7f2009-08-29 12:49:51 -07001050 &dev_priv->mm.fence_list);
1051 }
1052
Eric Anholt02354392008-11-26 13:58:13 -08001053 /* Silently promote "you're not bound, there was nothing to do"
1054 * to success, since the client was just asking us to
1055 * make sure everything was done.
1056 */
1057 if (ret == -EINVAL)
1058 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001059 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001060 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001061 }
1062
Chris Wilson7d1c4802010-08-07 21:45:03 +01001063
1064 /* Maintain LRU order of "inactive" objects */
1065 if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
1066 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1067
Eric Anholt673a3942008-07-30 12:06:12 -07001068 drm_gem_object_unreference(obj);
1069 mutex_unlock(&dev->struct_mutex);
1070 return ret;
1071}
1072
1073/**
1074 * Called when user space has done writes to this buffer
1075 */
1076int
1077i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1078 struct drm_file *file_priv)
1079{
1080 struct drm_i915_gem_sw_finish *args = data;
1081 struct drm_gem_object *obj;
1082 struct drm_i915_gem_object *obj_priv;
1083 int ret = 0;
1084
1085 if (!(dev->driver->driver_features & DRIVER_GEM))
1086 return -ENODEV;
1087
1088 mutex_lock(&dev->struct_mutex);
1089 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1090 if (obj == NULL) {
1091 mutex_unlock(&dev->struct_mutex);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001092 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001093 }
1094
1095#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02001096 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
Eric Anholt673a3942008-07-30 12:06:12 -07001097 __func__, args->handle, obj, obj->size);
1098#endif
Daniel Vetter23010e42010-03-08 13:35:02 +01001099 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001100
1101 /* Pinned buffers may be scanout, so flush the cache */
Eric Anholte47c68e2008-11-14 13:35:19 -08001102 if (obj_priv->pin_count)
1103 i915_gem_object_flush_cpu_write_domain(obj);
1104
Eric Anholt673a3942008-07-30 12:06:12 -07001105 drm_gem_object_unreference(obj);
1106 mutex_unlock(&dev->struct_mutex);
1107 return ret;
1108}
1109
1110/**
1111 * Maps the contents of an object, returning the address it is mapped
1112 * into.
1113 *
1114 * While the mapping holds a reference on the contents of the object, it doesn't
1115 * imply a ref on the object itself.
1116 */
1117int
1118i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1119 struct drm_file *file_priv)
1120{
1121 struct drm_i915_gem_mmap *args = data;
1122 struct drm_gem_object *obj;
1123 loff_t offset;
1124 unsigned long addr;
1125
1126 if (!(dev->driver->driver_features & DRIVER_GEM))
1127 return -ENODEV;
1128
1129 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1130 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001131 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001132
1133 offset = args->offset;
1134
1135 down_write(&current->mm->mmap_sem);
1136 addr = do_mmap(obj->filp, 0, args->size,
1137 PROT_READ | PROT_WRITE, MAP_SHARED,
1138 args->offset);
1139 up_write(&current->mm->mmap_sem);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001140 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001141 if (IS_ERR((void *)addr))
1142 return addr;
1143
1144 args->addr_ptr = (uint64_t) addr;
1145
1146 return 0;
1147}
1148
Jesse Barnesde151cf2008-11-12 10:03:55 -08001149/**
1150 * i915_gem_fault - fault a page into the GTT
1151 * vma: VMA in question
1152 * vmf: fault info
1153 *
1154 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1155 * from userspace. The fault handler takes care of binding the object to
1156 * the GTT (if needed), allocating and programming a fence register (again,
1157 * only if needed based on whether the old reg is still valid or the object
1158 * is tiled) and inserting a new PTE into the faulting process.
1159 *
1160 * Note that the faulting process may involve evicting existing objects
1161 * from the GTT and/or fence registers to make room. So performance may
1162 * suffer if the GTT working set is large or there are few fence registers
1163 * left.
1164 */
1165int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1166{
1167 struct drm_gem_object *obj = vma->vm_private_data;
1168 struct drm_device *dev = obj->dev;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001169 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001170 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001171 pgoff_t page_offset;
1172 unsigned long pfn;
1173 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001174 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001175
1176 /* We don't use vmf->pgoff since that has the fake offset */
1177 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1178 PAGE_SHIFT;
1179
1180 /* Now bind it into the GTT if needed */
1181 mutex_lock(&dev->struct_mutex);
1182 if (!obj_priv->gtt_space) {
Chris Wilsone67b8ce2009-09-14 16:50:26 +01001183 ret = i915_gem_object_bind_to_gtt(obj, 0);
Chris Wilsonc7150892009-09-23 00:43:56 +01001184 if (ret)
1185 goto unlock;
Kristian Høgsberg07f4f3e2009-05-27 14:37:28 -04001186
Jesse Barnesde151cf2008-11-12 10:03:55 -08001187 ret = i915_gem_object_set_to_gtt_domain(obj, write);
Chris Wilsonc7150892009-09-23 00:43:56 +01001188 if (ret)
1189 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001190 }
1191
1192 /* Need a new fence register? */
Eric Anholta09ba7f2009-08-29 12:49:51 -07001193 if (obj_priv->tiling_mode != I915_TILING_NONE) {
Chris Wilson8c4b8c32009-06-17 22:08:52 +01001194 ret = i915_gem_object_get_fence_reg(obj);
Chris Wilsonc7150892009-09-23 00:43:56 +01001195 if (ret)
1196 goto unlock;
Eric Anholtd9ddcb92009-01-27 10:33:49 -08001197 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001198
Chris Wilson7d1c4802010-08-07 21:45:03 +01001199 if (i915_gem_object_is_inactive(obj_priv))
1200 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1201
Jesse Barnesde151cf2008-11-12 10:03:55 -08001202 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1203 page_offset;
1204
1205 /* Finally, remap it using the new GTT offset */
1206 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc7150892009-09-23 00:43:56 +01001207unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001208 mutex_unlock(&dev->struct_mutex);
1209
1210 switch (ret) {
Chris Wilsonc7150892009-09-23 00:43:56 +01001211 case 0:
1212 case -ERESTARTSYS:
1213 return VM_FAULT_NOPAGE;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001214 case -ENOMEM:
1215 case -EAGAIN:
1216 return VM_FAULT_OOM;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001217 default:
Chris Wilsonc7150892009-09-23 00:43:56 +01001218 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001219 }
1220}
1221
1222/**
1223 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1224 * @obj: obj in question
1225 *
1226 * GEM memory mapping works by handing back to userspace a fake mmap offset
1227 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1228 * up the object based on the offset and sets up the various memory mapping
1229 * structures.
1230 *
1231 * This routine allocates and attaches a fake offset for @obj.
1232 */
1233static int
1234i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1235{
1236 struct drm_device *dev = obj->dev;
1237 struct drm_gem_mm *mm = dev->mm_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001238 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001239 struct drm_map_list *list;
Benjamin Herrenschmidtf77d3902009-02-02 16:55:46 +11001240 struct drm_local_map *map;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001241 int ret = 0;
1242
1243 /* Set the object up for mmap'ing */
1244 list = &obj->map_list;
Eric Anholt9a298b22009-03-24 12:23:04 -07001245 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001246 if (!list->map)
1247 return -ENOMEM;
1248
1249 map = list->map;
1250 map->type = _DRM_GEM;
1251 map->size = obj->size;
1252 map->handle = obj;
1253
1254 /* Get a DRM GEM mmap offset allocated... */
1255 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1256 obj->size / PAGE_SIZE, 0, 0);
1257 if (!list->file_offset_node) {
1258 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1259 ret = -ENOMEM;
1260 goto out_free_list;
1261 }
1262
1263 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1264 obj->size / PAGE_SIZE, 0);
1265 if (!list->file_offset_node) {
1266 ret = -ENOMEM;
1267 goto out_free_list;
1268 }
1269
1270 list->hash.key = list->file_offset_node->start;
1271 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1272 DRM_ERROR("failed to add to map hash\n");
Chris Wilson5618ca62009-12-02 15:15:30 +00001273 ret = -ENOMEM;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001274 goto out_free_mm;
1275 }
1276
1277 /* By now we should be all set, any drm_mmap request on the offset
1278 * below will get to our mmap & fault handler */
1279 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1280
1281 return 0;
1282
1283out_free_mm:
1284 drm_mm_put_block(list->file_offset_node);
1285out_free_list:
Eric Anholt9a298b22009-03-24 12:23:04 -07001286 kfree(list->map);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001287
1288 return ret;
1289}
1290
Chris Wilson901782b2009-07-10 08:18:50 +01001291/**
1292 * i915_gem_release_mmap - remove physical page mappings
1293 * @obj: obj in question
1294 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001295 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001296 * relinquish ownership of the pages back to the system.
1297 *
1298 * It is vital that we remove the page mapping if we have mapped a tiled
1299 * object through the GTT and then lose the fence register due to
1300 * resource pressure. Similarly if the object has been moved out of the
1301 * aperture, than pages mapped into userspace must be revoked. Removing the
1302 * mapping will then trigger a page fault on the next user access, allowing
1303 * fixup by i915_gem_fault().
1304 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001305void
Chris Wilson901782b2009-07-10 08:18:50 +01001306i915_gem_release_mmap(struct drm_gem_object *obj)
1307{
1308 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01001309 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson901782b2009-07-10 08:18:50 +01001310
1311 if (dev->dev_mapping)
1312 unmap_mapping_range(dev->dev_mapping,
1313 obj_priv->mmap_offset, obj->size, 1);
1314}
1315
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001316static void
1317i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1318{
1319 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01001320 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001321 struct drm_gem_mm *mm = dev->mm_private;
1322 struct drm_map_list *list;
1323
1324 list = &obj->map_list;
1325 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1326
1327 if (list->file_offset_node) {
1328 drm_mm_put_block(list->file_offset_node);
1329 list->file_offset_node = NULL;
1330 }
1331
1332 if (list->map) {
Eric Anholt9a298b22009-03-24 12:23:04 -07001333 kfree(list->map);
Jesse Barnesab00b3e2009-02-11 14:01:46 -08001334 list->map = NULL;
1335 }
1336
1337 obj_priv->mmap_offset = 0;
1338}
1339
Jesse Barnesde151cf2008-11-12 10:03:55 -08001340/**
1341 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1342 * @obj: object to check
1343 *
1344 * Return the required GTT alignment for an object, taking into account
1345 * potential fence register mapping if needed.
1346 */
1347static uint32_t
1348i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1349{
1350 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01001351 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001352 int start, i;
1353
1354 /*
1355 * Minimum alignment is 4k (GTT page size), but might be greater
1356 * if a fence register is needed for the object.
1357 */
1358 if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
1359 return 4096;
1360
1361 /*
1362 * Previous chips need to be aligned to the size of the smallest
1363 * fence register that can contain the object.
1364 */
1365 if (IS_I9XX(dev))
1366 start = 1024*1024;
1367 else
1368 start = 512*1024;
1369
1370 for (i = start; i < obj->size; i <<= 1)
1371 ;
1372
1373 return i;
1374}
1375
1376/**
1377 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1378 * @dev: DRM device
1379 * @data: GTT mapping ioctl data
1380 * @file_priv: GEM object info
1381 *
1382 * Simply returns the fake offset to userspace so it can mmap it.
1383 * The mmap call will end up in drm_gem_mmap(), which will set things
1384 * up so we can get faults in the handler above.
1385 *
1386 * The fault handler will take care of binding the object into the GTT
1387 * (since it may have been evicted to make room for something), allocating
1388 * a fence register, and mapping the appropriate aperture address into
1389 * userspace.
1390 */
1391int
1392i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1393 struct drm_file *file_priv)
1394{
1395 struct drm_i915_gem_mmap_gtt *args = data;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001396 struct drm_gem_object *obj;
1397 struct drm_i915_gem_object *obj_priv;
1398 int ret;
1399
1400 if (!(dev->driver->driver_features & DRIVER_GEM))
1401 return -ENODEV;
1402
1403 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1404 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001405 return -ENOENT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001406
1407 mutex_lock(&dev->struct_mutex);
1408
Daniel Vetter23010e42010-03-08 13:35:02 +01001409 obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001410
Chris Wilsonab182822009-09-22 18:46:17 +01001411 if (obj_priv->madv != I915_MADV_WILLNEED) {
1412 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1413 drm_gem_object_unreference(obj);
1414 mutex_unlock(&dev->struct_mutex);
1415 return -EINVAL;
1416 }
1417
1418
Jesse Barnesde151cf2008-11-12 10:03:55 -08001419 if (!obj_priv->mmap_offset) {
1420 ret = i915_gem_create_mmap_offset(obj);
Chris Wilson13af1062009-02-11 14:26:31 +00001421 if (ret) {
1422 drm_gem_object_unreference(obj);
1423 mutex_unlock(&dev->struct_mutex);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001424 return ret;
Chris Wilson13af1062009-02-11 14:26:31 +00001425 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001426 }
1427
1428 args->offset = obj_priv->mmap_offset;
1429
Jesse Barnesde151cf2008-11-12 10:03:55 -08001430 /*
1431 * Pull it into the GTT so that we have a page list (makes the
1432 * initial fault faster and any subsequent flushing possible).
1433 */
1434 if (!obj_priv->agp_mem) {
Chris Wilsone67b8ce2009-09-14 16:50:26 +01001435 ret = i915_gem_object_bind_to_gtt(obj, 0);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001436 if (ret) {
1437 drm_gem_object_unreference(obj);
1438 mutex_unlock(&dev->struct_mutex);
1439 return ret;
1440 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001441 }
1442
1443 drm_gem_object_unreference(obj);
1444 mutex_unlock(&dev->struct_mutex);
1445
1446 return 0;
1447}
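/*
 * A rough sketch of the userspace side (not part of this file, assuming
 * libdrm): the handle goes in through DRM_IOCTL_I915_GEM_MMAP_GTT and the
 * fake offset returned above is fed straight to mmap() on the DRM fd:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * The fault handler then binds the object and maps the aperture pages on
 * first access.
 */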
1448
Ben Gamari6911a9b2009-04-02 11:24:54 -07001449void
Eric Anholt856fa192009-03-19 14:10:50 -07001450i915_gem_object_put_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001451{
Daniel Vetter23010e42010-03-08 13:35:02 +01001452 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001453 int page_count = obj->size / PAGE_SIZE;
1454 int i;
1455
Eric Anholt856fa192009-03-19 14:10:50 -07001456 BUG_ON(obj_priv->pages_refcount == 0);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001457 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001458
1459 if (--obj_priv->pages_refcount != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07001460 return;
1461
Eric Anholt280b7132009-03-12 16:56:27 -07001462 if (obj_priv->tiling_mode != I915_TILING_NONE)
1463 i915_gem_object_save_bit_17_swizzle(obj);
1464
Chris Wilson3ef94da2009-09-14 16:50:29 +01001465 if (obj_priv->madv == I915_MADV_DONTNEED)
Chris Wilson13a05fd2009-09-20 23:03:19 +01001466 obj_priv->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001467
1468 for (i = 0; i < page_count; i++) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01001469 if (obj_priv->dirty)
1470 set_page_dirty(obj_priv->pages[i]);
1471
1472 if (obj_priv->madv == I915_MADV_WILLNEED)
Eric Anholt856fa192009-03-19 14:10:50 -07001473 mark_page_accessed(obj_priv->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001474
1475 page_cache_release(obj_priv->pages[i]);
1476 }
Eric Anholt673a3942008-07-30 12:06:12 -07001477 obj_priv->dirty = 0;
1478
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07001479 drm_free_large(obj_priv->pages);
Eric Anholt856fa192009-03-19 14:10:50 -07001480 obj_priv->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001481}
1482
1483static void
Zou Nan hai852835f2010-05-21 09:08:56 +08001484i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
1485 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001486{
1487 struct drm_device *dev = obj->dev;
1488 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001489 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Zou Nan hai852835f2010-05-21 09:08:56 +08001490 BUG_ON(ring == NULL);
1491 obj_priv->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001492
1493 /* Add a reference if we're newly entering the active list. */
1494 if (!obj_priv->active) {
1495 drm_gem_object_reference(obj);
1496 obj_priv->active = 1;
1497 }
1498 /* Move from whatever list we were on to the tail of execution. */
Carl Worth5e118f42009-03-20 11:54:25 -07001499 spin_lock(&dev_priv->mm.active_list_lock);
Zou Nan hai852835f2010-05-21 09:08:56 +08001500 list_move_tail(&obj_priv->list, &ring->active_list);
Carl Worth5e118f42009-03-20 11:54:25 -07001501 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001502 obj_priv->last_rendering_seqno = seqno;
Eric Anholt673a3942008-07-30 12:06:12 -07001503}
1504
Eric Anholtce44b0e2008-11-06 16:00:31 -08001505static void
1506i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1507{
1508 struct drm_device *dev = obj->dev;
1509 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001510 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001511
1512 BUG_ON(!obj_priv->active);
1513 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1514 obj_priv->last_rendering_seqno = 0;
1515}
Eric Anholt673a3942008-07-30 12:06:12 -07001516
Chris Wilson963b4832009-09-20 23:03:54 +01001517/* Immediately discard the backing storage */
1518static void
1519i915_gem_object_truncate(struct drm_gem_object *obj)
1520{
Daniel Vetter23010e42010-03-08 13:35:02 +01001521 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001522 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001523
Chris Wilsonae9fed62010-08-07 11:01:30 +01001524 /* Our goal here is to return as much of the memory as
 1525	 * possible back to the system, as we are called from OOM.
1526 * To do this we must instruct the shmfs to drop all of its
1527 * backing pages, *now*. Here we mirror the actions taken
 1528	 * by shmem_delete_inode() to release the backing store.
1529 */
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001530 inode = obj->filp->f_path.dentry->d_inode;
Chris Wilsonae9fed62010-08-07 11:01:30 +01001531 truncate_inode_pages(inode->i_mapping, 0);
1532 if (inode->i_op->truncate_range)
1533 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001534
1535 obj_priv->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001536}
1537
1538static inline int
1539i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1540{
1541 return obj_priv->madv == I915_MADV_DONTNEED;
1542}
1543
Eric Anholt673a3942008-07-30 12:06:12 -07001544static void
1545i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1546{
1547 struct drm_device *dev = obj->dev;
1548 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001549 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001550
1551 i915_verify_inactive(dev, __FILE__, __LINE__);
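	/* Pinned buffers are kept off the LRU lists entirely; everything
	 * else goes to the tail of the inactive list. */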
1552 if (obj_priv->pin_count != 0)
1553 list_del_init(&obj_priv->list);
1554 else
1555 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1556
Daniel Vetter99fcb762010-02-07 16:20:18 +01001557 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1558
Eric Anholtce44b0e2008-11-06 16:00:31 -08001559 obj_priv->last_rendering_seqno = 0;
Zou Nan hai852835f2010-05-21 09:08:56 +08001560 obj_priv->ring = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001561 if (obj_priv->active) {
1562 obj_priv->active = 0;
1563 drm_gem_object_unreference(obj);
1564 }
1565 i915_verify_inactive(dev, __FILE__, __LINE__);
1566}
1567
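/*
 * Walk the global gpu_write_list and, for every object whose pending write
 * domain is covered by this flush on this ring, clear the write domain,
 * move the object onto the ring's active list under the given seqno and
 * bump its fence register (if any) in the fence LRU.
 */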
Daniel Vetter63560392010-02-19 11:51:59 +01001568static void
1569i915_gem_process_flushing_list(struct drm_device *dev,
Zou Nan hai852835f2010-05-21 09:08:56 +08001570 uint32_t flush_domains, uint32_t seqno,
1571 struct intel_ring_buffer *ring)
Daniel Vetter63560392010-02-19 11:51:59 +01001572{
1573 drm_i915_private_t *dev_priv = dev->dev_private;
1574 struct drm_i915_gem_object *obj_priv, *next;
1575
1576 list_for_each_entry_safe(obj_priv, next,
1577 &dev_priv->mm.gpu_write_list,
1578 gpu_write_list) {
Daniel Vettera8089e82010-04-09 19:05:09 +00001579 struct drm_gem_object *obj = &obj_priv->base;
Daniel Vetter63560392010-02-19 11:51:59 +01001580
1581 if ((obj->write_domain & flush_domains) ==
Zou Nan hai852835f2010-05-21 09:08:56 +08001582 obj->write_domain &&
1583 obj_priv->ring->ring_flag == ring->ring_flag) {
Daniel Vetter63560392010-02-19 11:51:59 +01001584 uint32_t old_write_domain = obj->write_domain;
1585
1586 obj->write_domain = 0;
1587 list_del_init(&obj_priv->gpu_write_list);
Zou Nan hai852835f2010-05-21 09:08:56 +08001588 i915_gem_object_move_to_active(obj, seqno, ring);
Daniel Vetter63560392010-02-19 11:51:59 +01001589
1590 /* update the fence lru list */
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001591 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1592 struct drm_i915_fence_reg *reg =
1593 &dev_priv->fence_regs[obj_priv->fence_reg];
1594 list_move_tail(&reg->lru_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001595 &dev_priv->mm.fence_list);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001596 }
Daniel Vetter63560392010-02-19 11:51:59 +01001597
1598 trace_i915_gem_object_change_domain(obj,
1599 obj->read_domains,
1600 old_write_domain);
1601 }
1602 }
1603}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001604
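/**
 * Emits a request on the given ring and tracks it.
 *
 * Records the seqno returned by the ring's add_request hook, associates any
 * objects whose writes are covered by @flush_domains with that seqno, and,
 * unless the device is suspended, re-arms the hangcheck timer (and the
 * retire worker if the ring was idle).  Returns the new seqno, or 0 if the
 * request could not be allocated.
 */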
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001605uint32_t
Eric Anholtb9624422009-06-03 07:27:35 +00001606i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
Zou Nan hai852835f2010-05-21 09:08:56 +08001607 uint32_t flush_domains, struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001608{
1609 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtb9624422009-06-03 07:27:35 +00001610 struct drm_i915_file_private *i915_file_priv = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001611 struct drm_i915_gem_request *request;
1612 uint32_t seqno;
1613 int was_empty;
Eric Anholt673a3942008-07-30 12:06:12 -07001614
Eric Anholtb9624422009-06-03 07:27:35 +00001615 if (file_priv != NULL)
1616 i915_file_priv = file_priv->driver_priv;
1617
Eric Anholt9a298b22009-03-24 12:23:04 -07001618 request = kzalloc(sizeof(*request), GFP_KERNEL);
Eric Anholt673a3942008-07-30 12:06:12 -07001619 if (request == NULL)
1620 return 0;
1621
Zou Nan hai852835f2010-05-21 09:08:56 +08001622 seqno = ring->add_request(dev, ring, file_priv, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07001623
1624 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001625 request->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001626 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001627 was_empty = list_empty(&ring->request_list);
1628 list_add_tail(&request->list, &ring->request_list);
1629
Eric Anholtb9624422009-06-03 07:27:35 +00001630 if (i915_file_priv) {
1631 list_add_tail(&request->client_list,
1632 &i915_file_priv->mm.request_list);
1633 } else {
1634 INIT_LIST_HEAD(&request->client_list);
1635 }
Eric Anholt673a3942008-07-30 12:06:12 -07001636
Eric Anholtce44b0e2008-11-06 16:00:31 -08001637	/* Associate with our flush any objects on the flushing list that
 1638	 * match the write domain we're flushing.
1639 */
Daniel Vetter63560392010-02-19 11:51:59 +01001640 if (flush_domains != 0)
Zou Nan hai852835f2010-05-21 09:08:56 +08001641 i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
Eric Anholtce44b0e2008-11-06 16:00:31 -08001642
Ben Gamarif65d9422009-09-14 17:48:44 -04001643 if (!dev_priv->mm.suspended) {
1644 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1645 if (was_empty)
1646 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1647 }
Eric Anholt673a3942008-07-30 12:06:12 -07001648 return seqno;
1649}
1650
1651/**
1652 * Command execution barrier
1653 *
1654 * Ensures that all commands in the ring are finished
1655 * before signalling the CPU
1656 */
Eric Anholt3043c602008-10-02 12:24:47 -07001657static uint32_t
Zou Nan hai852835f2010-05-21 09:08:56 +08001658i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001659{
Eric Anholt673a3942008-07-30 12:06:12 -07001660 uint32_t flush_domains = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001661
1662 /* The sampler always gets flushed on i965 (sigh) */
1663 if (IS_I965G(dev))
1664 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
Zou Nan hai852835f2010-05-21 09:08:56 +08001665
1666 ring->flush(dev, ring,
1667 I915_GEM_DOMAIN_COMMAND, flush_domains);
Eric Anholt673a3942008-07-30 12:06:12 -07001668 return flush_domains;
1669}
1670
1671/**
1672 * Moves buffers associated only with the given active seqno from the active
1673 * to inactive list, potentially freeing them.
1674 */
1675static void
1676i915_gem_retire_request(struct drm_device *dev,
1677 struct drm_i915_gem_request *request)
1678{
1679 drm_i915_private_t *dev_priv = dev->dev_private;
1680
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001681 trace_i915_gem_request_retire(dev, request->seqno);
1682
Eric Anholt673a3942008-07-30 12:06:12 -07001683 /* Move any buffers on the active list that are no longer referenced
1684 * by the ringbuffer to the flushing/inactive lists as appropriate.
1685 */
Carl Worth5e118f42009-03-20 11:54:25 -07001686 spin_lock(&dev_priv->mm.active_list_lock);
Zou Nan hai852835f2010-05-21 09:08:56 +08001687 while (!list_empty(&request->ring->active_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001688 struct drm_gem_object *obj;
1689 struct drm_i915_gem_object *obj_priv;
1690
Zou Nan hai852835f2010-05-21 09:08:56 +08001691 obj_priv = list_first_entry(&request->ring->active_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001692 struct drm_i915_gem_object,
1693 list);
Daniel Vettera8089e82010-04-09 19:05:09 +00001694 obj = &obj_priv->base;
Eric Anholt673a3942008-07-30 12:06:12 -07001695
1696 /* If the seqno being retired doesn't match the oldest in the
1697 * list, then the oldest in the list must still be newer than
1698 * this seqno.
1699 */
1700 if (obj_priv->last_rendering_seqno != request->seqno)
Carl Worth5e118f42009-03-20 11:54:25 -07001701 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001702
Eric Anholt673a3942008-07-30 12:06:12 -07001703#if WATCH_LRU
1704 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1705 __func__, request->seqno, obj);
1706#endif
1707
Eric Anholtce44b0e2008-11-06 16:00:31 -08001708 if (obj->write_domain != 0)
1709 i915_gem_object_move_to_flushing(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001710 else {
1711 /* Take a reference on the object so it won't be
1712 * freed while the spinlock is held. The list
1713 * protection for this spinlock is safe when breaking
1714 * the lock like this since the next thing we do
1715 * is just get the head of the list again.
1716 */
1717 drm_gem_object_reference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001718 i915_gem_object_move_to_inactive(obj);
Shaohua Li68c84342009-04-08 10:58:23 +08001719 spin_unlock(&dev_priv->mm.active_list_lock);
1720 drm_gem_object_unreference(obj);
1721 spin_lock(&dev_priv->mm.active_list_lock);
1722 }
Eric Anholt673a3942008-07-30 12:06:12 -07001723 }
Carl Worth5e118f42009-03-20 11:54:25 -07001724out:
1725 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001726}
1727
1728/**
 1729	 * Returns true if seq1 is later than or equal to seq2.
1730 */
Ben Gamari22be1722009-09-14 17:48:43 -04001731bool
Eric Anholt673a3942008-07-30 12:06:12 -07001732i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1733{
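	/* Serial-number arithmetic: taking the signed difference handles
	 * 32-bit wraparound, so e.g. seq1 == 1 still counts as having
	 * passed seq2 == 0xffffffff. */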
1734 return (int32_t)(seq1 - seq2) >= 0;
1735}
1736
1737uint32_t
Zou Nan hai852835f2010-05-21 09:08:56 +08001738i915_get_gem_seqno(struct drm_device *dev,
Zou Nan haid1b851f2010-05-21 09:08:57 +08001739 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001740{
Zou Nan hai852835f2010-05-21 09:08:56 +08001741 return ring->get_gem_seqno(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001742}
1743
1744/**
1745 * This function clears the request list as sequence numbers are passed.
1746 */
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001747static void
1748i915_gem_retire_requests_ring(struct drm_device *dev,
1749 struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001750{
1751 drm_i915_private_t *dev_priv = dev->dev_private;
1752 uint32_t seqno;
1753
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001754 if (!ring->status_page.page_addr
Zou Nan hai852835f2010-05-21 09:08:56 +08001755 || list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001756 return;
1757
Zou Nan hai852835f2010-05-21 09:08:56 +08001758 seqno = i915_get_gem_seqno(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001759
Zou Nan hai852835f2010-05-21 09:08:56 +08001760 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001761 struct drm_i915_gem_request *request;
1762 uint32_t retiring_seqno;
1763
Zou Nan hai852835f2010-05-21 09:08:56 +08001764 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001765 struct drm_i915_gem_request,
1766 list);
1767 retiring_seqno = request->seqno;
1768
1769 if (i915_seqno_passed(seqno, retiring_seqno) ||
Ben Gamariba1234d2009-09-14 17:48:47 -04001770 atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001771 i915_gem_retire_request(dev, request);
1772
1773 list_del(&request->list);
Eric Anholtb9624422009-06-03 07:27:35 +00001774 list_del(&request->client_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07001775 kfree(request);
Eric Anholt673a3942008-07-30 12:06:12 -07001776 } else
1777 break;
1778 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001779
1780 if (unlikely (dev_priv->trace_irq_seqno &&
1781 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001782
1783 ring->user_irq_put(dev, ring);
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001784 dev_priv->trace_irq_seqno = 0;
1785 }
Eric Anholt673a3942008-07-30 12:06:12 -07001786}
1787
1788void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001789i915_gem_retire_requests(struct drm_device *dev)
1790{
1791 drm_i915_private_t *dev_priv = dev->dev_private;
1792
Chris Wilsonbe726152010-07-23 23:18:50 +01001793 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1794 struct drm_i915_gem_object *obj_priv, *tmp;
1795
1796 /* We must be careful that during unbind() we do not
1797 * accidentally infinitely recurse into retire requests.
1798 * Currently:
1799 * retire -> free -> unbind -> wait -> retire_ring
1800 */
1801 list_for_each_entry_safe(obj_priv, tmp,
1802 &dev_priv->mm.deferred_free_list,
1803 list)
1804 i915_gem_free_object_tail(&obj_priv->base);
1805 }
1806
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001807 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
1808 if (HAS_BSD(dev))
1809 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
1810}
1811
1812void
Eric Anholt673a3942008-07-30 12:06:12 -07001813i915_gem_retire_work_handler(struct work_struct *work)
1814{
1815 drm_i915_private_t *dev_priv;
1816 struct drm_device *dev;
1817
1818 dev_priv = container_of(work, drm_i915_private_t,
1819 mm.retire_work.work);
1820 dev = dev_priv->dev;
1821
1822 mutex_lock(&dev->struct_mutex);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001823 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001824
Keith Packard6dbe2772008-10-14 21:41:13 -07001825 if (!dev_priv->mm.suspended &&
Zou Nan haid1b851f2010-05-21 09:08:57 +08001826 (!list_empty(&dev_priv->render_ring.request_list) ||
1827 (HAS_BSD(dev) &&
1828 !list_empty(&dev_priv->bsd_ring.request_list))))
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001829 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Eric Anholt673a3942008-07-30 12:06:12 -07001830 mutex_unlock(&dev->struct_mutex);
1831}
1832
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001833int
Zou Nan hai852835f2010-05-21 09:08:56 +08001834i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1835 int interruptible, struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001836{
1837 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001838 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001839 int ret = 0;
1840
1841 BUG_ON(seqno == 0);
1842
Ben Gamariba1234d2009-09-14 17:48:47 -04001843 if (atomic_read(&dev_priv->mm.wedged))
Ben Gamariffed1d02009-09-14 17:48:41 -04001844 return -EIO;
1845
Zou Nan hai852835f2010-05-21 09:08:56 +08001846 if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
Eric Anholtbad720f2009-10-22 16:11:14 -07001847 if (HAS_PCH_SPLIT(dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001848 ier = I915_READ(DEIER) | I915_READ(GTIER);
1849 else
1850 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001851 if (!ier) {
1852 DRM_ERROR("something (likely vbetool) disabled "
1853 "interrupts, re-enabling\n");
1854 i915_driver_irq_preinstall(dev);
1855 i915_driver_irq_postinstall(dev);
1856 }
1857
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001858 trace_i915_gem_request_wait_begin(dev, seqno);
1859
Zou Nan hai852835f2010-05-21 09:08:56 +08001860 ring->waiting_gem_seqno = seqno;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001861 ring->user_irq_get(dev, ring);
Daniel Vetter48764bf2009-09-15 22:57:32 +02001862 if (interruptible)
Zou Nan hai852835f2010-05-21 09:08:56 +08001863 ret = wait_event_interruptible(ring->irq_queue,
1864 i915_seqno_passed(
1865 ring->get_gem_seqno(dev, ring), seqno)
1866 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001867 else
Zou Nan hai852835f2010-05-21 09:08:56 +08001868 wait_event(ring->irq_queue,
1869 i915_seqno_passed(
1870 ring->get_gem_seqno(dev, ring), seqno)
1871 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001872
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001873 ring->user_irq_put(dev, ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08001874 ring->waiting_gem_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001875
1876 trace_i915_gem_request_wait_end(dev, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001877 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001878 if (atomic_read(&dev_priv->mm.wedged))
Eric Anholt673a3942008-07-30 12:06:12 -07001879 ret = -EIO;
1880
1881 if (ret && ret != -ERESTARTSYS)
1882 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
Zou Nan hai852835f2010-05-21 09:08:56 +08001883 __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
Eric Anholt673a3942008-07-30 12:06:12 -07001884
1885 /* Directly dispatch request retiring. While we have the work queue
1886 * to handle this, the waiter on a request often wants an associated
1887 * buffer to have made it to the inactive list, and we would need
1888 * a separate wait queue to handle that.
1889 */
1890 if (ret == 0)
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001891 i915_gem_retire_requests_ring(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001892
1893 return ret;
1894}
1895
Daniel Vetter48764bf2009-09-15 22:57:32 +02001896/**
1897 * Waits for a sequence number to be signaled, and cleans up the
1898 * request and object lists appropriately for that event.
1899 */
1900static int
Zou Nan hai852835f2010-05-21 09:08:56 +08001901i915_wait_request(struct drm_device *dev, uint32_t seqno,
1902 struct intel_ring_buffer *ring)
Daniel Vetter48764bf2009-09-15 22:57:32 +02001903{
Zou Nan hai852835f2010-05-21 09:08:56 +08001904 return i915_do_wait_request(dev, seqno, 1, ring);
Daniel Vetter48764bf2009-09-15 22:57:32 +02001905}
1906
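/*
 * Emit cache invalidation/flushes for the given domain masks on every ring,
 * plus a chipset flush when the CPU domain is being flushed.
 */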
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001907static void
1908i915_gem_flush(struct drm_device *dev,
1909 uint32_t invalidate_domains,
1910 uint32_t flush_domains)
1911{
1912 drm_i915_private_t *dev_priv = dev->dev_private;
1913 if (flush_domains & I915_GEM_DOMAIN_CPU)
1914 drm_agp_chipset_flush(dev);
1915 dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
1916 invalidate_domains,
1917 flush_domains);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001918
1919 if (HAS_BSD(dev))
1920 dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
1921 invalidate_domains,
1922 flush_domains);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001923}
1924
Eric Anholt673a3942008-07-30 12:06:12 -07001925/**
1926 * Ensures that all rendering to the object has completed and the object is
1927 * safe to unbind from the GTT or access from the CPU.
1928 */
1929static int
1930i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1931{
1932 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01001933 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001934 int ret;
1935
Eric Anholte47c68e2008-11-14 13:35:19 -08001936 /* This function only exists to support waiting for existing rendering,
1937 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07001938 */
Eric Anholte47c68e2008-11-14 13:35:19 -08001939 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07001940
1941 /* If there is rendering queued on the buffer being evicted, wait for
1942 * it.
1943 */
1944 if (obj_priv->active) {
1945#if WATCH_BUF
1946 DRM_INFO("%s: object %p wait for seqno %08x\n",
1947 __func__, obj, obj_priv->last_rendering_seqno);
1948#endif
Zou Nan hai852835f2010-05-21 09:08:56 +08001949 ret = i915_wait_request(dev,
1950 obj_priv->last_rendering_seqno, obj_priv->ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001951 if (ret != 0)
1952 return ret;
1953 }
1954
1955 return 0;
1956}
1957
1958/**
1959 * Unbinds an object from the GTT aperture.
1960 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08001961int
Eric Anholt673a3942008-07-30 12:06:12 -07001962i915_gem_object_unbind(struct drm_gem_object *obj)
1963{
1964 struct drm_device *dev = obj->dev;
Daniel Vetter4a87b8c2010-02-19 11:51:57 +01001965 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01001966 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001967 int ret = 0;
1968
1969#if WATCH_BUF
1970 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1971 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1972#endif
1973 if (obj_priv->gtt_space == NULL)
1974 return 0;
1975
1976 if (obj_priv->pin_count != 0) {
1977 DRM_ERROR("Attempting to unbind pinned buffer\n");
1978 return -EINVAL;
1979 }
1980
Eric Anholt5323fd02009-09-09 11:50:45 -07001981 /* blow away mappings if mapped through GTT */
1982 i915_gem_release_mmap(obj);
1983
Eric Anholt673a3942008-07-30 12:06:12 -07001984 /* Move the object to the CPU domain to ensure that
1985 * any possible CPU writes while it's not in the GTT
1986 * are flushed when we go to remap it. This will
1987 * also ensure that all pending GPU writes are finished
1988 * before we unbind.
1989 */
Eric Anholte47c68e2008-11-14 13:35:19 -08001990 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
Chris Wilson8dc17752010-07-23 23:18:51 +01001991 if (ret == -ERESTARTSYS)
Eric Anholt673a3942008-07-30 12:06:12 -07001992 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01001993 /* Continue on if we fail due to EIO, the GPU is hung so we
1994 * should be safe and we need to cleanup or else we might
1995 * cause memory corruption through use-after-free.
1996 */
Eric Anholt673a3942008-07-30 12:06:12 -07001997
Daniel Vetter96b47b62009-12-15 17:50:00 +01001998 /* release the fence reg _after_ flushing */
1999 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2000 i915_gem_clear_fence_reg(obj);
2001
Eric Anholt673a3942008-07-30 12:06:12 -07002002 if (obj_priv->agp_mem != NULL) {
2003 drm_unbind_agp(obj_priv->agp_mem);
2004 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2005 obj_priv->agp_mem = NULL;
2006 }
2007
Eric Anholt856fa192009-03-19 14:10:50 -07002008 i915_gem_object_put_pages(obj);
Chris Wilsona32808c2009-09-20 21:29:47 +01002009 BUG_ON(obj_priv->pages_refcount);
Eric Anholt673a3942008-07-30 12:06:12 -07002010
2011 if (obj_priv->gtt_space) {
2012 atomic_dec(&dev->gtt_count);
2013 atomic_sub(obj->size, &dev->gtt_memory);
2014
2015 drm_mm_put_block(obj_priv->gtt_space);
2016 obj_priv->gtt_space = NULL;
2017 }
2018
2019 /* Remove ourselves from the LRU list if present. */
Daniel Vetter4a87b8c2010-02-19 11:51:57 +01002020 spin_lock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002021 if (!list_empty(&obj_priv->list))
2022 list_del_init(&obj_priv->list);
Daniel Vetter4a87b8c2010-02-19 11:51:57 +01002023 spin_unlock(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07002024
Chris Wilson963b4832009-09-20 23:03:54 +01002025 if (i915_gem_object_is_purgeable(obj_priv))
2026 i915_gem_object_truncate(obj);
2027
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002028 trace_i915_gem_object_unbind(obj);
2029
Chris Wilson8dc17752010-07-23 23:18:51 +01002030 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002031}
2032
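/*
 * Waits for the GPU to go idle: if anything is still active, flush all GPU
 * domains, emit a request on each ring and wait for those requests to
 * complete.
 */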
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01002033int
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002034i915_gpu_idle(struct drm_device *dev)
2035{
2036 drm_i915_private_t *dev_priv = dev->dev_private;
2037 bool lists_empty;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002038 uint32_t seqno1, seqno2;
Zou Nan hai852835f2010-05-21 09:08:56 +08002039 int ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002040
2041 spin_lock(&dev_priv->mm.active_list_lock);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002042 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2043 list_empty(&dev_priv->render_ring.active_list) &&
2044 (!HAS_BSD(dev) ||
2045 list_empty(&dev_priv->bsd_ring.active_list)));
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002046 spin_unlock(&dev_priv->mm.active_list_lock);
2047
2048 if (lists_empty)
2049 return 0;
2050
2051 /* Flush everything onto the inactive list. */
2052 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002053 seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
Zou Nan hai852835f2010-05-21 09:08:56 +08002054 &dev_priv->render_ring);
Zou Nan haid1b851f2010-05-21 09:08:57 +08002055 if (seqno1 == 0)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002056 return -ENOMEM;
Zou Nan haid1b851f2010-05-21 09:08:57 +08002057 ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
2058
2059 if (HAS_BSD(dev)) {
2060 seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
2061 &dev_priv->bsd_ring);
2062 if (seqno2 == 0)
2063 return -ENOMEM;
2064
2065 ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
2066 if (ret)
2067 return ret;
2068 }
2069
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002070
Zou Nan hai852835f2010-05-21 09:08:56 +08002071 return ret;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002072}
2073
Ben Gamari6911a9b2009-04-02 11:24:54 -07002074int
Chris Wilson4bdadb92010-01-27 13:36:32 +00002075i915_gem_object_get_pages(struct drm_gem_object *obj,
2076 gfp_t gfpmask)
Eric Anholt673a3942008-07-30 12:06:12 -07002077{
Daniel Vetter23010e42010-03-08 13:35:02 +01002078 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002079 int page_count, i;
2080 struct address_space *mapping;
2081 struct inode *inode;
2082 struct page *page;
Eric Anholt673a3942008-07-30 12:06:12 -07002083
Daniel Vetter778c3542010-05-13 11:49:44 +02002084 BUG_ON(obj_priv->pages_refcount
2085 == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2086
Eric Anholt856fa192009-03-19 14:10:50 -07002087 if (obj_priv->pages_refcount++ != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07002088 return 0;
2089
2090 /* Get the list of pages out of our struct file. They'll be pinned
2091 * at this point until we release them.
2092 */
2093 page_count = obj->size / PAGE_SIZE;
Eric Anholt856fa192009-03-19 14:10:50 -07002094 BUG_ON(obj_priv->pages != NULL);
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07002095 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
Eric Anholt856fa192009-03-19 14:10:50 -07002096 if (obj_priv->pages == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002097 obj_priv->pages_refcount--;
Eric Anholt673a3942008-07-30 12:06:12 -07002098 return -ENOMEM;
2099 }
2100
2101 inode = obj->filp->f_path.dentry->d_inode;
2102 mapping = inode->i_mapping;
2103 for (i = 0; i < page_count; i++) {
Chris Wilson4bdadb92010-01-27 13:36:32 +00002104 page = read_cache_page_gfp(mapping, i,
Linus Torvalds985b8232010-07-02 10:04:42 +10002105 GFP_HIGHUSER |
Chris Wilson4bdadb92010-01-27 13:36:32 +00002106 __GFP_COLD |
Linus Torvaldscd9f0402010-07-18 09:44:37 -07002107 __GFP_RECLAIMABLE |
Chris Wilson4bdadb92010-01-27 13:36:32 +00002108 gfpmask);
Chris Wilson1f2b1012010-03-12 19:52:55 +00002109 if (IS_ERR(page))
2110 goto err_pages;
2111
Eric Anholt856fa192009-03-19 14:10:50 -07002112 obj_priv->pages[i] = page;
Eric Anholt673a3942008-07-30 12:06:12 -07002113 }
Eric Anholt280b7132009-03-12 16:56:27 -07002114
2115 if (obj_priv->tiling_mode != I915_TILING_NONE)
2116 i915_gem_object_do_bit_17_swizzle(obj);
2117
Eric Anholt673a3942008-07-30 12:06:12 -07002118 return 0;
Chris Wilson1f2b1012010-03-12 19:52:55 +00002119
2120err_pages:
2121 while (i--)
2122 page_cache_release(obj_priv->pages[i]);
2123
2124 drm_free_large(obj_priv->pages);
2125 obj_priv->pages = NULL;
2126 obj_priv->pages_refcount--;
2127 return PTR_ERR(page);
Eric Anholt673a3942008-07-30 12:06:12 -07002128}
2129
Eric Anholt4e901fd2009-10-26 16:44:17 -07002130static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2131{
2132 struct drm_gem_object *obj = reg->obj;
2133 struct drm_device *dev = obj->dev;
2134 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002135 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt4e901fd2009-10-26 16:44:17 -07002136 int regnum = obj_priv->fence_reg;
2137 uint64_t val;
2138
2139 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2140 0xfffff000) << 32;
2141 val |= obj_priv->gtt_offset & 0xfffff000;
2142 val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2143 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2144
2145 if (obj_priv->tiling_mode == I915_TILING_Y)
2146 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2147 val |= I965_FENCE_REG_VALID;
2148
2149 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2150}
2151
Jesse Barnesde151cf2008-11-12 10:03:55 -08002152static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2153{
2154 struct drm_gem_object *obj = reg->obj;
2155 struct drm_device *dev = obj->dev;
2156 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002157 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002158 int regnum = obj_priv->fence_reg;
2159 uint64_t val;
2160
2161 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2162 0xfffff000) << 32;
2163 val |= obj_priv->gtt_offset & 0xfffff000;
2164 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2165 if (obj_priv->tiling_mode == I915_TILING_Y)
2166 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2167 val |= I965_FENCE_REG_VALID;
2168
2169 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2170}
2171
2172static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2173{
2174 struct drm_gem_object *obj = reg->obj;
2175 struct drm_device *dev = obj->dev;
2176 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002177 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002178 int regnum = obj_priv->fence_reg;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002179 int tile_width;
Eric Anholtdc529a42009-03-10 22:34:49 -07002180 uint32_t fence_reg, val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002181 uint32_t pitch_val;
2182
2183 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2184 (obj_priv->gtt_offset & (obj->size - 1))) {
Linus Torvaldsf06da262009-02-09 08:57:29 -08002185 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002186 __func__, obj_priv->gtt_offset, obj->size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002187 return;
2188 }
2189
Jesse Barnes0f973f22009-01-26 17:10:45 -08002190 if (obj_priv->tiling_mode == I915_TILING_Y &&
2191 HAS_128_BYTE_Y_TILING(dev))
2192 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002193 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002194 tile_width = 512;
2195
2196 /* Note: pitch better be a power of two tile widths */
2197 pitch_val = obj_priv->stride / tile_width;
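	/* The fence register encodes log2 of the pitch in tile widths. */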
2198 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002199
Daniel Vetterc36a2a62010-04-17 15:12:03 +02002200 if (obj_priv->tiling_mode == I915_TILING_Y &&
2201 HAS_128_BYTE_Y_TILING(dev))
2202 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2203 else
2204 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2205
Jesse Barnesde151cf2008-11-12 10:03:55 -08002206 val = obj_priv->gtt_offset;
2207 if (obj_priv->tiling_mode == I915_TILING_Y)
2208 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2209 val |= I915_FENCE_SIZE_BITS(obj->size);
2210 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2211 val |= I830_FENCE_REG_VALID;
2212
Eric Anholtdc529a42009-03-10 22:34:49 -07002213 if (regnum < 8)
2214 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2215 else
2216 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2217 I915_WRITE(fence_reg, val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002218}
2219
2220static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2221{
2222 struct drm_gem_object *obj = reg->obj;
2223 struct drm_device *dev = obj->dev;
2224 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002225 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002226 int regnum = obj_priv->fence_reg;
2227 uint32_t val;
2228 uint32_t pitch_val;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002229 uint32_t fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002230
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002231 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
Jesse Barnesde151cf2008-11-12 10:03:55 -08002232 (obj_priv->gtt_offset & (obj->size - 1))) {
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002233 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
Jesse Barnes0f973f22009-01-26 17:10:45 -08002234 __func__, obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002235 return;
2236 }
2237
Eric Anholte76a16d2009-05-26 17:44:56 -07002238 pitch_val = obj_priv->stride / 128;
2239 pitch_val = ffs(pitch_val) - 1;
2240 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2241
Jesse Barnesde151cf2008-11-12 10:03:55 -08002242 val = obj_priv->gtt_offset;
2243 if (obj_priv->tiling_mode == I915_TILING_Y)
2244 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002245 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2246 WARN_ON(fence_size_bits & ~0x00000f00);
2247 val |= fence_size_bits;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002248 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2249 val |= I830_FENCE_REG_VALID;
2250
2251 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002252}
2253
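/*
 * Returns a free fence register index if one is available; otherwise steals
 * the least-recently-used register whose object is not pinned (waiting for
 * outstanding access via put_fence_reg), or returns -ENOSPC if every
 * register is pinned.
 */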
Daniel Vetterae3db242010-02-19 11:51:58 +01002254static int i915_find_fence_reg(struct drm_device *dev)
2255{
2256 struct drm_i915_fence_reg *reg = NULL;
2257 struct drm_i915_gem_object *obj_priv = NULL;
2258 struct drm_i915_private *dev_priv = dev->dev_private;
2259 struct drm_gem_object *obj = NULL;
2260 int i, avail, ret;
2261
2262 /* First try to find a free reg */
2263 avail = 0;
2264 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2265 reg = &dev_priv->fence_regs[i];
2266 if (!reg->obj)
2267 return i;
2268
Daniel Vetter23010e42010-03-08 13:35:02 +01002269 obj_priv = to_intel_bo(reg->obj);
Daniel Vetterae3db242010-02-19 11:51:58 +01002270 if (!obj_priv->pin_count)
2271 avail++;
2272 }
2273
2274 if (avail == 0)
2275 return -ENOSPC;
2276
2277 /* None available, try to steal one or wait for a user to finish */
2278 i = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002279 list_for_each_entry(reg, &dev_priv->mm.fence_list,
2280 lru_list) {
2281 obj = reg->obj;
2282 obj_priv = to_intel_bo(obj);
Daniel Vetterae3db242010-02-19 11:51:58 +01002283
2284 if (obj_priv->pin_count)
2285 continue;
2286
2287 /* found one! */
2288 i = obj_priv->fence_reg;
2289 break;
2290 }
2291
2292 BUG_ON(i == I915_FENCE_REG_NONE);
2293
2294 /* We only have a reference on obj from the active list. put_fence_reg
2295 * might drop that one, causing a use-after-free in it. So hold a
2296 * private reference to obj like the other callers of put_fence_reg
2297 * (set_tiling ioctl) do. */
2298 drm_gem_object_reference(obj);
2299 ret = i915_gem_object_put_fence_reg(obj);
2300 drm_gem_object_unreference(obj);
2301 if (ret != 0)
2302 return ret;
2303
2304 return i;
2305}
2306
Jesse Barnesde151cf2008-11-12 10:03:55 -08002307/**
2308 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2309 * @obj: object to map through a fence reg
2310 *
2311 * When mapping objects through the GTT, userspace wants to be able to write
2312 * to them without having to worry about swizzling if the object is tiled.
2313 *
2314 * This function walks the fence regs looking for a free one for @obj,
2315 * stealing one if it can't find any.
2316 *
2317 * It then sets up the reg based on the object's properties: address, pitch
2318 * and tiling format.
2319 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002320int
2321i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002322{
2323 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002324 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002325 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002326 struct drm_i915_fence_reg *reg = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002327 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002328
Eric Anholta09ba7f2009-08-29 12:49:51 -07002329 /* Just update our place in the LRU if our fence is getting used. */
2330 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002331 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2332 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002333 return 0;
2334 }
2335
Jesse Barnesde151cf2008-11-12 10:03:55 -08002336 switch (obj_priv->tiling_mode) {
2337 case I915_TILING_NONE:
2338 WARN(1, "allocating a fence for non-tiled object?\n");
2339 break;
2340 case I915_TILING_X:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002341 if (!obj_priv->stride)
2342 return -EINVAL;
2343 WARN((obj_priv->stride & (512 - 1)),
2344 "object 0x%08x is X tiled but has non-512B pitch\n",
2345 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002346 break;
2347 case I915_TILING_Y:
Jesse Barnes0f973f22009-01-26 17:10:45 -08002348 if (!obj_priv->stride)
2349 return -EINVAL;
2350 WARN((obj_priv->stride & (128 - 1)),
2351 "object 0x%08x is Y tiled but has non-128B pitch\n",
2352 obj_priv->gtt_offset);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002353 break;
2354 }
2355
Daniel Vetterae3db242010-02-19 11:51:58 +01002356 ret = i915_find_fence_reg(dev);
2357 if (ret < 0)
2358 return ret;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002359
Daniel Vetterae3db242010-02-19 11:51:58 +01002360 obj_priv->fence_reg = ret;
2361 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002362 list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07002363
Jesse Barnesde151cf2008-11-12 10:03:55 -08002364 reg->obj = obj;
2365
Chris Wilsone259bef2010-09-17 00:32:02 +01002366 switch (INTEL_INFO(dev)->gen) {
2367 case 6:
Eric Anholt4e901fd2009-10-26 16:44:17 -07002368 sandybridge_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002369 break;
2370 case 5:
2371 case 4:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002372 i965_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002373 break;
2374 case 3:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002375 i915_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002376 break;
2377 case 2:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002378 i830_write_fence_reg(reg);
Chris Wilsone259bef2010-09-17 00:32:02 +01002379 break;
2380 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002381
Daniel Vetterae3db242010-02-19 11:51:58 +01002382 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2383 obj_priv->tiling_mode);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002384
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002385 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002386}
2387
2388/**
2389 * i915_gem_clear_fence_reg - clear out fence register info
2390 * @obj: object to clear
2391 *
2392 * Zeroes out the fence register itself and clears out the associated
2393 * data structures in dev_priv and obj_priv.
2394 */
2395static void
2396i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2397{
2398 struct drm_device *dev = obj->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002399 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002400 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002401 struct drm_i915_fence_reg *reg =
2402 &dev_priv->fence_regs[obj_priv->fence_reg];
Chris Wilsone259bef2010-09-17 00:32:02 +01002403 uint32_t fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002404
Chris Wilsone259bef2010-09-17 00:32:02 +01002405 switch (INTEL_INFO(dev)->gen) {
2406 case 6:
Eric Anholt4e901fd2009-10-26 16:44:17 -07002407 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2408 (obj_priv->fence_reg * 8), 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002409 break;
2410 case 5:
2411 case 4:
Jesse Barnesde151cf2008-11-12 10:03:55 -08002412 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002413 break;
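	/* Note the construct below: the "case 2:" label sits inside the else
	 * branch, so gen2 shares the FENCE_REG_830_0 computation with the
	 * first eight gen3 fence registers. */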
2414 case 3:
Chris Wilson9b74f732010-09-22 19:10:44 +01002415 if (obj_priv->fence_reg >= 8)
Chris Wilsone259bef2010-09-17 00:32:02 +01002416 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002417 else
Chris Wilsone259bef2010-09-17 00:32:02 +01002418 case 2:
2419 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002420
2421 I915_WRITE(fence_reg, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002422 break;
Eric Anholtdc529a42009-03-10 22:34:49 -07002423 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002424
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002425 reg->obj = NULL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002426 obj_priv->fence_reg = I915_FENCE_REG_NONE;
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002427 list_del_init(&reg->lru_list);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002428}
2429
Eric Anholt673a3942008-07-30 12:06:12 -07002430/**
Chris Wilson52dc7d32009-06-06 09:46:01 +01002431 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2432 * to the buffer to finish, and then resets the fence register.
2433 * @obj: tiled object holding a fence register.
2434 *
2435 * Zeroes out the fence register itself and clears out the associated
2436 * data structures in dev_priv and obj_priv.
2437 */
2438int
2439i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2440{
2441 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002442 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson52dc7d32009-06-06 09:46:01 +01002443
2444 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2445 return 0;
2446
Daniel Vetter10ae9bd2010-02-01 13:59:17 +01002447 /* If we've changed tiling, GTT-mappings of the object
2448 * need to re-fault to ensure that the correct fence register
2449 * setup is in place.
2450 */
2451 i915_gem_release_mmap(obj);
2452
Chris Wilson52dc7d32009-06-06 09:46:01 +01002453 /* On the i915, GPU access to tiled buffers is via a fence,
2454 * therefore we must wait for any outstanding access to complete
2455 * before clearing the fence.
2456 */
2457 if (!IS_I965G(dev)) {
2458 int ret;
2459
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002460 ret = i915_gem_object_flush_gpu_write_domain(obj);
2461 if (ret != 0)
2462 return ret;
2463
Chris Wilson52dc7d32009-06-06 09:46:01 +01002464 ret = i915_gem_object_wait_rendering(obj);
2465 if (ret != 0)
2466 return ret;
2467 }
2468
Daniel Vetter4a726612010-02-01 13:59:16 +01002469 i915_gem_object_flush_gtt_write_domain(obj);
Chris Wilson52dc7d32009-06-06 09:46:01 +01002470	i915_gem_clear_fence_reg(obj);
2471
2472 return 0;
2473}
2474
2475/**
Eric Anholt673a3942008-07-30 12:06:12 -07002476 * Finds free space in the GTT aperture and binds the object there.
2477 */
2478static int
2479i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2480{
2481 struct drm_device *dev = obj->dev;
2482 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002483 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002484 struct drm_mm_node *free_space;
Chris Wilson4bdadb92010-01-27 13:36:32 +00002485 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Chris Wilson07f73f62009-09-14 16:50:30 +01002486 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002487
Chris Wilsonbb6baf72009-09-22 14:24:13 +01002488 if (obj_priv->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002489 DRM_ERROR("Attempting to bind a purgeable object\n");
2490 return -EINVAL;
2491 }
2492
Eric Anholt673a3942008-07-30 12:06:12 -07002493 if (alignment == 0)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002494 alignment = i915_gem_get_gtt_alignment(obj);
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002495 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002496 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2497 return -EINVAL;
2498 }
2499
Chris Wilson654fc602010-05-27 13:18:21 +01002500 /* If the object is bigger than the entire aperture, reject it early
2501 * before evicting everything in a vain attempt to find space.
2502 */
2503 if (obj->size > dev->gtt_total) {
2504 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2505 return -E2BIG;
2506 }
2507
Eric Anholt673a3942008-07-30 12:06:12 -07002508 search_free:
2509 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2510 obj->size, alignment, 0);
2511 if (free_space != NULL) {
2512 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2513 alignment);
Daniel Vetterdb3307a2010-07-02 15:02:12 +01002514 if (obj_priv->gtt_space != NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002515 obj_priv->gtt_offset = obj_priv->gtt_space->start;
Eric Anholt673a3942008-07-30 12:06:12 -07002516 }
2517 if (obj_priv->gtt_space == NULL) {
2518 /* If the gtt is empty and we're still having trouble
2519 * fitting our object in, we're out of memory.
2520 */
2521#if WATCH_LRU
2522 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2523#endif
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002524 ret = i915_gem_evict_something(dev, obj->size, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01002525 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002526 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002527
Eric Anholt673a3942008-07-30 12:06:12 -07002528 goto search_free;
2529 }
2530
2531#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02002532 DRM_INFO("Binding object of size %zd at 0x%08x\n",
Eric Anholt673a3942008-07-30 12:06:12 -07002533 obj->size, obj_priv->gtt_offset);
2534#endif
Chris Wilson4bdadb92010-01-27 13:36:32 +00002535 ret = i915_gem_object_get_pages(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002536 if (ret) {
2537 drm_mm_put_block(obj_priv->gtt_space);
2538 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002539
2540 if (ret == -ENOMEM) {
2541 /* first try to clear up some space from the GTT */
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002542 ret = i915_gem_evict_something(dev, obj->size,
2543 alignment);
Chris Wilson07f73f62009-09-14 16:50:30 +01002544 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002545 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002546 if (gfpmask) {
2547 gfpmask = 0;
2548 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002549 }
2550
2551 return ret;
2552 }
2553
2554 goto search_free;
2555 }
2556
Eric Anholt673a3942008-07-30 12:06:12 -07002557 return ret;
2558 }
2559
Eric Anholt673a3942008-07-30 12:06:12 -07002560 /* Create an AGP memory structure pointing at our pages, and bind it
2561 * into the GTT.
2562 */
2563 obj_priv->agp_mem = drm_agp_bind_pages(dev,
Eric Anholt856fa192009-03-19 14:10:50 -07002564 obj_priv->pages,
Chris Wilson07f73f62009-09-14 16:50:30 +01002565 obj->size >> PAGE_SHIFT,
Keith Packardba1eb1d2008-10-14 19:55:10 -07002566 obj_priv->gtt_offset,
2567 obj_priv->agp_type);
Eric Anholt673a3942008-07-30 12:06:12 -07002568 if (obj_priv->agp_mem == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002569 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002570 drm_mm_put_block(obj_priv->gtt_space);
2571 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002572
Daniel Vetter0108a3e2010-08-07 11:01:21 +01002573 ret = i915_gem_evict_something(dev, obj->size, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01002574 if (ret)
Chris Wilson07f73f62009-09-14 16:50:30 +01002575 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002576
2577 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002578 }
2579 atomic_inc(&dev->gtt_count);
2580 atomic_add(obj->size, &dev->gtt_memory);
2581
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002582	/* keep track of the bound object by adding it to the inactive list */
2583 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
2584
Eric Anholt673a3942008-07-30 12:06:12 -07002585 /* Assert that the object is not currently in any GPU domain. As it
2586 * wasn't in the GTT, there shouldn't be any way it could have been in
2587 * a GPU cache
2588 */
Chris Wilson21d509e2009-06-06 09:46:02 +01002589 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2590 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002591
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002592 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2593
Eric Anholt673a3942008-07-30 12:06:12 -07002594 return 0;
2595}
2596
2597void
2598i915_gem_clflush_object(struct drm_gem_object *obj)
2599{
Daniel Vetter23010e42010-03-08 13:35:02 +01002600 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002601
2602 /* If we don't have a page list set up, then we're not pinned
2603 * to GPU, and we can ignore the cache flush because it'll happen
2604 * again at bind time.
2605 */
Eric Anholt856fa192009-03-19 14:10:50 -07002606 if (obj_priv->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002607 return;
2608
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002609 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002610
Eric Anholt856fa192009-03-19 14:10:50 -07002611 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002612}
2613
Eric Anholte47c68e2008-11-14 13:35:19 -08002614/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002615static int
Eric Anholte47c68e2008-11-14 13:35:19 -08002616i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2617{
2618 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002619 uint32_t old_write_domain;
Zou Nan hai852835f2010-05-21 09:08:56 +08002620 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002621
2622 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002623 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002624
2625 /* Queue the GPU write cache flushing we need. */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002626 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002627 i915_gem_flush(dev, 0, obj->write_domain);
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002628 if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
2629 return -ENOMEM;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002630
2631 trace_i915_gem_object_change_domain(obj,
2632 obj->read_domains,
2633 old_write_domain);
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002634 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002635}
2636
2637/** Flushes the GTT write domain for the object if it's dirty. */
2638static void
2639i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2640{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002641 uint32_t old_write_domain;
2642
Eric Anholte47c68e2008-11-14 13:35:19 -08002643 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2644 return;
2645
2646 /* No actual flushing is required for the GTT write domain. Writes
2647 * to it immediately go to main memory as far as we know, so there's
 2648	 * no chipset flush.  They also don't land in the render cache.
2649 */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002650 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002651 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002652
2653 trace_i915_gem_object_change_domain(obj,
2654 obj->read_domains,
2655 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002656}
2657
2658/** Flushes the CPU write domain for the object if it's dirty. */
2659static void
2660i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2661{
2662 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002663 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002664
2665 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2666 return;
2667
2668 i915_gem_clflush_object(obj);
2669 drm_agp_chipset_flush(dev);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002670 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002671 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002672
2673 trace_i915_gem_object_change_domain(obj,
2674 obj->read_domains,
2675 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002676}
2677
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002678int
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05002679i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2680{
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002681 int ret = 0;
2682
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05002683 switch (obj->write_domain) {
2684 case I915_GEM_DOMAIN_GTT:
2685 i915_gem_object_flush_gtt_write_domain(obj);
2686 break;
2687 case I915_GEM_DOMAIN_CPU:
2688 i915_gem_object_flush_cpu_write_domain(obj);
2689 break;
2690 default:
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002691 ret = i915_gem_object_flush_gpu_write_domain(obj);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05002692 break;
2693 }
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002694
2695 return ret;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05002696}
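
/*
 * Illustrative note on the helper above (a sketch, not copied from a
 * specific caller): code that needs an object's dirty caches flushed before
 * handing it to another consumer can simply do
 *
 *	ret = i915_gem_object_flush_write_domain(obj);
 *	if (ret)
 *		return ret;
 *
 * Only the GPU write domain case can fail, since it may need to queue a
 * flush request; the GTT and CPU cases cannot.
 */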
2697
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002698/**
2699 * Moves a single object to the GTT read, and possibly write domain.
2700 *
2701 * This function returns when the move is complete, including waiting on
2702 * flushes to occur.
2703 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002704int
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002705i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2706{
Daniel Vetter23010e42010-03-08 13:35:02 +01002707 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002708 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002709 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002710
Eric Anholt02354392008-11-26 13:58:13 -08002711 /* Not valid to be called on unbound objects. */
2712 if (obj_priv->gtt_space == NULL)
2713 return -EINVAL;
2714
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002715 ret = i915_gem_object_flush_gpu_write_domain(obj);
2716 if (ret != 0)
2717 return ret;
2718
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002719 /* Wait on any GPU rendering and flushing to occur. */
Eric Anholte47c68e2008-11-14 13:35:19 -08002720 ret = i915_gem_object_wait_rendering(obj);
2721 if (ret != 0)
2722 return ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002723
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002724 old_write_domain = obj->write_domain;
2725 old_read_domains = obj->read_domains;
2726
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002727 /* If we're writing through the GTT domain, then CPU and GPU caches
2728 * will need to be invalidated at next use.
2729 */
2730 if (write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002731 obj->read_domains &= I915_GEM_DOMAIN_GTT;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002732
Eric Anholte47c68e2008-11-14 13:35:19 -08002733 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002734
2735 /* It should now be out of any other write domains, and we can update
2736 * the domain values for our changes.
2737 */
2738 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2739 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002740 if (write) {
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002741 obj->write_domain = I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002742 obj_priv->dirty = 1;
2743 }
2744
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002745 trace_i915_gem_object_change_domain(obj,
2746 old_read_domains,
2747 old_write_domain);
2748
Eric Anholte47c68e2008-11-14 13:35:19 -08002749 return 0;
2750}
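
/*
 * Illustrative usage of the function above (a sketch, not taken from a
 * particular caller): writers that go through the GTT aperture typically
 * pin the object and then move it to the GTT domain for writing, e.g.
 *
 *	ret = i915_gem_object_pin(obj, 0);
 *	if (ret == 0)
 *		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 */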
2751
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002752/*
 2753 * Prepare a buffer for use as a display plane. Use an uninterruptible wait for
 2754 * any needed flush, since during modesetting we're not supposed to be interrupted.
2755 */
2756int
2757i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2758{
2759 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002760 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002761 uint32_t old_write_domain, old_read_domains;
2762 int ret;
2763
2764 /* Not valid to be called on unbound objects. */
2765 if (obj_priv->gtt_space == NULL)
2766 return -EINVAL;
2767
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002768 ret = i915_gem_object_flush_gpu_write_domain(obj);
2769 if (ret)
2770 return ret;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002771
2772 /* Wait on any GPU rendering and flushing to occur. */
2773 if (obj_priv->active) {
2774#if WATCH_BUF
2775 DRM_INFO("%s: object %p wait for seqno %08x\n",
2776 __func__, obj, obj_priv->last_rendering_seqno);
2777#endif
Zou Nan hai852835f2010-05-21 09:08:56 +08002778 ret = i915_do_wait_request(dev,
2779 obj_priv->last_rendering_seqno,
2780 0,
2781 obj_priv->ring);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002782 if (ret != 0)
2783 return ret;
2784 }
2785
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002786 i915_gem_object_flush_cpu_write_domain(obj);
2787
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002788 old_write_domain = obj->write_domain;
2789 old_read_domains = obj->read_domains;
2790
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002791 /* It should now be out of any other write domains, and we can update
2792 * the domain values for our changes.
2793 */
2794 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002795 obj->read_domains = I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002796 obj->write_domain = I915_GEM_DOMAIN_GTT;
2797 obj_priv->dirty = 1;
2798
2799 trace_i915_gem_object_change_domain(obj,
2800 old_read_domains,
2801 old_write_domain);
2802
2803 return 0;
2804}
2805
Eric Anholte47c68e2008-11-14 13:35:19 -08002806/**
2807 * Moves a single object to the CPU read, and possibly write domain.
2808 *
2809 * This function returns when the move is complete, including waiting on
2810 * flushes to occur.
2811 */
2812static int
2813i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2814{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002815 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002816 int ret;
2817
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002818 ret = i915_gem_object_flush_gpu_write_domain(obj);
2819 if (ret)
2820 return ret;
2821
Eric Anholte47c68e2008-11-14 13:35:19 -08002822 /* Wait on any GPU rendering and flushing to occur. */
2823 ret = i915_gem_object_wait_rendering(obj);
2824 if (ret != 0)
2825 return ret;
2826
2827 i915_gem_object_flush_gtt_write_domain(obj);
2828
2829 /* If we have a partially-valid cache of the object in the CPU,
2830 * finish invalidating it and free the per-page flags.
2831 */
2832 i915_gem_object_set_to_full_cpu_read_domain(obj);
2833
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002834 old_write_domain = obj->write_domain;
2835 old_read_domains = obj->read_domains;
2836
Eric Anholte47c68e2008-11-14 13:35:19 -08002837 /* Flush the CPU cache if it's still invalid. */
2838 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2839 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002840
2841 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2842 }
2843
2844 /* It should now be out of any other write domains, and we can update
2845 * the domain values for our changes.
2846 */
2847 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2848
2849 /* If we're writing through the CPU, then the GPU read domains will
2850 * need to be invalidated at next use.
2851 */
2852 if (write) {
2853 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2854 obj->write_domain = I915_GEM_DOMAIN_CPU;
2855 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002856
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002857 trace_i915_gem_object_change_domain(obj,
2858 old_read_domains,
2859 old_write_domain);
2860
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002861 return 0;
2862}
2863
Eric Anholt673a3942008-07-30 12:06:12 -07002864/*
2865 * Set the next domain for the specified object. This
 2866 * may not actually perform the necessary flushing/invalidating though,
2867 * as that may want to be batched with other set_domain operations
2868 *
2869 * This is (we hope) the only really tricky part of gem. The goal
2870 * is fairly simple -- track which caches hold bits of the object
2871 * and make sure they remain coherent. A few concrete examples may
2872 * help to explain how it works. For shorthand, we use the notation
 2873 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2874 * a pair of read and write domain masks.
2875 *
2876 * Case 1: the batch buffer
2877 *
2878 * 1. Allocated
2879 * 2. Written by CPU
2880 * 3. Mapped to GTT
2881 * 4. Read by GPU
2882 * 5. Unmapped from GTT
2883 * 6. Freed
2884 *
2885 * Let's take these a step at a time
2886 *
2887 * 1. Allocated
2888 * Pages allocated from the kernel may still have
2889 * cache contents, so we set them to (CPU, CPU) always.
2890 * 2. Written by CPU (using pwrite)
2891 * The pwrite function calls set_domain (CPU, CPU) and
2892 * this function does nothing (as nothing changes)
 2893 * 3. Mapped to GTT
2894 * This function asserts that the object is not
2895 * currently in any GPU-based read or write domains
2896 * 4. Read by GPU
2897 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2898 * As write_domain is zero, this function adds in the
2899 * current read domains (CPU+COMMAND, 0).
2900 * flush_domains is set to CPU.
2901 * invalidate_domains is set to COMMAND
2902 * clflush is run to get data out of the CPU caches
2903 * then i915_dev_set_domain calls i915_gem_flush to
2904 * emit an MI_FLUSH and drm_agp_chipset_flush
2905 * 5. Unmapped from GTT
2906 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2907 * flush_domains and invalidate_domains end up both zero
2908 * so no flushing/invalidating happens
2909 * 6. Freed
2910 * yay, done
2911 *
2912 * Case 2: The shared render buffer
2913 *
2914 * 1. Allocated
2915 * 2. Mapped to GTT
2916 * 3. Read/written by GPU
2917 * 4. set_domain to (CPU,CPU)
2918 * 5. Read/written by CPU
2919 * 6. Read/written by GPU
2920 *
2921 * 1. Allocated
2922 * Same as last example, (CPU, CPU)
2923 * 2. Mapped to GTT
2924 * Nothing changes (assertions find that it is not in the GPU)
2925 * 3. Read/written by GPU
2926 * execbuffer calls set_domain (RENDER, RENDER)
2927 * flush_domains gets CPU
2928 * invalidate_domains gets GPU
2929 * clflush (obj)
2930 * MI_FLUSH and drm_agp_chipset_flush
2931 * 4. set_domain (CPU, CPU)
2932 * flush_domains gets GPU
2933 * invalidate_domains gets CPU
2934 * wait_rendering (obj) to make sure all drawing is complete.
2935 * This will include an MI_FLUSH to get the data from GPU
2936 * to memory
2937 * clflush (obj) to invalidate the CPU cache
2938 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2939 * 5. Read/written by CPU
2940 * cache lines are loaded and dirtied
 2941 * 6. Read/written by GPU
2942 * Same as last GPU access
2943 *
2944 * Case 3: The constant buffer
2945 *
2946 * 1. Allocated
2947 * 2. Written by CPU
2948 * 3. Read by GPU
2949 * 4. Updated (written) by CPU again
2950 * 5. Read by GPU
2951 *
2952 * 1. Allocated
2953 * (CPU, CPU)
2954 * 2. Written by CPU
2955 * (CPU, CPU)
2956 * 3. Read by GPU
2957 * (CPU+RENDER, 0)
2958 * flush_domains = CPU
2959 * invalidate_domains = RENDER
2960 * clflush (obj)
2961 * MI_FLUSH
2962 * drm_agp_chipset_flush
2963 * 4. Updated (written) by CPU again
2964 * (CPU, CPU)
2965 * flush_domains = 0 (no previous write domain)
2966 * invalidate_domains = 0 (no new read domains)
2967 * 5. Read by GPU
2968 * (CPU+RENDER, 0)
2969 * flush_domains = CPU
2970 * invalidate_domains = RENDER
2971 * clflush (obj)
2972 * MI_FLUSH
2973 * drm_agp_chipset_flush
2974 */
Keith Packardc0d90822008-11-20 23:11:08 -08002975static void
Eric Anholt8b0e3782009-02-19 14:40:50 -08002976i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002977{
2978 struct drm_device *dev = obj->dev;
Chris Wilson88f356b2010-08-04 13:55:32 +01002979 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002980 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002981 uint32_t invalidate_domains = 0;
2982 uint32_t flush_domains = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002983 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002984
Eric Anholt8b0e3782009-02-19 14:40:50 -08002985 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2986 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
Eric Anholt673a3942008-07-30 12:06:12 -07002987
Jesse Barnes652c3932009-08-17 13:31:43 -07002988 intel_mark_busy(dev, obj);
2989
Eric Anholt673a3942008-07-30 12:06:12 -07002990#if WATCH_BUF
2991 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2992 __func__, obj,
Eric Anholt8b0e3782009-02-19 14:40:50 -08002993 obj->read_domains, obj->pending_read_domains,
2994 obj->write_domain, obj->pending_write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07002995#endif
2996 /*
2997 * If the object isn't moving to a new write domain,
2998 * let the object stay in multiple read domains
2999 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003000 if (obj->pending_write_domain == 0)
3001 obj->pending_read_domains |= obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003002 else
3003 obj_priv->dirty = 1;
3004
3005 /*
3006 * Flush the current write domain if
3007 * the new read domains don't match. Invalidate
3008 * any read domains which differ from the old
3009 * write domain
3010 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003011 if (obj->write_domain &&
3012 obj->write_domain != obj->pending_read_domains) {
Eric Anholt673a3942008-07-30 12:06:12 -07003013 flush_domains |= obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003014 invalidate_domains |=
3015 obj->pending_read_domains & ~obj->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07003016 }
3017 /*
3018 * Invalidate any read caches which may have
3019 * stale data. That is, any new read domains.
3020 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003021 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003022 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3023#if WATCH_BUF
3024 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3025 __func__, flush_domains, invalidate_domains);
3026#endif
Eric Anholt673a3942008-07-30 12:06:12 -07003027 i915_gem_clflush_object(obj);
3028 }
3029
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003030 old_read_domains = obj->read_domains;
3031
Eric Anholtefbeed92009-02-19 14:54:51 -08003032 /* The actual obj->write_domain will be updated with
3033 * pending_write_domain after we emit the accumulated flush for all
3034 * of our domain changes in execbuffers (which clears objects'
3035 * write_domains). So if we have a current write domain that we
3036 * aren't changing, set pending_write_domain to that.
3037 */
3038 if (flush_domains == 0 && obj->pending_write_domain == 0)
3039 obj->pending_write_domain = obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003040 obj->read_domains = obj->pending_read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003041
Chris Wilson88f356b2010-08-04 13:55:32 +01003042 if (flush_domains & I915_GEM_GPU_DOMAINS) {
3043 if (obj_priv->ring == &dev_priv->render_ring)
3044 dev_priv->flush_rings |= FLUSH_RENDER_RING;
3045 else if (obj_priv->ring == &dev_priv->bsd_ring)
3046 dev_priv->flush_rings |= FLUSH_BSD_RING;
3047 }
3048
Eric Anholt673a3942008-07-30 12:06:12 -07003049 dev->invalidate_domains |= invalidate_domains;
3050 dev->flush_domains |= flush_domains;
3051#if WATCH_BUF
3052 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3053 __func__,
3054 obj->read_domains, obj->write_domain,
3055 dev->invalidate_domains, dev->flush_domains);
3056#endif
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003057
3058 trace_i915_gem_object_change_domain(obj,
3059 old_read_domains,
3060 obj->write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003061}
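
/*
 * How the above is driven (see i915_gem_do_execbuffer() later in this file):
 * each object's pending_read_domains/pending_write_domain are filled in from
 * the relocations, i915_gem_object_set_to_gpu_domain() is called per object
 * to accumulate dev->invalidate_domains, dev->flush_domains and
 * dev_priv->flush_rings, and then a single i915_gem_flush() plus per-ring
 * i915_add_request() covers the whole batch.
 */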
3062
3063/**
Eric Anholte47c68e2008-11-14 13:35:19 -08003064 * Moves the object from a partial CPU read domain to a full one.
Eric Anholt673a3942008-07-30 12:06:12 -07003065 *
Eric Anholte47c68e2008-11-14 13:35:19 -08003066 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3067 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3068 */
3069static void
3070i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3071{
Daniel Vetter23010e42010-03-08 13:35:02 +01003072 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003073
3074 if (!obj_priv->page_cpu_valid)
3075 return;
3076
3077 /* If we're partially in the CPU read domain, finish moving it in.
3078 */
3079 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3080 int i;
3081
3082 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3083 if (obj_priv->page_cpu_valid[i])
3084 continue;
Eric Anholt856fa192009-03-19 14:10:50 -07003085 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08003086 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003087 }
3088
3089 /* Free the page_cpu_valid mappings which are now stale, whether
3090 * or not we've got I915_GEM_DOMAIN_CPU.
3091 */
Eric Anholt9a298b22009-03-24 12:23:04 -07003092 kfree(obj_priv->page_cpu_valid);
Eric Anholte47c68e2008-11-14 13:35:19 -08003093 obj_priv->page_cpu_valid = NULL;
3094}
3095
3096/**
3097 * Set the CPU read domain on a range of the object.
3098 *
3099 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 3100 * not entirely valid. The page_cpu_valid member of the object records which
3101 * pages have been flushed, and will be respected by
3102 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3103 * of the whole object.
3104 *
3105 * This function returns when the move is complete, including waiting on
3106 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07003107 */
3108static int
Eric Anholte47c68e2008-11-14 13:35:19 -08003109i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3110 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07003111{
Daniel Vetter23010e42010-03-08 13:35:02 +01003112 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003113 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003114 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003115
Eric Anholte47c68e2008-11-14 13:35:19 -08003116 if (offset == 0 && size == obj->size)
3117 return i915_gem_object_set_to_cpu_domain(obj, 0);
3118
Chris Wilson2dafb1e2010-06-07 14:03:05 +01003119 ret = i915_gem_object_flush_gpu_write_domain(obj);
3120 if (ret)
3121 return ret;
3122
Eric Anholte47c68e2008-11-14 13:35:19 -08003123 /* Wait on any GPU rendering and flushing to occur. */
3124 ret = i915_gem_object_wait_rendering(obj);
3125 if (ret != 0)
3126 return ret;
3127 i915_gem_object_flush_gtt_write_domain(obj);
3128
3129 /* If we're already fully in the CPU read domain, we're done. */
3130 if (obj_priv->page_cpu_valid == NULL &&
3131 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003132 return 0;
3133
Eric Anholte47c68e2008-11-14 13:35:19 -08003134 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3135 * newly adding I915_GEM_DOMAIN_CPU
3136 */
Eric Anholt673a3942008-07-30 12:06:12 -07003137 if (obj_priv->page_cpu_valid == NULL) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003138 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3139 GFP_KERNEL);
Eric Anholte47c68e2008-11-14 13:35:19 -08003140 if (obj_priv->page_cpu_valid == NULL)
3141 return -ENOMEM;
3142 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3143 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003144
3145 /* Flush the cache on any pages that are still invalid from the CPU's
3146 * perspective.
3147 */
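	/* For example, with 4 KiB pages an offset of 0x1800 and a size of
	 * 0x1000 covers pages 1 and 2 inclusive.
	 */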
Eric Anholte47c68e2008-11-14 13:35:19 -08003148 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3149 i++) {
Eric Anholt673a3942008-07-30 12:06:12 -07003150 if (obj_priv->page_cpu_valid[i])
3151 continue;
3152
Eric Anholt856fa192009-03-19 14:10:50 -07003153 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003154
3155 obj_priv->page_cpu_valid[i] = 1;
3156 }
3157
Eric Anholte47c68e2008-11-14 13:35:19 -08003158 /* It should now be out of any other write domains, and we can update
3159 * the domain values for our changes.
3160 */
3161 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3162
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003163 old_read_domains = obj->read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003164 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3165
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003166 trace_i915_gem_object_change_domain(obj,
3167 old_read_domains,
3168 obj->write_domain);
3169
Eric Anholt673a3942008-07-30 12:06:12 -07003170 return 0;
3171}
3172
3173/**
Eric Anholt673a3942008-07-30 12:06:12 -07003174 * Pin an object to the GTT and evaluate the relocations landing in it.
3175 */
3176static int
3177i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3178 struct drm_file *file_priv,
Jesse Barnes76446ca2009-12-17 22:05:42 -05003179 struct drm_i915_gem_exec_object2 *entry,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003180 struct drm_i915_gem_relocation_entry *relocs)
Eric Anholt673a3942008-07-30 12:06:12 -07003181{
3182 struct drm_device *dev = obj->dev;
Keith Packard0839ccb2008-10-30 19:38:48 -07003183 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01003184 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003185 int i, ret;
Keith Packard0839ccb2008-10-30 19:38:48 -07003186 void __iomem *reloc_page;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003187 bool need_fence;
3188
3189 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3190 obj_priv->tiling_mode != I915_TILING_NONE;
3191
3192 /* Check fence reg constraints and rebind if necessary */
Chris Wilson808b24d62010-05-27 13:18:15 +01003193 if (need_fence &&
3194 !i915_gem_object_fence_offset_ok(obj,
3195 obj_priv->tiling_mode)) {
3196 ret = i915_gem_object_unbind(obj);
3197 if (ret)
3198 return ret;
3199 }
Eric Anholt673a3942008-07-30 12:06:12 -07003200
3201 /* Choose the GTT offset for our buffer and put it there. */
3202 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3203 if (ret)
3204 return ret;
3205
Jesse Barnes76446ca2009-12-17 22:05:42 -05003206 /*
3207 * Pre-965 chips need a fence register set up in order to
3208 * properly handle blits to/from tiled surfaces.
3209 */
3210 if (need_fence) {
3211 ret = i915_gem_object_get_fence_reg(obj);
3212 if (ret != 0) {
Jesse Barnes76446ca2009-12-17 22:05:42 -05003213 i915_gem_object_unpin(obj);
3214 return ret;
3215 }
3216 }
3217
Eric Anholt673a3942008-07-30 12:06:12 -07003218 entry->offset = obj_priv->gtt_offset;
3219
Eric Anholt673a3942008-07-30 12:06:12 -07003220 /* Apply the relocations, using the GTT aperture to avoid cache
3221 * flushing requirements.
3222 */
3223 for (i = 0; i < entry->relocation_count; i++) {
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003224		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003225 struct drm_gem_object *target_obj;
3226 struct drm_i915_gem_object *target_obj_priv;
Eric Anholt3043c602008-10-02 12:24:47 -07003227 uint32_t reloc_val, reloc_offset;
3228 uint32_t __iomem *reloc_entry;
Eric Anholt673a3942008-07-30 12:06:12 -07003229
Eric Anholt673a3942008-07-30 12:06:12 -07003230 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003231 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003232 if (target_obj == NULL) {
3233 i915_gem_object_unpin(obj);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01003234 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07003235 }
Daniel Vetter23010e42010-03-08 13:35:02 +01003236 target_obj_priv = to_intel_bo(target_obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003237
Chris Wilson8542a0b2009-09-09 21:15:15 +01003238#if WATCH_RELOC
3239 DRM_INFO("%s: obj %p offset %08x target %d "
3240 "read %08x write %08x gtt %08x "
3241 "presumed %08x delta %08x\n",
3242 __func__,
3243 obj,
3244 (int) reloc->offset,
3245 (int) reloc->target_handle,
3246 (int) reloc->read_domains,
3247 (int) reloc->write_domain,
3248 (int) target_obj_priv->gtt_offset,
3249 (int) reloc->presumed_offset,
3250 reloc->delta);
3251#endif
3252
Eric Anholt673a3942008-07-30 12:06:12 -07003253 /* The target buffer should have appeared before us in the
3254 * exec_object list, so it should have a GTT space bound by now.
3255 */
3256 if (target_obj_priv->gtt_space == NULL) {
3257 DRM_ERROR("No GTT space found for object %d\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003258 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003259 drm_gem_object_unreference(target_obj);
3260 i915_gem_object_unpin(obj);
3261 return -EINVAL;
3262 }
3263
Chris Wilson8542a0b2009-09-09 21:15:15 +01003264 /* Validate that the target is in a valid r/w GPU domain */
Daniel Vetter16edd552010-02-19 11:52:02 +01003265 if (reloc->write_domain & (reloc->write_domain - 1)) {
3266 DRM_ERROR("reloc with multiple write domains: "
3267 "obj %p target %d offset %d "
3268 "read %08x write %08x",
3269 obj, reloc->target_handle,
3270 (int) reloc->offset,
3271 reloc->read_domains,
3272 reloc->write_domain);
Julia Lawall929f49b2010-10-02 15:59:17 +02003273 drm_gem_object_unreference(target_obj);
3274 i915_gem_object_unpin(obj);
Daniel Vetter16edd552010-02-19 11:52:02 +01003275 return -EINVAL;
3276 }
Chris Wilson8542a0b2009-09-09 21:15:15 +01003277 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3278 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3279 DRM_ERROR("reloc with read/write CPU domains: "
3280 "obj %p target %d offset %d "
3281 "read %08x write %08x",
3282 obj, reloc->target_handle,
3283 (int) reloc->offset,
3284 reloc->read_domains,
3285 reloc->write_domain);
3286 drm_gem_object_unreference(target_obj);
3287 i915_gem_object_unpin(obj);
3288 return -EINVAL;
3289 }
3290 if (reloc->write_domain && target_obj->pending_write_domain &&
3291 reloc->write_domain != target_obj->pending_write_domain) {
3292 DRM_ERROR("Write domain conflict: "
3293 "obj %p target %d offset %d "
3294 "new %08x old %08x\n",
3295 obj, reloc->target_handle,
3296 (int) reloc->offset,
3297 reloc->write_domain,
3298 target_obj->pending_write_domain);
3299 drm_gem_object_unreference(target_obj);
3300 i915_gem_object_unpin(obj);
3301 return -EINVAL;
3302 }
3303
3304 target_obj->pending_read_domains |= reloc->read_domains;
3305 target_obj->pending_write_domain |= reloc->write_domain;
3306
3307 /* If the relocation already has the right value in it, no
3308 * more work needs to be done.
3309 */
3310 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3311 drm_gem_object_unreference(target_obj);
3312 continue;
3313 }
3314
3315 /* Check that the relocation address is valid... */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003316 if (reloc->offset > obj->size - 4) {
Eric Anholt673a3942008-07-30 12:06:12 -07003317 DRM_ERROR("Relocation beyond object bounds: "
3318 "obj %p target %d offset %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003319 obj, reloc->target_handle,
3320 (int) reloc->offset, (int) obj->size);
Eric Anholt673a3942008-07-30 12:06:12 -07003321 drm_gem_object_unreference(target_obj);
3322 i915_gem_object_unpin(obj);
3323 return -EINVAL;
3324 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003325 if (reloc->offset & 3) {
Eric Anholt673a3942008-07-30 12:06:12 -07003326 DRM_ERROR("Relocation not 4-byte aligned: "
3327 "obj %p target %d offset %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003328 obj, reloc->target_handle,
3329 (int) reloc->offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003330 drm_gem_object_unreference(target_obj);
3331 i915_gem_object_unpin(obj);
3332 return -EINVAL;
3333 }
3334
Chris Wilson8542a0b2009-09-09 21:15:15 +01003335 /* and points to somewhere within the target object. */
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003336 if (reloc->delta >= target_obj->size) {
3337 DRM_ERROR("Relocation beyond target object bounds: "
3338 "obj %p target %d delta %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003339 obj, reloc->target_handle,
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003340 (int) reloc->delta, (int) target_obj->size);
Chris Wilson491152b2009-02-11 14:26:32 +00003341 drm_gem_object_unreference(target_obj);
3342 i915_gem_object_unpin(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003343 return -EINVAL;
3344 }
3345
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003346 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3347 if (ret != 0) {
3348 drm_gem_object_unreference(target_obj);
3349 i915_gem_object_unpin(obj);
3350 return -EINVAL;
Eric Anholt673a3942008-07-30 12:06:12 -07003351 }
3352
3353 /* Map the page containing the relocation we're going to
3354 * perform.
3355 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003356 reloc_offset = obj_priv->gtt_offset + reloc->offset;
Keith Packard0839ccb2008-10-30 19:38:48 -07003357 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3358 (reloc_offset &
Chris Wilsonfca3ec02010-08-04 14:34:24 +01003359 ~(PAGE_SIZE - 1)),
3360 KM_USER0);
Eric Anholt3043c602008-10-02 12:24:47 -07003361 reloc_entry = (uint32_t __iomem *)(reloc_page +
Keith Packard0839ccb2008-10-30 19:38:48 -07003362 (reloc_offset & (PAGE_SIZE - 1)));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003363 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
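		/* For example, if the target object ended up at GTT offset
		 * 0x10000 and reloc->delta is 0x100, the value 0x10100 is
		 * written into obj at reloc->offset through the mapping above.
		 */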
Eric Anholt673a3942008-07-30 12:06:12 -07003364
3365#if WATCH_BUF
3366 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003367 obj, (unsigned int) reloc->offset,
Eric Anholt673a3942008-07-30 12:06:12 -07003368 readl(reloc_entry), reloc_val);
3369#endif
3370 writel(reloc_val, reloc_entry);
Chris Wilsonfca3ec02010-08-04 14:34:24 +01003371 io_mapping_unmap_atomic(reloc_page, KM_USER0);
Eric Anholt673a3942008-07-30 12:06:12 -07003372
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003373 /* The updated presumed offset for this entry will be
3374 * copied back out to the user.
Eric Anholt673a3942008-07-30 12:06:12 -07003375 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003376 reloc->presumed_offset = target_obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003377
3378 drm_gem_object_unreference(target_obj);
3379 }
3380
Eric Anholt673a3942008-07-30 12:06:12 -07003381#if WATCH_BUF
3382 if (0)
3383 i915_gem_dump_object(obj, 128, __func__, ~0);
3384#endif
3385 return 0;
3386}
3387
Eric Anholt673a3942008-07-30 12:06:12 -07003388/* Throttle our rendering by waiting until the ring has completed our requests
3389 * emitted over 20 msec ago.
3390 *
Eric Anholtb9624422009-06-03 07:27:35 +00003391 * Note that if we were to use the current jiffies each time around the loop,
3392 * we wouldn't escape the function with any frames outstanding if the time to
3393 * render a frame was over 20ms.
3394 *
Eric Anholt673a3942008-07-30 12:06:12 -07003395 * This should get us reasonable parallelism between CPU and GPU but also
3396 * relatively low latency when blocking on a particular request to finish.
3397 */
3398static int
3399i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3400{
3401 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3402 int ret = 0;
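	/* For example, with CONFIG_HZ=250 msecs_to_jiffies(20) is 5 ticks,
	 * so requests emitted within the last 5 jiffies are not waited on.
	 */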
Eric Anholtb9624422009-06-03 07:27:35 +00003403 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Eric Anholt673a3942008-07-30 12:06:12 -07003404
3405 mutex_lock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003406 while (!list_empty(&i915_file_priv->mm.request_list)) {
3407 struct drm_i915_gem_request *request;
3408
3409 request = list_first_entry(&i915_file_priv->mm.request_list,
3410 struct drm_i915_gem_request,
3411 client_list);
3412
3413 if (time_after_eq(request->emitted_jiffies, recent_enough))
3414 break;
3415
Zou Nan hai852835f2010-05-21 09:08:56 +08003416 ret = i915_wait_request(dev, request->seqno, request->ring);
Eric Anholtb9624422009-06-03 07:27:35 +00003417 if (ret != 0)
3418 break;
3419 }
Eric Anholt673a3942008-07-30 12:06:12 -07003420 mutex_unlock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003421
Eric Anholt673a3942008-07-30 12:06:12 -07003422 return ret;
3423}
3424
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003425static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003426i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003427 uint32_t buffer_count,
3428 struct drm_i915_gem_relocation_entry **relocs)
3429{
3430 uint32_t reloc_count = 0, reloc_index = 0, i;
3431 int ret;
3432
3433 *relocs = NULL;
3434 for (i = 0; i < buffer_count; i++) {
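		/* Guard against uint32_t overflow when summing the per-object
		 * relocation counts.
		 */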
3435 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3436 return -EINVAL;
3437 reloc_count += exec_list[i].relocation_count;
3438 }
3439
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003440 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
Jesse Barnes76446ca2009-12-17 22:05:42 -05003441 if (*relocs == NULL) {
3442 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003443 return -ENOMEM;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003444 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003445
3446 for (i = 0; i < buffer_count; i++) {
3447 struct drm_i915_gem_relocation_entry __user *user_relocs;
3448
3449 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3450
3451 ret = copy_from_user(&(*relocs)[reloc_index],
3452 user_relocs,
3453 exec_list[i].relocation_count *
3454 sizeof(**relocs));
3455 if (ret != 0) {
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003456 drm_free_large(*relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003457 *relocs = NULL;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003458 return -EFAULT;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003459 }
3460
3461 reloc_index += exec_list[i].relocation_count;
3462 }
3463
Florian Mickler2bc43b52009-04-06 22:55:41 +02003464 return 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003465}
3466
3467static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003468i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003469 uint32_t buffer_count,
3470 struct drm_i915_gem_relocation_entry *relocs)
3471{
3472 uint32_t reloc_count = 0, i;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003473 int ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003474
Chris Wilson93533c22010-01-31 10:40:48 +00003475 if (relocs == NULL)
3476 return 0;
3477
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003478 for (i = 0; i < buffer_count; i++) {
3479 struct drm_i915_gem_relocation_entry __user *user_relocs;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003480 int unwritten;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003481
3482 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3483
Florian Mickler2bc43b52009-04-06 22:55:41 +02003484 unwritten = copy_to_user(user_relocs,
3485 &relocs[reloc_count],
3486 exec_list[i].relocation_count *
3487 sizeof(*relocs));
3488
3489 if (unwritten) {
3490 ret = -EFAULT;
3491 goto err;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003492 }
3493
3494 reloc_count += exec_list[i].relocation_count;
3495 }
3496
Florian Mickler2bc43b52009-04-06 22:55:41 +02003497err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003498 drm_free_large(relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003499
3500 return ret;
3501}
3502
Chris Wilson83d60792009-06-06 09:45:57 +01003503static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003504i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
Chris Wilson83d60792009-06-06 09:45:57 +01003505 uint64_t exec_offset)
3506{
3507 uint32_t exec_start, exec_len;
3508
3509 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3510 exec_len = (uint32_t) exec->batch_len;
3511
3512 if ((exec_start | exec_len) & 0x7)
3513 return -EINVAL;
3514
3515 if (!exec_start)
3516 return -EINVAL;
3517
3518 return 0;
3519}
3520
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003521static int
3522i915_gem_wait_for_pending_flip(struct drm_device *dev,
3523 struct drm_gem_object **object_list,
3524 int count)
3525{
3526 drm_i915_private_t *dev_priv = dev->dev_private;
3527 struct drm_i915_gem_object *obj_priv;
3528 DEFINE_WAIT(wait);
3529 int i, ret = 0;
3530
3531 for (;;) {
3532 prepare_to_wait(&dev_priv->pending_flip_queue,
3533 &wait, TASK_INTERRUPTIBLE);
3534 for (i = 0; i < count; i++) {
Daniel Vetter23010e42010-03-08 13:35:02 +01003535 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003536 if (atomic_read(&obj_priv->pending_flip) > 0)
3537 break;
3538 }
3539 if (i == count)
3540 break;
3541
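		/* Drop struct_mutex while sleeping so the flip completion
		 * path can make progress and wake pending_flip_queue; a
		 * pending signal bails out with -ERESTARTSYS instead.
		 */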
3542 if (!signal_pending(current)) {
3543 mutex_unlock(&dev->struct_mutex);
3544 schedule();
3545 mutex_lock(&dev->struct_mutex);
3546 continue;
3547 }
3548 ret = -ERESTARTSYS;
3549 break;
3550 }
3551 finish_wait(&dev_priv->pending_flip_queue, &wait);
3552
3553 return ret;
3554}
3555
Chris Wilson43b27f42010-07-02 08:57:15 +01003556
Eric Anholt673a3942008-07-30 12:06:12 -07003557int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003558i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3559 struct drm_file *file_priv,
3560 struct drm_i915_gem_execbuffer2 *args,
3561 struct drm_i915_gem_exec_object2 *exec_list)
Eric Anholt673a3942008-07-30 12:06:12 -07003562{
3563 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003564 struct drm_gem_object **object_list = NULL;
3565 struct drm_gem_object *batch_obj;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003566 struct drm_i915_gem_object *obj_priv;
Eric Anholt201361a2009-03-11 12:30:04 -07003567 struct drm_clip_rect *cliprects = NULL;
Chris Wilson93533c22010-01-31 10:40:48 +00003568 struct drm_i915_gem_relocation_entry *relocs = NULL;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003569 int ret = 0, ret2, i, pinned = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003570 uint64_t exec_offset;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003571 uint32_t seqno, flush_domains, reloc_index;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003572 int pin_tries, flips;
Eric Anholt673a3942008-07-30 12:06:12 -07003573
Zou Nan hai852835f2010-05-21 09:08:56 +08003574 struct intel_ring_buffer *ring = NULL;
3575
Eric Anholt673a3942008-07-30 12:06:12 -07003576#if WATCH_EXEC
3577 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3578 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3579#endif
Zou Nan haid1b851f2010-05-21 09:08:57 +08003580 if (args->flags & I915_EXEC_BSD) {
3581 if (!HAS_BSD(dev)) {
3582 DRM_ERROR("execbuf with wrong flag\n");
3583 return -EINVAL;
3584 }
3585 ring = &dev_priv->bsd_ring;
3586 } else {
3587 ring = &dev_priv->render_ring;
3588 }
3589
Eric Anholt4f481ed2008-09-10 14:22:49 -07003590 if (args->buffer_count < 1) {
3591 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3592 return -EINVAL;
3593 }
Eric Anholtc8e0f932009-11-22 03:49:37 +01003594 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
Jesse Barnes76446ca2009-12-17 22:05:42 -05003595 if (object_list == NULL) {
3596 DRM_ERROR("Failed to allocate object list for %d buffers\n",
Eric Anholt673a3942008-07-30 12:06:12 -07003597 args->buffer_count);
3598 ret = -ENOMEM;
3599 goto pre_mutex_err;
3600 }
Eric Anholt673a3942008-07-30 12:06:12 -07003601
Eric Anholt201361a2009-03-11 12:30:04 -07003602 if (args->num_cliprects != 0) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003603 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3604 GFP_KERNEL);
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003605 if (cliprects == NULL) {
3606 ret = -ENOMEM;
Eric Anholt201361a2009-03-11 12:30:04 -07003607 goto pre_mutex_err;
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003608 }
Eric Anholt201361a2009-03-11 12:30:04 -07003609
3610 ret = copy_from_user(cliprects,
3611 (struct drm_clip_rect __user *)
3612 (uintptr_t) args->cliprects_ptr,
3613 sizeof(*cliprects) * args->num_cliprects);
3614 if (ret != 0) {
3615 DRM_ERROR("copy %d cliprects failed: %d\n",
3616 args->num_cliprects, ret);
Dan Carpenterc877cdc2010-06-23 19:03:01 +02003617 ret = -EFAULT;
Eric Anholt201361a2009-03-11 12:30:04 -07003618 goto pre_mutex_err;
3619 }
3620 }
3621
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003622 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3623 &relocs);
3624 if (ret != 0)
3625 goto pre_mutex_err;
3626
Eric Anholt673a3942008-07-30 12:06:12 -07003627 mutex_lock(&dev->struct_mutex);
3628
3629 i915_verify_inactive(dev, __FILE__, __LINE__);
3630
Ben Gamariba1234d2009-09-14 17:48:47 -04003631 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003632 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003633 ret = -EIO;
3634 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003635 }
3636
3637 if (dev_priv->mm.suspended) {
Eric Anholt673a3942008-07-30 12:06:12 -07003638 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003639 ret = -EBUSY;
3640 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003641 }
3642
Keith Packardac94a962008-11-20 23:30:27 -08003643 /* Look up object handles */
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003644 flips = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003645 for (i = 0; i < args->buffer_count; i++) {
3646 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3647 exec_list[i].handle);
3648 if (object_list[i] == NULL) {
3649 DRM_ERROR("Invalid object handle %d at index %d\n",
3650 exec_list[i].handle, i);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003651 /* prevent error path from reading uninitialized data */
3652 args->buffer_count = i + 1;
Chris Wilsonbf79cb92010-08-04 14:19:46 +01003653 ret = -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07003654 goto err;
3655 }
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003656
Daniel Vetter23010e42010-03-08 13:35:02 +01003657 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003658 if (obj_priv->in_execbuffer) {
3659 DRM_ERROR("Object %p appears more than once in object list\n",
3660 object_list[i]);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003661 /* prevent error path from reading uninitialized data */
3662 args->buffer_count = i + 1;
Chris Wilsonbf79cb92010-08-04 14:19:46 +01003663 ret = -EINVAL;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003664 goto err;
3665 }
3666 obj_priv->in_execbuffer = true;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003667 flips += atomic_read(&obj_priv->pending_flip);
3668 }
3669
3670 if (flips > 0) {
3671 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3672 args->buffer_count);
3673 if (ret)
3674 goto err;
Keith Packardac94a962008-11-20 23:30:27 -08003675 }
Eric Anholt673a3942008-07-30 12:06:12 -07003676
Keith Packardac94a962008-11-20 23:30:27 -08003677 /* Pin and relocate */
3678 for (pin_tries = 0; ; pin_tries++) {
3679 ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003680 reloc_index = 0;
3681
Keith Packardac94a962008-11-20 23:30:27 -08003682 for (i = 0; i < args->buffer_count; i++) {
3683 object_list[i]->pending_read_domains = 0;
3684 object_list[i]->pending_write_domain = 0;
3685 ret = i915_gem_object_pin_and_relocate(object_list[i],
3686 file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003687 &exec_list[i],
3688 &relocs[reloc_index]);
Keith Packardac94a962008-11-20 23:30:27 -08003689 if (ret)
3690 break;
3691 pinned = i + 1;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003692 reloc_index += exec_list[i].relocation_count;
Keith Packardac94a962008-11-20 23:30:27 -08003693 }
3694 /* success */
3695 if (ret == 0)
3696 break;
3697
3698 /* error other than GTT full, or we've already tried again */
Chris Wilson2939e1f2009-06-06 09:46:03 +01003699 if (ret != -ENOSPC || pin_tries >= 1) {
Chris Wilson07f73f62009-09-14 16:50:30 +01003700 if (ret != -ERESTARTSYS) {
3701 unsigned long long total_size = 0;
Chris Wilson3d1cc472010-05-27 13:18:19 +01003702 int num_fences = 0;
3703 for (i = 0; i < args->buffer_count; i++) {
Chris Wilson43b27f42010-07-02 08:57:15 +01003704 obj_priv = to_intel_bo(object_list[i]);
Chris Wilson3d1cc472010-05-27 13:18:19 +01003705
Chris Wilson07f73f62009-09-14 16:50:30 +01003706 total_size += object_list[i]->size;
Chris Wilson3d1cc472010-05-27 13:18:19 +01003707 num_fences +=
3708 exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
3709 obj_priv->tiling_mode != I915_TILING_NONE;
3710 }
3711 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
Chris Wilson07f73f62009-09-14 16:50:30 +01003712 pinned+1, args->buffer_count,
Chris Wilson3d1cc472010-05-27 13:18:19 +01003713 total_size, num_fences,
3714 ret);
Chris Wilson07f73f62009-09-14 16:50:30 +01003715 DRM_ERROR("%d objects [%d pinned], "
3716 "%d object bytes [%d pinned], "
3717 "%d/%d gtt bytes\n",
3718 atomic_read(&dev->object_count),
3719 atomic_read(&dev->pin_count),
3720 atomic_read(&dev->object_memory),
3721 atomic_read(&dev->pin_memory),
3722 atomic_read(&dev->gtt_memory),
3723 dev->gtt_total);
3724 }
Eric Anholt673a3942008-07-30 12:06:12 -07003725 goto err;
3726 }
Keith Packardac94a962008-11-20 23:30:27 -08003727
3728 /* unpin all of our buffers */
3729 for (i = 0; i < pinned; i++)
3730 i915_gem_object_unpin(object_list[i]);
Eric Anholtb1177632008-12-10 10:09:41 -08003731 pinned = 0;
Keith Packardac94a962008-11-20 23:30:27 -08003732
3733 /* evict everyone we can from the aperture */
3734 ret = i915_gem_evict_everything(dev);
Chris Wilson07f73f62009-09-14 16:50:30 +01003735 if (ret && ret != -ENOSPC)
Keith Packardac94a962008-11-20 23:30:27 -08003736 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003737 }
3738
3739 /* Set the pending read domains for the batch buffer to COMMAND */
3740 batch_obj = object_list[args->buffer_count-1];
Chris Wilson5f26a2c2009-06-06 09:45:58 +01003741 if (batch_obj->pending_write_domain) {
3742 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3743 ret = -EINVAL;
3744 goto err;
3745 }
3746 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
Eric Anholt673a3942008-07-30 12:06:12 -07003747
Chris Wilson83d60792009-06-06 09:45:57 +01003748 /* Sanity check the batch buffer, prior to moving objects */
3749 exec_offset = exec_list[args->buffer_count - 1].offset;
 3750	ret = i915_gem_check_execbuffer(args, exec_offset);
3751 if (ret != 0) {
3752 DRM_ERROR("execbuf with invalid offset/length\n");
3753 goto err;
3754 }
3755
Eric Anholt673a3942008-07-30 12:06:12 -07003756 i915_verify_inactive(dev, __FILE__, __LINE__);
3757
Keith Packard646f0f62008-11-20 23:23:03 -08003758 /* Zero the global flush/invalidate flags. These
3759 * will be modified as new domains are computed
3760 * for each object
3761 */
3762 dev->invalidate_domains = 0;
3763 dev->flush_domains = 0;
Chris Wilson88f356b2010-08-04 13:55:32 +01003764 dev_priv->flush_rings = 0;
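	/* flush_rings is filled back in per object by
	 * i915_gem_object_set_to_gpu_domain() below, telling us which rings
	 * need a flush request emitted.
	 */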
Keith Packard646f0f62008-11-20 23:23:03 -08003765
Eric Anholt673a3942008-07-30 12:06:12 -07003766 for (i = 0; i < args->buffer_count; i++) {
3767 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003768
Keith Packard646f0f62008-11-20 23:23:03 -08003769 /* Compute new gpu domains and update invalidate/flush */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003770 i915_gem_object_set_to_gpu_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003771 }
3772
3773 i915_verify_inactive(dev, __FILE__, __LINE__);
3774
Keith Packard646f0f62008-11-20 23:23:03 -08003775 if (dev->invalidate_domains | dev->flush_domains) {
3776#if WATCH_EXEC
3777 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3778 __func__,
3779 dev->invalidate_domains,
3780 dev->flush_domains);
3781#endif
3782 i915_gem_flush(dev,
3783 dev->invalidate_domains,
3784 dev->flush_domains);
Chris Wilson88f356b2010-08-04 13:55:32 +01003785 if (dev_priv->flush_rings & FLUSH_RENDER_RING)
Eric Anholtb9624422009-06-03 07:27:35 +00003786 (void)i915_add_request(dev, file_priv,
Chris Wilson88f356b2010-08-04 13:55:32 +01003787 dev->flush_domains,
3788 &dev_priv->render_ring);
3789 if (dev_priv->flush_rings & FLUSH_BSD_RING)
3790 (void)i915_add_request(dev, file_priv,
3791 dev->flush_domains,
3792 &dev_priv->bsd_ring);
Keith Packard646f0f62008-11-20 23:23:03 -08003793 }
Eric Anholt673a3942008-07-30 12:06:12 -07003794
Eric Anholtefbeed92009-02-19 14:54:51 -08003795 for (i = 0; i < args->buffer_count; i++) {
3796 struct drm_gem_object *obj = object_list[i];
Daniel Vetter23010e42010-03-08 13:35:02 +01003797 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003798 uint32_t old_write_domain = obj->write_domain;
Eric Anholtefbeed92009-02-19 14:54:51 -08003799
3800 obj->write_domain = obj->pending_write_domain;
Daniel Vetter99fcb762010-02-07 16:20:18 +01003801 if (obj->write_domain)
3802 list_move_tail(&obj_priv->gpu_write_list,
3803 &dev_priv->mm.gpu_write_list);
3804 else
3805 list_del_init(&obj_priv->gpu_write_list);
3806
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003807 trace_i915_gem_object_change_domain(obj,
3808 obj->read_domains,
3809 old_write_domain);
Eric Anholtefbeed92009-02-19 14:54:51 -08003810 }
3811
Eric Anholt673a3942008-07-30 12:06:12 -07003812 i915_verify_inactive(dev, __FILE__, __LINE__);
3813
3814#if WATCH_COHERENCY
3815 for (i = 0; i < args->buffer_count; i++) {
3816 i915_gem_object_check_coherency(object_list[i],
3817 exec_list[i].handle);
3818 }
3819#endif
3820
Eric Anholt673a3942008-07-30 12:06:12 -07003821#if WATCH_EXEC
Ben Gamari6911a9b2009-04-02 11:24:54 -07003822 i915_gem_dump_object(batch_obj,
Eric Anholt673a3942008-07-30 12:06:12 -07003823 args->batch_len,
3824 __func__,
3825 ~0);
3826#endif
3827
Eric Anholt673a3942008-07-30 12:06:12 -07003828 /* Exec the batchbuffer */
Zou Nan hai852835f2010-05-21 09:08:56 +08003829 ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3830 cliprects, exec_offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003831 if (ret) {
3832 DRM_ERROR("dispatch failed %d\n", ret);
3833 goto err;
3834 }
3835
3836 /*
3837 * Ensure that the commands in the batch buffer are
3838 * finished before the interrupt fires
3839 */
Zou Nan hai852835f2010-05-21 09:08:56 +08003840 flush_domains = i915_retire_commands(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07003841
3842 i915_verify_inactive(dev, __FILE__, __LINE__);
3843
3844 /*
3845 * Get a seqno representing the execution of the current buffer,
3846 * which we can wait on. We would like to mitigate these interrupts,
3847 * likely by only creating seqnos occasionally (so that we have
3848 * *some* interrupts representing completion of buffers that we can
3849 * wait on when trying to clear up gtt space).
3850 */
Zou Nan hai852835f2010-05-21 09:08:56 +08003851 seqno = i915_add_request(dev, file_priv, flush_domains, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07003852 BUG_ON(seqno == 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003853 for (i = 0; i < args->buffer_count; i++) {
3854 struct drm_gem_object *obj = object_list[i];
Zou Nan hai852835f2010-05-21 09:08:56 +08003855 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003856
Zou Nan hai852835f2010-05-21 09:08:56 +08003857 i915_gem_object_move_to_active(obj, seqno, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07003858#if WATCH_LRU
3859 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3860#endif
3861 }
3862#if WATCH_LRU
3863 i915_dump_lru(dev, __func__);
3864#endif
3865
3866 i915_verify_inactive(dev, __FILE__, __LINE__);
3867
Eric Anholt673a3942008-07-30 12:06:12 -07003868err:
Julia Lawallaad87df2008-12-21 16:28:47 +01003869 for (i = 0; i < pinned; i++)
3870 i915_gem_object_unpin(object_list[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07003871
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003872 for (i = 0; i < args->buffer_count; i++) {
3873 if (object_list[i]) {
Daniel Vetter23010e42010-03-08 13:35:02 +01003874 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003875 obj_priv->in_execbuffer = false;
3876 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003877 drm_gem_object_unreference(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003878 }
Julia Lawallaad87df2008-12-21 16:28:47 +01003879
Eric Anholt673a3942008-07-30 12:06:12 -07003880 mutex_unlock(&dev->struct_mutex);
3881
Chris Wilson93533c22010-01-31 10:40:48 +00003882pre_mutex_err:
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003883 /* Copy the updated relocations out regardless of current error
3884 * state. Failure to update the relocs would mean that the next
3885 * time userland calls execbuf, it would do so with presumed offset
3886 * state that didn't match the actual object state.
3887 */
3888 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3889 relocs);
3890 if (ret2 != 0) {
3891 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3892
3893 if (ret == 0)
3894 ret = ret2;
3895 }
3896
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003897 drm_free_large(object_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07003898 kfree(cliprects);
Eric Anholt673a3942008-07-30 12:06:12 -07003899
3900 return ret;
3901}
3902
Jesse Barnes76446ca2009-12-17 22:05:42 -05003903/*
3904 * Legacy execbuffer just creates an exec2 list from the original exec object
3905 * list array and passes it to the real function.
3906 */
3907int
3908i915_gem_execbuffer(struct drm_device *dev, void *data,
3909 struct drm_file *file_priv)
3910{
3911 struct drm_i915_gem_execbuffer *args = data;
3912 struct drm_i915_gem_execbuffer2 exec2;
3913 struct drm_i915_gem_exec_object *exec_list = NULL;
3914 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3915 int ret, i;
3916
3917#if WATCH_EXEC
3918 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3919 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3920#endif
3921
3922 if (args->buffer_count < 1) {
3923 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3924 return -EINVAL;
3925 }
3926
3927 /* Copy in the exec list from userland */
3928 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3929 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3930 if (exec_list == NULL || exec2_list == NULL) {
3931 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3932 args->buffer_count);
3933 drm_free_large(exec_list);
3934 drm_free_large(exec2_list);
3935 return -ENOMEM;
3936 }
3937 ret = copy_from_user(exec_list,
3938 (struct drm_i915_relocation_entry __user *)
3939 (uintptr_t) args->buffers_ptr,
3940 sizeof(*exec_list) * args->buffer_count);
3941 if (ret != 0) {
3942 DRM_ERROR("copy %d exec entries failed %d\n",
3943 args->buffer_count, ret);
3944 drm_free_large(exec_list);
3945 drm_free_large(exec2_list);
3946 return -EFAULT;
3947 }
3948
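	/* Convert each legacy entry to an exec2 entry.  Pre-965 chips need
	 * fence registers for tiled blits (see the pin path above) and the
	 * old ABI has no per-object flag, so conservatively mark every
	 * object as possibly needing a fence there.
	 */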
3949 for (i = 0; i < args->buffer_count; i++) {
3950 exec2_list[i].handle = exec_list[i].handle;
3951 exec2_list[i].relocation_count = exec_list[i].relocation_count;
3952 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
3953 exec2_list[i].alignment = exec_list[i].alignment;
3954 exec2_list[i].offset = exec_list[i].offset;
3955 if (!IS_I965G(dev))
3956 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
3957 else
3958 exec2_list[i].flags = 0;
3959 }
3960
3961 exec2.buffers_ptr = args->buffers_ptr;
3962 exec2.buffer_count = args->buffer_count;
3963 exec2.batch_start_offset = args->batch_start_offset;
3964 exec2.batch_len = args->batch_len;
3965 exec2.DR1 = args->DR1;
3966 exec2.DR4 = args->DR4;
3967 exec2.num_cliprects = args->num_cliprects;
3968 exec2.cliprects_ptr = args->cliprects_ptr;
Zou Nan hai852835f2010-05-21 09:08:56 +08003969 exec2.flags = I915_EXEC_RENDER;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003970
3971 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
3972 if (!ret) {
3973 /* Copy the new buffer offsets back to the user's exec list. */
3974 for (i = 0; i < args->buffer_count; i++)
3975 exec_list[i].offset = exec2_list[i].offset;
3976 /* ... and back out to userspace */
3977 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3978 (uintptr_t) args->buffers_ptr,
3979 exec_list,
3980 sizeof(*exec_list) * args->buffer_count);
3981 if (ret) {
3982 ret = -EFAULT;
3983 DRM_ERROR("failed to copy %d exec entries "
3984 "back to user (%d)\n",
3985 args->buffer_count, ret);
3986 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003987 }
3988
3989 drm_free_large(exec_list);
3990 drm_free_large(exec2_list);
3991 return ret;
3992}
3993
3994int
3995i915_gem_execbuffer2(struct drm_device *dev, void *data,
3996 struct drm_file *file_priv)
3997{
3998 struct drm_i915_gem_execbuffer2 *args = data;
3999 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4000 int ret;
4001
4002#if WATCH_EXEC
4003 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4004 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4005#endif
4006
4007 if (args->buffer_count < 1) {
4008 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4009 return -EINVAL;
4010 }
4011
4012 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4013 if (exec2_list == NULL) {
4014 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4015 args->buffer_count);
4016 return -ENOMEM;
4017 }
4018 ret = copy_from_user(exec2_list,
4019 (struct drm_i915_relocation_entry __user *)
4020 (uintptr_t) args->buffers_ptr,
4021 sizeof(*exec2_list) * args->buffer_count);
4022 if (ret != 0) {
4023 DRM_ERROR("copy %d exec entries failed %d\n",
4024 args->buffer_count, ret);
4025 drm_free_large(exec2_list);
4026 return -EFAULT;
4027 }
4028
4029 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4030 if (!ret) {
4031 /* Copy the new buffer offsets back to the user's exec list. */
4032 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4033 (uintptr_t) args->buffers_ptr,
4034 exec2_list,
4035 sizeof(*exec2_list) * args->buffer_count);
4036 if (ret) {
4037 ret = -EFAULT;
4038 DRM_ERROR("failed to copy %d exec entries "
4039 "back to user (%d)\n",
4040 args->buffer_count, ret);
4041 }
4042 }
4043
4044 drm_free_large(exec2_list);
4045 return ret;
4046}
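/*
 * Illustrative userspace sketch (not part of this file) of driving the
 * execbuf2 ioctl above.  "fd" is an authenticated DRM fd and "bo_handle"/
 * "batch_len" are placeholders; relocation handling and error checking
 * are omitted:
 *
 *	struct drm_i915_gem_exec_object2 exec = {
 *		.handle = bo_handle,
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)&exec,
 *		.buffer_count = 1,
 *		.batch_len = batch_len,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */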
4047
Eric Anholt673a3942008-07-30 12:06:12 -07004048int
4049i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4050{
4051 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01004052 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004053 int ret;
4054
Daniel Vetter778c3542010-05-13 11:49:44 +02004055 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4056
Eric Anholt673a3942008-07-30 12:06:12 -07004057 i915_verify_inactive(dev, __FILE__, __LINE__);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01004058
4059 if (obj_priv->gtt_space != NULL) {
4060 if (alignment == 0)
4061 alignment = i915_gem_get_gtt_alignment(obj);
4062 if (obj_priv->gtt_offset & (alignment - 1)) {
Chris Wilsonae7d49d2010-08-04 12:37:41 +01004063 WARN(obj_priv->pin_count,
4064 "bo is already pinned with incorrect alignment:"
4065 " offset=%x, req.alignment=%x\n",
4066 obj_priv->gtt_offset, alignment);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01004067 ret = i915_gem_object_unbind(obj);
4068 if (ret)
4069 return ret;
4070 }
4071 }
4072
Eric Anholt673a3942008-07-30 12:06:12 -07004073 if (obj_priv->gtt_space == NULL) {
4074 ret = i915_gem_object_bind_to_gtt(obj, alignment);
Chris Wilson97311292009-09-21 00:22:34 +01004075 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07004076 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00004077 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05004078
Eric Anholt673a3942008-07-30 12:06:12 -07004079 obj_priv->pin_count++;
4080
4081 /* If the object is not active and not pending a flush,
4082 * remove it from the inactive list
4083 */
4084 if (obj_priv->pin_count == 1) {
4085 atomic_inc(&dev->pin_count);
4086 atomic_add(obj->size, &dev->pin_memory);
4087 if (!obj_priv->active &&
Chris Wilsonbf1a1092010-08-07 11:01:20 +01004088 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Eric Anholt673a3942008-07-30 12:06:12 -07004089 list_del_init(&obj_priv->list);
4090 }
4091 i915_verify_inactive(dev, __FILE__, __LINE__);
4092
4093 return 0;
4094}
4095
4096void
4097i915_gem_object_unpin(struct drm_gem_object *obj)
4098{
4099 struct drm_device *dev = obj->dev;
4100 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01004101 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004102
4103 i915_verify_inactive(dev, __FILE__, __LINE__);
4104 obj_priv->pin_count--;
4105 BUG_ON(obj_priv->pin_count < 0);
4106 BUG_ON(obj_priv->gtt_space == NULL);
4107
4108 /* If the object is no longer pinned, and is
4109 * neither active nor being flushed, then stick it on
4110 * the inactive list
4111 */
4112 if (obj_priv->pin_count == 0) {
4113 if (!obj_priv->active &&
Chris Wilson21d509e2009-06-06 09:46:02 +01004114 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Eric Anholt673a3942008-07-30 12:06:12 -07004115 list_move_tail(&obj_priv->list,
4116 &dev_priv->mm.inactive_list);
4117 atomic_dec(&dev->pin_count);
4118 atomic_sub(obj->size, &dev->pin_memory);
4119 }
4120 i915_verify_inactive(dev, __FILE__, __LINE__);
4121}
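/*
 * In-kernel callers pair pin/unpin around any use of a fixed GTT offset;
 * a minimal sketch (the pipe-control setup later in this file follows the
 * same pattern):
 *
 *	ret = i915_gem_object_pin(obj, 4096);
 *	if (ret)
 *		return ret;
 *	offset = to_intel_bo(obj)->gtt_offset;
 *	... program the hardware with offset ...
 *	i915_gem_object_unpin(obj);
 */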
4122
4123int
4124i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4125 struct drm_file *file_priv)
4126{
4127 struct drm_i915_gem_pin *args = data;
4128 struct drm_gem_object *obj;
4129 struct drm_i915_gem_object *obj_priv;
4130 int ret;
4131
4132 mutex_lock(&dev->struct_mutex);
4133
4134 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4135 if (obj == NULL) {
4136 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
4137 args->handle);
4138 mutex_unlock(&dev->struct_mutex);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01004139 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07004140 }
Daniel Vetter23010e42010-03-08 13:35:02 +01004141 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004142
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004143 if (obj_priv->madv != I915_MADV_WILLNEED) {
4144 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson3ef94da2009-09-14 16:50:29 +01004145 drm_gem_object_unreference(obj);
4146 mutex_unlock(&dev->struct_mutex);
4147 return -EINVAL;
4148 }
4149
Jesse Barnes79e53942008-11-07 14:24:08 -08004150 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4151 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4152 args->handle);
Chris Wilson96dec612009-02-08 19:08:04 +00004153 drm_gem_object_unreference(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004154 mutex_unlock(&dev->struct_mutex);
Jesse Barnes79e53942008-11-07 14:24:08 -08004155 return -EINVAL;
4156 }
4157
4158 obj_priv->user_pin_count++;
4159 obj_priv->pin_filp = file_priv;
4160 if (obj_priv->user_pin_count == 1) {
4161 ret = i915_gem_object_pin(obj, args->alignment);
4162 if (ret != 0) {
4163 drm_gem_object_unreference(obj);
4164 mutex_unlock(&dev->struct_mutex);
4165 return ret;
4166 }
Eric Anholt673a3942008-07-30 12:06:12 -07004167 }
4168
4169 /* XXX - flush the CPU caches for pinned objects
4170 * as the X server doesn't manage domains yet
4171 */
Eric Anholte47c68e2008-11-14 13:35:19 -08004172 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004173 args->offset = obj_priv->gtt_offset;
4174 drm_gem_object_unreference(obj);
4175 mutex_unlock(&dev->struct_mutex);
4176
4177 return 0;
4178}
4179
4180int
4181i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4182 struct drm_file *file_priv)
4183{
4184 struct drm_i915_gem_pin *args = data;
4185 struct drm_gem_object *obj;
Jesse Barnes79e53942008-11-07 14:24:08 -08004186 struct drm_i915_gem_object *obj_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07004187
4188 mutex_lock(&dev->struct_mutex);
4189
4190 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4191 if (obj == NULL) {
4192 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
4193 args->handle);
4194 mutex_unlock(&dev->struct_mutex);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01004195 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07004196 }
4197
Daniel Vetter23010e42010-03-08 13:35:02 +01004198 obj_priv = to_intel_bo(obj);
Jesse Barnes79e53942008-11-07 14:24:08 -08004199 if (obj_priv->pin_filp != file_priv) {
4200 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4201 args->handle);
4202 drm_gem_object_unreference(obj);
4203 mutex_unlock(&dev->struct_mutex);
4204 return -EINVAL;
4205 }
4206 obj_priv->user_pin_count--;
4207 if (obj_priv->user_pin_count == 0) {
4208 obj_priv->pin_filp = NULL;
4209 i915_gem_object_unpin(obj);
4210 }
Eric Anholt673a3942008-07-30 12:06:12 -07004211
4212 drm_gem_object_unreference(obj);
4213 mutex_unlock(&dev->struct_mutex);
4214 return 0;
4215}
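/*
 * Hedged userspace sketch (not part of this file) of the pin/unpin ioctls
 * above; "fd" and "handle" are placeholders and the caller needs the
 * appropriate privileges.  On success the object's fixed GTT offset is
 * returned in pin.offset:
 *
 *	struct drm_i915_gem_pin pin = { .handle = handle, .alignment = 4096 };
 *	struct drm_i915_gem_unpin unpin = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin);
 *	...
 *	ioctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
 */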
4216
4217int
4218i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4219 struct drm_file *file_priv)
4220{
4221 struct drm_i915_gem_busy *args = data;
4222 struct drm_gem_object *obj;
4223 struct drm_i915_gem_object *obj_priv;
4224
Eric Anholt673a3942008-07-30 12:06:12 -07004225 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4226 if (obj == NULL) {
4227 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
4228 args->handle);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01004229 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07004230 }
4231
Chris Wilsonb1ce7862009-06-06 09:46:00 +01004232 mutex_lock(&dev->struct_mutex);
Zou Nan haid1b851f2010-05-21 09:08:57 +08004233
Chris Wilson0be555b2010-08-04 15:36:30 +01004234 /* Count all active objects as busy, even if they are currently not used
4235 * by the gpu. Users of this interface expect objects to eventually
 4236	 * become non-busy without any further action, so emit any
4237 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08004238 */
Chris Wilson0be555b2010-08-04 15:36:30 +01004239 obj_priv = to_intel_bo(obj);
4240 args->busy = obj_priv->active;
4241 if (args->busy) {
4242 /* Unconditionally flush objects, even when the gpu still uses this
4243 * object. Userspace calling this function indicates that it wants to
 4244		 * use this buffer sooner rather than later, so issuing the required
4245 * flush earlier is beneficial.
4246 */
4247 if (obj->write_domain) {
4248 i915_gem_flush(dev, 0, obj->write_domain);
4249 (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
4250 }
4251
4252 /* Update the active list for the hardware's current position.
4253 * Otherwise this only updates on a delayed timer or when irqs
4254 * are actually unmasked, and our working set ends up being
4255 * larger than required.
4256 */
4257 i915_gem_retire_requests_ring(dev, obj_priv->ring);
4258
4259 args->busy = obj_priv->active;
4260 }
Eric Anholt673a3942008-07-30 12:06:12 -07004261
4262 drm_gem_object_unreference(obj);
4263 mutex_unlock(&dev->struct_mutex);
4264 return 0;
4265}
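/*
 * Hedged userspace sketch (not part of this file): naive completion polling
 * via the busy ioctl above.  "fd" and "handle" are placeholders; real users
 * would sleep or do useful work between iterations:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	do {
 *		ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	} while (busy.busy);
 */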
4266
4267int
4268i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4269 struct drm_file *file_priv)
4270{
4271 return i915_gem_ring_throttle(dev, file_priv);
4272}
4273
Chris Wilson3ef94da2009-09-14 16:50:29 +01004274int
4275i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4276 struct drm_file *file_priv)
4277{
4278 struct drm_i915_gem_madvise *args = data;
4279 struct drm_gem_object *obj;
4280 struct drm_i915_gem_object *obj_priv;
4281
4282 switch (args->madv) {
4283 case I915_MADV_DONTNEED:
4284 case I915_MADV_WILLNEED:
4285 break;
4286 default:
4287 return -EINVAL;
4288 }
4289
4290 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4291 if (obj == NULL) {
4292 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4293 args->handle);
Chris Wilsonbf79cb92010-08-04 14:19:46 +01004294 return -ENOENT;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004295 }
4296
4297 mutex_lock(&dev->struct_mutex);
Daniel Vetter23010e42010-03-08 13:35:02 +01004298 obj_priv = to_intel_bo(obj);
Chris Wilson3ef94da2009-09-14 16:50:29 +01004299
4300 if (obj_priv->pin_count) {
4301 drm_gem_object_unreference(obj);
4302 mutex_unlock(&dev->struct_mutex);
4303
4304 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4305 return -EINVAL;
4306 }
4307
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004308 if (obj_priv->madv != __I915_MADV_PURGED)
4309 obj_priv->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01004310
Chris Wilson2d7ef392009-09-20 23:13:10 +01004311 /* if the object is no longer bound, discard its backing storage */
4312 if (i915_gem_object_is_purgeable(obj_priv) &&
4313 obj_priv->gtt_space == NULL)
4314 i915_gem_object_truncate(obj);
4315
Chris Wilsonbb6baf72009-09-22 14:24:13 +01004316 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4317
Chris Wilson3ef94da2009-09-14 16:50:29 +01004318 drm_gem_object_unreference(obj);
4319 mutex_unlock(&dev->struct_mutex);
4320
4321 return 0;
4322}
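/*
 * Hedged userspace sketch (not part of this file) of the madvise ioctl
 * above: mark a cached buffer purgeable while idle, then reclaim it before
 * reuse.  "fd" and "handle" are placeholders; if retained comes back 0 the
 * backing pages were purged and the contents must be regenerated:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	madv.madv = I915_MADV_WILLNEED;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reupload_buffer_contents(fd, handle);
 *
 * (reupload_buffer_contents() is a hypothetical application helper.)
 */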
4323
Daniel Vetterac52bc52010-04-09 19:05:06 +00004324struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4325 size_t size)
4326{
Daniel Vetterc397b902010-04-09 19:05:07 +00004327 struct drm_i915_gem_object *obj;
4328
4329 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4330 if (obj == NULL)
4331 return NULL;
4332
4333 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4334 kfree(obj);
4335 return NULL;
4336 }
4337
4338 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4339 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4340
4341 obj->agp_type = AGP_USER_MEMORY;
Daniel Vetter62b8b212010-04-09 19:05:08 +00004342 obj->base.driver_private = NULL;
Daniel Vetterc397b902010-04-09 19:05:07 +00004343 obj->fence_reg = I915_FENCE_REG_NONE;
4344 INIT_LIST_HEAD(&obj->list);
4345 INIT_LIST_HEAD(&obj->gpu_write_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00004346 obj->madv = I915_MADV_WILLNEED;
4347
4348 trace_i915_gem_object_create(&obj->base);
4349
4350 return &obj->base;
Daniel Vetterac52bc52010-04-09 19:05:06 +00004351}
4352
Eric Anholt673a3942008-07-30 12:06:12 -07004353int i915_gem_init_object(struct drm_gem_object *obj)
4354{
Daniel Vetterc397b902010-04-09 19:05:07 +00004355 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08004356
Eric Anholt673a3942008-07-30 12:06:12 -07004357 return 0;
4358}
4359
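/* Final stage of freeing an object: if the unbind had to be aborted by a
 * signal (-ERESTARTSYS), park the object on the deferred_free_list so the
 * free can be retried later instead of blocking here.
 */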
Chris Wilsonbe726152010-07-23 23:18:50 +01004360static void i915_gem_free_object_tail(struct drm_gem_object *obj)
4361{
4362 struct drm_device *dev = obj->dev;
4363 drm_i915_private_t *dev_priv = dev->dev_private;
4364 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4365 int ret;
4366
4367 ret = i915_gem_object_unbind(obj);
4368 if (ret == -ERESTARTSYS) {
4369 list_move(&obj_priv->list,
4370 &dev_priv->mm.deferred_free_list);
4371 return;
4372 }
4373
4374 if (obj_priv->mmap_offset)
4375 i915_gem_free_mmap_offset(obj);
4376
4377 drm_gem_object_release(obj);
4378
4379 kfree(obj_priv->page_cpu_valid);
4380 kfree(obj_priv->bit_17);
4381 kfree(obj_priv);
4382}
4383
Eric Anholt673a3942008-07-30 12:06:12 -07004384void i915_gem_free_object(struct drm_gem_object *obj)
4385{
Jesse Barnesde151cf2008-11-12 10:03:55 -08004386 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01004387 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004388
Chris Wilson1c5d22f2009-08-25 11:15:50 +01004389 trace_i915_gem_object_destroy(obj);
4390
Eric Anholt673a3942008-07-30 12:06:12 -07004391 while (obj_priv->pin_count > 0)
4392 i915_gem_object_unpin(obj);
4393
Dave Airlie71acb5e2008-12-30 20:31:46 +10004394 if (obj_priv->phys_obj)
4395 i915_gem_detach_phys_object(dev, obj);
4396
Chris Wilsonbe726152010-07-23 23:18:50 +01004397 i915_gem_free_object_tail(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07004398}
4399
Jesse Barnes5669fca2009-02-17 15:13:31 -08004400int
Eric Anholt673a3942008-07-30 12:06:12 -07004401i915_gem_idle(struct drm_device *dev)
4402{
4403 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00004404 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004405
Keith Packard6dbe2772008-10-14 21:41:13 -07004406 mutex_lock(&dev->struct_mutex);
4407
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004408 if (dev_priv->mm.suspended ||
Zou Nan haid1b851f2010-05-21 09:08:57 +08004409 (dev_priv->render_ring.gem_object == NULL) ||
4410 (HAS_BSD(dev) &&
4411 dev_priv->bsd_ring.gem_object == NULL)) {
Keith Packard6dbe2772008-10-14 21:41:13 -07004412 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004413 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07004414 }
Eric Anholt673a3942008-07-30 12:06:12 -07004415
Chris Wilson29105cc2010-01-07 10:39:13 +00004416 ret = i915_gpu_idle(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004417 if (ret) {
4418 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07004419 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07004420 }
Eric Anholt673a3942008-07-30 12:06:12 -07004421
Chris Wilson29105cc2010-01-07 10:39:13 +00004422 /* Under UMS, be paranoid and evict. */
4423 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01004424 ret = i915_gem_evict_inactive(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004425 if (ret) {
4426 mutex_unlock(&dev->struct_mutex);
4427 return ret;
4428 }
4429 }
4430
4431 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4432 * We need to replace this with a semaphore, or something.
4433 * And not confound mm.suspended!
4434 */
4435 dev_priv->mm.suspended = 1;
4436 del_timer(&dev_priv->hangcheck_timer);
4437
4438 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07004439 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00004440
Keith Packard6dbe2772008-10-14 21:41:13 -07004441 mutex_unlock(&dev->struct_mutex);
4442
Chris Wilson29105cc2010-01-07 10:39:13 +00004443 /* Cancel the retire work handler, which should be idle now. */
4444 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4445
Eric Anholt673a3942008-07-30 12:06:12 -07004446 return 0;
4447}
4448
Jesse Barnese552eb72010-04-21 11:39:23 -07004449/*
4450 * 965+ support PIPE_CONTROL commands, which provide finer grained control
4451 * over cache flushing.
4452 */
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004453static int
Jesse Barnese552eb72010-04-21 11:39:23 -07004454i915_gem_init_pipe_control(struct drm_device *dev)
4455{
4456 drm_i915_private_t *dev_priv = dev->dev_private;
4457 struct drm_gem_object *obj;
4458 struct drm_i915_gem_object *obj_priv;
4459 int ret;
4460
Eric Anholt34dc4d42010-05-07 14:30:03 -07004461 obj = i915_gem_alloc_object(dev, 4096);
Jesse Barnese552eb72010-04-21 11:39:23 -07004462 if (obj == NULL) {
4463 DRM_ERROR("Failed to allocate seqno page\n");
4464 ret = -ENOMEM;
4465 goto err;
4466 }
4467 obj_priv = to_intel_bo(obj);
4468 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4469
4470 ret = i915_gem_object_pin(obj, 4096);
4471 if (ret)
4472 goto err_unref;
4473
4474 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4475 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4476 if (dev_priv->seqno_page == NULL)
4477 goto err_unpin;
4478
4479 dev_priv->seqno_obj = obj;
4480 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4481
4482 return 0;
4483
4484err_unpin:
4485 i915_gem_object_unpin(obj);
4486err_unref:
4487 drm_gem_object_unreference(obj);
4488err:
4489 return ret;
4490}
4491
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004492
4493static void
Jesse Barnese552eb72010-04-21 11:39:23 -07004494i915_gem_cleanup_pipe_control(struct drm_device *dev)
4495{
4496 drm_i915_private_t *dev_priv = dev->dev_private;
4497 struct drm_gem_object *obj;
4498 struct drm_i915_gem_object *obj_priv;
4499
4500 obj = dev_priv->seqno_obj;
4501 obj_priv = to_intel_bo(obj);
4502 kunmap(obj_priv->pages[0]);
4503 i915_gem_object_unpin(obj);
4504 drm_gem_object_unreference(obj);
4505 dev_priv->seqno_obj = NULL;
4506
4507 dev_priv->seqno_page = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07004508}
4509
Eric Anholt673a3942008-07-30 12:06:12 -07004510int
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004511i915_gem_init_ringbuffer(struct drm_device *dev)
4512{
4513 drm_i915_private_t *dev_priv = dev->dev_private;
4514 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004515
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004516 dev_priv->render_ring = render_ring;
Chris Wilson68f95ba2010-05-27 13:18:22 +01004517
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004518 if (!I915_NEED_GFX_HWS(dev)) {
4519 dev_priv->render_ring.status_page.page_addr
4520 = dev_priv->status_page_dmah->vaddr;
4521 memset(dev_priv->render_ring.status_page.page_addr,
4522 0, PAGE_SIZE);
4523 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004524
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004525 if (HAS_PIPE_CONTROL(dev)) {
4526 ret = i915_gem_init_pipe_control(dev);
4527 if (ret)
4528 return ret;
4529 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004530
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004531 ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004532 if (ret)
4533 goto cleanup_pipe_control;
4534
4535 if (HAS_BSD(dev)) {
Zou Nan haid1b851f2010-05-21 09:08:57 +08004536 dev_priv->bsd_ring = bsd_ring;
4537 ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
Chris Wilson68f95ba2010-05-27 13:18:22 +01004538 if (ret)
4539 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08004540 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01004541
Chris Wilson6f392d5482010-08-07 11:01:22 +01004542 dev_priv->next_seqno = 1;
4543
Chris Wilson68f95ba2010-05-27 13:18:22 +01004544 return 0;
4545
4546cleanup_render_ring:
4547 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4548cleanup_pipe_control:
4549 if (HAS_PIPE_CONTROL(dev))
4550 i915_gem_cleanup_pipe_control(dev);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004551 return ret;
4552}
4553
4554void
4555i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4556{
4557 drm_i915_private_t *dev_priv = dev->dev_private;
4558
4559 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
Zou Nan haid1b851f2010-05-21 09:08:57 +08004560 if (HAS_BSD(dev))
4561 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08004562 if (HAS_PIPE_CONTROL(dev))
4563 i915_gem_cleanup_pipe_control(dev);
4564}
4565
4566int
Eric Anholt673a3942008-07-30 12:06:12 -07004567i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4568 struct drm_file *file_priv)
4569{
4570 drm_i915_private_t *dev_priv = dev->dev_private;
4571 int ret;
4572
Jesse Barnes79e53942008-11-07 14:24:08 -08004573 if (drm_core_check_feature(dev, DRIVER_MODESET))
4574 return 0;
4575
Ben Gamariba1234d2009-09-14 17:48:47 -04004576 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07004577 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04004578 atomic_set(&dev_priv->mm.wedged, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07004579 }
4580
Eric Anholt673a3942008-07-30 12:06:12 -07004581 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004582 dev_priv->mm.suspended = 0;
4583
4584 ret = i915_gem_init_ringbuffer(dev);
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004585 if (ret != 0) {
4586 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004587 return ret;
Wu Fengguangd816f6a2009-04-18 10:43:32 +08004588 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08004589
Carl Worth5e118f42009-03-20 11:54:25 -07004590 spin_lock(&dev_priv->mm.active_list_lock);
Zou Nan hai852835f2010-05-21 09:08:56 +08004591 BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
Zou Nan haid1b851f2010-05-21 09:08:57 +08004592 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
Carl Worth5e118f42009-03-20 11:54:25 -07004593 spin_unlock(&dev_priv->mm.active_list_lock);
4594
Eric Anholt673a3942008-07-30 12:06:12 -07004595 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4596 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
Zou Nan hai852835f2010-05-21 09:08:56 +08004597 BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
Zou Nan haid1b851f2010-05-21 09:08:57 +08004598 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
Eric Anholt673a3942008-07-30 12:06:12 -07004599 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004600
Chris Wilson5f353082010-06-07 14:03:03 +01004601 ret = drm_irq_install(dev);
4602 if (ret)
4603 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004604
Eric Anholt673a3942008-07-30 12:06:12 -07004605 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01004606
4607cleanup_ringbuffer:
4608 mutex_lock(&dev->struct_mutex);
4609 i915_gem_cleanup_ringbuffer(dev);
4610 dev_priv->mm.suspended = 1;
4611 mutex_unlock(&dev->struct_mutex);
4612
4613 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004614}
4615
4616int
4617i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4618 struct drm_file *file_priv)
4619{
Jesse Barnes79e53942008-11-07 14:24:08 -08004620 if (drm_core_check_feature(dev, DRIVER_MODESET))
4621 return 0;
4622
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04004623 drm_irq_uninstall(dev);
Linus Torvaldse6890f62009-09-08 17:09:24 -07004624 return i915_gem_idle(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07004625}
4626
4627void
4628i915_gem_lastclose(struct drm_device *dev)
4629{
4630 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07004631
Eric Anholte806b492009-01-22 09:56:58 -08004632 if (drm_core_check_feature(dev, DRIVER_MODESET))
4633 return;
4634
Keith Packard6dbe2772008-10-14 21:41:13 -07004635 ret = i915_gem_idle(dev);
4636 if (ret)
4637 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07004638}
4639
4640void
4641i915_gem_load(struct drm_device *dev)
4642{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004643 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07004644 drm_i915_private_t *dev_priv = dev->dev_private;
4645
Carl Worth5e118f42009-03-20 11:54:25 -07004646 spin_lock_init(&dev_priv->mm.active_list_lock);
Eric Anholt673a3942008-07-30 12:06:12 -07004647 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
Daniel Vetter99fcb762010-02-07 16:20:18 +01004648 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004649 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07004650 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilsonbe726152010-07-23 23:18:50 +01004651 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
Zou Nan hai852835f2010-05-21 09:08:56 +08004652 INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
4653 INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
Zou Nan haid1b851f2010-05-21 09:08:57 +08004654 if (HAS_BSD(dev)) {
4655 INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
4656 INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
4657 }
Daniel Vetter007cc8a2010-04-28 11:02:31 +02004658 for (i = 0; i < 16; i++)
4659 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07004660 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4661 i915_gem_retire_work_handler);
Chris Wilson31169712009-09-14 16:50:28 +01004662 spin_lock(&shrink_list_lock);
4663 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4664 spin_unlock(&shrink_list_lock);
4665
Dave Airlie94400122010-07-20 13:15:31 +10004666 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4667 if (IS_GEN3(dev)) {
4668 u32 tmp = I915_READ(MI_ARB_STATE);
4669 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
4670 /* arb state is a masked write, so set bit + bit in mask */
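			/* (bits 31:16 act as a per-bit write enable for bits 15:0,
			 * hence the extra copy shifted by MI_ARB_MASK_SHIFT) */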
4671 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
4672 I915_WRITE(MI_ARB_STATE, tmp);
4673 }
4674 }
4675
Jesse Barnesde151cf2008-11-12 10:03:55 -08004676 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08004677 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4678 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08004679
Jesse Barnes0f973f22009-01-26 17:10:45 -08004680 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08004681 dev_priv->num_fence_regs = 16;
4682 else
4683 dev_priv->num_fence_regs = 8;
4684
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02004685 /* Initialize fence registers to zero */
4686 if (IS_I965G(dev)) {
4687 for (i = 0; i < 16; i++)
4688 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4689 } else {
4690 for (i = 0; i < 8; i++)
4691 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4692 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4693 for (i = 0; i < 8; i++)
4694 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4695 }
Eric Anholt673a3942008-07-30 12:06:12 -07004696 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05004697 init_waitqueue_head(&dev_priv->pending_flip_queue);
Eric Anholt673a3942008-07-30 12:06:12 -07004698}
Dave Airlie71acb5e2008-12-30 20:31:46 +10004699
4700/*
4701 * Create a physically contiguous memory object for this object
4702 * e.g. for cursor + overlay regs
4703 */
4704int i915_gem_init_phys_object(struct drm_device *dev,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004705 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004706{
4707 drm_i915_private_t *dev_priv = dev->dev_private;
4708 struct drm_i915_gem_phys_object *phys_obj;
4709 int ret;
4710
4711 if (dev_priv->mm.phys_objs[id - 1] || !size)
4712 return 0;
4713
Eric Anholt9a298b22009-03-24 12:23:04 -07004714 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004715 if (!phys_obj)
4716 return -ENOMEM;
4717
4718 phys_obj->id = id;
4719
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004720 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004721 if (!phys_obj->handle) {
4722 ret = -ENOMEM;
4723 goto kfree_obj;
4724 }
4725#ifdef CONFIG_X86
4726 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4727#endif
4728
4729 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4730
4731 return 0;
4732kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004733 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004734 return ret;
4735}
4736
4737void i915_gem_free_phys_object(struct drm_device *dev, int id)
4738{
4739 drm_i915_private_t *dev_priv = dev->dev_private;
4740 struct drm_i915_gem_phys_object *phys_obj;
4741
4742 if (!dev_priv->mm.phys_objs[id - 1])
4743 return;
4744
4745 phys_obj = dev_priv->mm.phys_objs[id - 1];
4746 if (phys_obj->cur_obj) {
4747 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4748 }
4749
4750#ifdef CONFIG_X86
4751 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4752#endif
4753 drm_pci_free(dev, phys_obj->handle);
4754 kfree(phys_obj);
4755 dev_priv->mm.phys_objs[id - 1] = NULL;
4756}
4757
4758void i915_gem_free_all_phys_object(struct drm_device *dev)
4759{
4760 int i;
4761
Dave Airlie260883c2009-01-22 17:58:49 +10004762 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004763 i915_gem_free_phys_object(dev, i);
4764}
4765
4766void i915_gem_detach_phys_object(struct drm_device *dev,
4767 struct drm_gem_object *obj)
4768{
4769 struct drm_i915_gem_object *obj_priv;
4770 int i;
4771 int ret;
4772 int page_count;
4773
Daniel Vetter23010e42010-03-08 13:35:02 +01004774 obj_priv = to_intel_bo(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004775 if (!obj_priv->phys_obj)
4776 return;
4777
Chris Wilson4bdadb92010-01-27 13:36:32 +00004778 ret = i915_gem_object_get_pages(obj, 0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004779 if (ret)
4780 goto out;
4781
4782 page_count = obj->size / PAGE_SIZE;
4783
4784 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07004785 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004786 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4787
4788 memcpy(dst, src, PAGE_SIZE);
4789 kunmap_atomic(dst, KM_USER0);
4790 }
Eric Anholt856fa192009-03-19 14:10:50 -07004791 drm_clflush_pages(obj_priv->pages, page_count);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004792 drm_agp_chipset_flush(dev);
Chris Wilsond78b47b2009-06-17 21:52:49 +01004793
4794 i915_gem_object_put_pages(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004795out:
4796 obj_priv->phys_obj->cur_obj = NULL;
4797 obj_priv->phys_obj = NULL;
4798}
4799
4800int
4801i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004802 struct drm_gem_object *obj,
4803 int id,
4804 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004805{
4806 drm_i915_private_t *dev_priv = dev->dev_private;
4807 struct drm_i915_gem_object *obj_priv;
4808 int ret = 0;
4809 int page_count;
4810 int i;
4811
4812 if (id > I915_MAX_PHYS_OBJECT)
4813 return -EINVAL;
4814
Daniel Vetter23010e42010-03-08 13:35:02 +01004815 obj_priv = to_intel_bo(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004816
4817 if (obj_priv->phys_obj) {
4818 if (obj_priv->phys_obj->id == id)
4819 return 0;
4820 i915_gem_detach_phys_object(dev, obj);
4821 }
4822
Dave Airlie71acb5e2008-12-30 20:31:46 +10004823 /* create a new object */
4824 if (!dev_priv->mm.phys_objs[id - 1]) {
4825 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004826 obj->size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004827 if (ret) {
Linus Torvaldsaeb565d2009-01-26 10:01:53 -08004828 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004829 goto out;
4830 }
4831 }
4832
4833 /* bind to the object */
4834 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4835 obj_priv->phys_obj->cur_obj = obj;
4836
Chris Wilson4bdadb92010-01-27 13:36:32 +00004837 ret = i915_gem_object_get_pages(obj, 0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004838 if (ret) {
4839 DRM_ERROR("failed to get page list\n");
4840 goto out;
4841 }
4842
4843 page_count = obj->size / PAGE_SIZE;
4844
4845 for (i = 0; i < page_count; i++) {
Eric Anholt856fa192009-03-19 14:10:50 -07004846 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004847 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4848
4849 memcpy(dst, src, PAGE_SIZE);
4850 kunmap_atomic(src, KM_USER0);
4851 }
4852
Chris Wilsond78b47b2009-06-17 21:52:49 +01004853 i915_gem_object_put_pages(obj);
4854
Dave Airlie71acb5e2008-12-30 20:31:46 +10004855 return 0;
4856out:
4857 return ret;
4858}
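/*
 * Sketch of the expected caller pattern (the cursor code attaches its
 * buffer this way so the hardware can scan out of contiguous memory);
 * "align" is whatever alignment the hardware requires and the details are
 * illustrative only:
 *
 *	ret = i915_gem_attach_phys_object(dev, obj, I915_GEM_PHYS_CURSOR_0, align);
 *	if (ret == 0)
 *		addr = to_intel_bo(obj)->phys_obj->handle->busaddr;
 */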
4859
4860static int
4861i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4862 struct drm_i915_gem_pwrite *args,
4863 struct drm_file *file_priv)
4864{
Daniel Vetter23010e42010-03-08 13:35:02 +01004865 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004866 void *obj_addr;
4867 int ret;
4868 char __user *user_data;
4869
4870 user_data = (char __user *) (uintptr_t) args->data_ptr;
4871 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4872
Zhao Yakui44d98a62009-10-09 11:39:40 +08004873 DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004874 ret = copy_from_user(obj_addr, user_data, args->size);
4875 if (ret)
4876 return -EFAULT;
4877
4878 drm_agp_chipset_flush(dev);
4879 return 0;
4880}
Eric Anholtb9624422009-06-03 07:27:35 +00004881
4882void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4883{
4884 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4885
4886 /* Clean up our request list when the client is going away, so that
4887 * later retire_requests won't dereference our soon-to-be-gone
4888 * file_priv.
4889 */
4890 mutex_lock(&dev->struct_mutex);
4891 while (!list_empty(&i915_file_priv->mm.request_list))
4892 list_del_init(i915_file_priv->mm.request_list.next);
4893 mutex_unlock(&dev->struct_mutex);
4894}
Chris Wilson31169712009-09-14 16:50:28 +01004895
Chris Wilson31169712009-09-14 16:50:28 +01004896static int
Chris Wilson1637ef42010-04-20 17:10:35 +01004897i915_gpu_is_active(struct drm_device *dev)
4898{
4899 drm_i915_private_t *dev_priv = dev->dev_private;
4900 int lists_empty;
4901
4902 spin_lock(&dev_priv->mm.active_list_lock);
4903 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
Zou Nan hai852835f2010-05-21 09:08:56 +08004904 list_empty(&dev_priv->render_ring.active_list);
Zou Nan haid1b851f2010-05-21 09:08:57 +08004905 if (HAS_BSD(dev))
4906 lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
Chris Wilson1637ef42010-04-20 17:10:35 +01004907 spin_unlock(&dev_priv->mm.active_list_lock);
4908
4909 return !lists_empty;
4910}
4911
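/*
 * Memory-pressure callback registered via the shrinker below.  A call with
 * nr_to_scan == 0 only asks for an estimate of reclaimable objects; otherwise
 * we try to evict up to nr_to_scan inactive buffers, idling the GPU as a last
 * resort.  Returning -1 signals that the locks could not be taken without
 * risking deadlock and the VM should retry later.
 */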
4912static int
Dave Chinner7f8275d2010-07-19 14:56:17 +10004913i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
Chris Wilson31169712009-09-14 16:50:28 +01004914{
4915 drm_i915_private_t *dev_priv, *next_dev;
4916 struct drm_i915_gem_object *obj_priv, *next_obj;
4917 int cnt = 0;
4918 int would_deadlock = 1;
4919
4920 /* "fast-path" to count number of available objects */
4921 if (nr_to_scan == 0) {
4922 spin_lock(&shrink_list_lock);
4923 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
4924 struct drm_device *dev = dev_priv->dev;
4925
4926 if (mutex_trylock(&dev->struct_mutex)) {
4927 list_for_each_entry(obj_priv,
4928 &dev_priv->mm.inactive_list,
4929 list)
4930 cnt++;
4931 mutex_unlock(&dev->struct_mutex);
4932 }
4933 }
4934 spin_unlock(&shrink_list_lock);
4935
4936 return (cnt / 100) * sysctl_vfs_cache_pressure;
4937 }
4938
4939 spin_lock(&shrink_list_lock);
4940
Chris Wilson1637ef42010-04-20 17:10:35 +01004941rescan:
Chris Wilson31169712009-09-14 16:50:28 +01004942 /* first scan for clean buffers */
4943 list_for_each_entry_safe(dev_priv, next_dev,
4944 &shrink_list, mm.shrink_list) {
4945 struct drm_device *dev = dev_priv->dev;
4946
 4947		if (!mutex_trylock(&dev->struct_mutex))
4948 continue;
4949
4950 spin_unlock(&shrink_list_lock);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01004951 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08004952
Chris Wilson31169712009-09-14 16:50:28 +01004953 list_for_each_entry_safe(obj_priv, next_obj,
4954 &dev_priv->mm.inactive_list,
4955 list) {
4956 if (i915_gem_object_is_purgeable(obj_priv)) {
Daniel Vettera8089e82010-04-09 19:05:09 +00004957 i915_gem_object_unbind(&obj_priv->base);
Chris Wilson31169712009-09-14 16:50:28 +01004958 if (--nr_to_scan <= 0)
4959 break;
4960 }
4961 }
4962
4963 spin_lock(&shrink_list_lock);
4964 mutex_unlock(&dev->struct_mutex);
4965
Chris Wilson963b4832009-09-20 23:03:54 +01004966 would_deadlock = 0;
4967
Chris Wilson31169712009-09-14 16:50:28 +01004968 if (nr_to_scan <= 0)
4969 break;
4970 }
4971
4972 /* second pass, evict/count anything still on the inactive list */
4973 list_for_each_entry_safe(dev_priv, next_dev,
4974 &shrink_list, mm.shrink_list) {
4975 struct drm_device *dev = dev_priv->dev;
4976
 4977		if (!mutex_trylock(&dev->struct_mutex))
4978 continue;
4979
4980 spin_unlock(&shrink_list_lock);
4981
4982 list_for_each_entry_safe(obj_priv, next_obj,
4983 &dev_priv->mm.inactive_list,
4984 list) {
4985 if (nr_to_scan > 0) {
Daniel Vettera8089e82010-04-09 19:05:09 +00004986 i915_gem_object_unbind(&obj_priv->base);
Chris Wilson31169712009-09-14 16:50:28 +01004987 nr_to_scan--;
4988 } else
4989 cnt++;
4990 }
4991
4992 spin_lock(&shrink_list_lock);
4993 mutex_unlock(&dev->struct_mutex);
4994
4995 would_deadlock = 0;
4996 }
4997
Chris Wilson1637ef42010-04-20 17:10:35 +01004998 if (nr_to_scan) {
4999 int active = 0;
5000
5001 /*
5002 * We are desperate for pages, so as a last resort, wait
5003 * for the GPU to finish and discard whatever we can.
 5004		 * This dramatically reduces the number of OOM-killer
 5005		 * events whilst running the GPU aggressively.
5006 */
5007 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5008 struct drm_device *dev = dev_priv->dev;
5009
5010 if (!mutex_trylock(&dev->struct_mutex))
5011 continue;
5012
5013 spin_unlock(&shrink_list_lock);
5014
5015 if (i915_gpu_is_active(dev)) {
5016 i915_gpu_idle(dev);
5017 active++;
5018 }
5019
5020 spin_lock(&shrink_list_lock);
5021 mutex_unlock(&dev->struct_mutex);
5022 }
5023
5024 if (active)
5025 goto rescan;
5026 }
5027
Chris Wilson31169712009-09-14 16:50:28 +01005028 spin_unlock(&shrink_list_lock);
5029
5030 if (would_deadlock)
5031 return -1;
5032 else if (cnt > 0)
5033 return (cnt / 100) * sysctl_vfs_cache_pressure;
5034 else
5035 return 0;
5036}
5037
5038static struct shrinker shrinker = {
5039 .shrink = i915_gem_shrink,
5040 .seeks = DEFAULT_SEEKS,
5041};
5042
5043__init void
5044i915_gem_shrinker_init(void)
5045{
5046 register_shrinker(&shrinker);
5047}
5048
5049__exit void
5050i915_gem_shrinker_exit(void)
5051{
5052 unregister_shrinker(&shrinker);
5053}