/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev, int min_size);
static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

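/*
 * Set up the range of the GTT that GEM will manage.  Both ends must be
 * page-aligned; the range is handed to the drm_mm allocator and the
 * total size is cached in dev->gtt_total for the aperture ioctl.
 */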
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}

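/*
 * Ioctl wrapper around i915_gem_do_init(); struct_mutex is held so the
 * GTT setup cannot race with other GEM operations.
 */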
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

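/*
 * Report the total aperture size and an estimate of the space still
 * available (total minus the memory currently pinned).
 */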
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
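/*
 * Illustrative userspace sketch (not part of this file): the handle
 * comes back in create.handle after something like
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 */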
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_handle_unreference_unlocked(obj);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}

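/*
 * Copy a run of bytes from the object's shmem backing pages to userspace
 * under an atomic kmap; returns -EFAULT if the user page would fault, in
 * which case the caller falls back to the slow path.
 */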
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}

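/*
 * Objects tiled on hardware with bit-17 swizzling need the CPU copy
 * paths to swizzle by hand, since the swizzle depends on the physical
 * address of each page.
 */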
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}

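/*
 * Sleeping page-to-page copy used by the slow pread/pwrite paths; both
 * pages are already pinned, so plain kmap() is safe here.
 */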
static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

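/*
 * As slow_shmem_copy(), but for pages affected by bit-17 swizzling:
 * bit 6 of the GPU-side offset is XORed with bit 17 of the page's
 * physical address while copying.
 */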
static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user()
 * directly from the backing pages of the object to the user's address
 * space.  On a fault, it fails so we can fall back to
 * i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

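/*
 * Grab the object's backing pages; on -ENOMEM, try once more after
 * evicting something from the GTT to free up memory.
 */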
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;

		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		ret = i915_gem_object_get_pages(obj, 0);
	}

	return ret;
}

/**
 * This is the fallback shmem pread path.  It pins the user pages with
 * get_user_pages() up front, so the copy out of the object's backing
 * pages can then be done while holding struct_mutex without risking a
 * page fault.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = to_intel_bo(obj);

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

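/*
 * Write-direction counterpart of fast_shmem_read(): an atomic kmap plus
 * __copy_from_user_inatomic(); -EFAULT sends the caller to the slow path.
 */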
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the user memory and then copies through a kmap of the user page and a
 * WC mapping of the aperture, so it may sleep.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to
 * pin the user memory and copies page by page with kmap.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = to_intel_bo(obj);

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0 &&
		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
976int
977i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
978 struct drm_file *file_priv)
979{
Eric Anholta09ba7f2009-08-29 12:49:51 -0700980 struct drm_i915_private *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -0700981 struct drm_i915_gem_set_domain *args = data;
982 struct drm_gem_object *obj;
Jesse Barnes652c3932009-08-17 13:31:43 -0700983 struct drm_i915_gem_object *obj_priv;
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800984 uint32_t read_domains = args->read_domains;
985 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -0700986 int ret;
987
988 if (!(dev->driver->driver_features & DRIVER_GEM))
989 return -ENODEV;
990
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800991 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +0100992 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800993 return -EINVAL;
994
Chris Wilson21d509e2009-06-06 09:46:02 +0100995 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800996 return -EINVAL;
997
998 /* Having something in the write domain implies it's in the read
999 * domain, and only that read domain. Enforce that in the request.
1000 */
1001 if (write_domain != 0 && read_domains != write_domain)
1002 return -EINVAL;
1003
Eric Anholt673a3942008-07-30 12:06:12 -07001004 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1005 if (obj == NULL)
1006 return -EBADF;
Daniel Vetter23010e42010-03-08 13:35:02 +01001007 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001008
1009 mutex_lock(&dev->struct_mutex);
Jesse Barnes652c3932009-08-17 13:31:43 -07001010
1011 intel_mark_busy(dev, obj);
1012
Eric Anholt673a3942008-07-30 12:06:12 -07001013#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02001014 DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001015 obj, obj->size, read_domains, write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07001016#endif
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001017 if (read_domains & I915_GEM_DOMAIN_GTT) {
1018 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -08001019
Eric Anholta09ba7f2009-08-29 12:49:51 -07001020 /* Update the LRU on the fence for the CPU access that's
1021 * about to occur.
1022 */
1023 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
Daniel Vetter007cc8a2010-04-28 11:02:31 +02001024 struct drm_i915_fence_reg *reg =
1025 &dev_priv->fence_regs[obj_priv->fence_reg];
1026 list_move_tail(&reg->lru_list,
Eric Anholta09ba7f2009-08-29 12:49:51 -07001027 &dev_priv->mm.fence_list);
1028 }
1029
Eric Anholt02354392008-11-26 13:58:13 -08001030 /* Silently promote "you're not bound, there was nothing to do"
1031 * to success, since the client was just asking us to
1032 * make sure everything was done.
1033 */
1034 if (ret == -EINVAL)
1035 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001036 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -08001037 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08001038 }
1039
Eric Anholt673a3942008-07-30 12:06:12 -07001040 drm_gem_object_unreference(obj);
1041 mutex_unlock(&dev->struct_mutex);
1042 return ret;
1043}
1044
/**
 * Called when user space has done writes to this buffer.
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = to_intel_bo(obj);

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 */
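/*
 * Illustrative userspace sketch (not part of this file):
 *
 *	struct drm_i915_gem_mmap arg = { .handle = handle, .size = size };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
 *	void *ptr = (void *)(uintptr_t)arg.addr_ptr;
 */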
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto unlock;

		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret)
			goto unlock;
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		ret = -ENOMEM;
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure.  Similarly, if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked.  Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}

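/*
 * Undo i915_gem_create_mmap_offset(): remove the hash entry, return the
 * fake offset range to the allocator and free the map bookkeeping.
 */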
static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
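/*
 * Illustrative userspace sketch (not part of this file):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */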
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = to_intel_bo(obj);

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

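/*
 * Drop one reference on the object's backing page array; on the last
 * reference, write back dirty pages (unless the object was marked
 * DONTNEED) and release them to the page cache.
 */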
Ben Gamari6911a9b2009-04-02 11:24:54 -07001422void
Eric Anholt856fa192009-03-19 14:10:50 -07001423i915_gem_object_put_pages(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001424{
Daniel Vetter23010e42010-03-08 13:35:02 +01001425 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001426 int page_count = obj->size / PAGE_SIZE;
1427 int i;
1428
Eric Anholt856fa192009-03-19 14:10:50 -07001429 BUG_ON(obj_priv->pages_refcount == 0);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001430 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001431
1432 if (--obj_priv->pages_refcount != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07001433 return;
1434
Eric Anholt280b7132009-03-12 16:56:27 -07001435 if (obj_priv->tiling_mode != I915_TILING_NONE)
1436 i915_gem_object_save_bit_17_swizzle(obj);
1437
Chris Wilson3ef94da2009-09-14 16:50:29 +01001438 if (obj_priv->madv == I915_MADV_DONTNEED)
Chris Wilson13a05fd2009-09-20 23:03:19 +01001439 obj_priv->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001440
1441 for (i = 0; i < page_count; i++) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01001442 if (obj_priv->dirty)
1443 set_page_dirty(obj_priv->pages[i]);
1444
1445 if (obj_priv->madv == I915_MADV_WILLNEED)
Eric Anholt856fa192009-03-19 14:10:50 -07001446 mark_page_accessed(obj_priv->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001447
1448 page_cache_release(obj_priv->pages[i]);
1449 }
Eric Anholt673a3942008-07-30 12:06:12 -07001450 obj_priv->dirty = 0;
1451
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07001452 drm_free_large(obj_priv->pages);
Eric Anholt856fa192009-03-19 14:10:50 -07001453 obj_priv->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001454}

static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
			       struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	BUG_ON(ring == NULL);
	obj_priv->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	spin_lock(&dev_priv->mm.active_list_lock);
	list_move_tail(&obj_priv->list, &ring->active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct inode *inode;

	inode = obj->filp->f_path.dentry->d_inode;
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);

	obj_priv->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->madv == I915_MADV_DONTNEED;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	BUG_ON(!list_empty(&obj_priv->gpu_write_list));

	obj_priv->last_rendering_seqno = 0;
	obj_priv->ring = NULL;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
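
/*
 * Summary sketch of the list transitions above (a reading aid, not
 * driver code):
 *
 *	execbuffer     -> move_to_active    (ring->active_list, takes a ref)
 *	retire, dirty  -> move_to_flushing  (mm.flushing_list, awaits flush)
 *	flush request  -> move_to_active    (back on the active list)
 *	retire, clean  -> move_to_inactive  (mm.inactive_list, drops the ref)
 *
 * Pinned objects drop off the lists entirely rather than joining the
 * inactive LRU.
 */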

static void
i915_gem_process_flushing_list(struct drm_device *dev,
			       uint32_t flush_domains, uint32_t seqno,
			       struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv, *next;

	list_for_each_entry_safe(obj_priv, next,
				 &dev_priv->mm.gpu_write_list,
				 gpu_write_list) {
		struct drm_gem_object *obj = &obj_priv->base;

		if ((obj->write_domain & flush_domains) ==
		    obj->write_domain &&
		    obj_priv->ring->ring_flag == ring->ring_flag) {
			uint32_t old_write_domain = obj->write_domain;

			obj->write_domain = 0;
			list_del_init(&obj_priv->gpu_write_list);
			i915_gem_object_move_to_active(obj, seqno, ring);

			/* update the fence lru list */
			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
				struct drm_i915_fence_reg *reg =
					&dev_priv->fence_regs[obj_priv->fence_reg];
				list_move_tail(&reg->lru_list,
					       &dev_priv->mm.fence_list);
			}

			trace_i915_gem_object_change_domain(obj,
							    obj->read_domains,
							    old_write_domain);
		}
	}
}

uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
		 uint32_t flush_domains, struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = NULL;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;

	if (file_priv != NULL)
		i915_file_priv = file_priv->driver_priv;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return 0;

	seqno = ring->add_request(dev, ring, file_priv, flush_domains);

	request->seqno = seqno;
	request->ring = ring;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);

	if (i915_file_priv) {
		list_add_tail(&request->client_list,
			      &i915_file_priv->mm.request_list);
	} else {
		INIT_LIST_HEAD(&request->client_list);
	}

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0)
		i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);

	if (!dev_priv->mm.suspended) {
		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		if (was_empty)
			queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	}
	return seqno;
}
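
/*
 * Illustrative caller sketch (an assumption about typical usage, not
 * a fixed contract): the common pattern in this file is to emit a
 * flush and then tag the flushed domains with a request, e.g.
 *
 *	i915_gem_flush_ring(dev, write_domain, write_domain, ring);
 *	seqno = i915_add_request(dev, NULL, write_domain, ring);
 *	if (seqno == 0)
 *		return -ENOMEM;		// request allocation failed
 *
 * A zero return from i915_add_request() always means the request
 * structure could not be allocated.
 */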

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
{
	uint32_t flush_domains = 0;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;

	ring->flush(dev, ring,
		    I915_GEM_DOMAIN_COMMAND, flush_domains);
	return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_gem_request_retire(dev, request->seqno);

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	spin_lock(&dev_priv->mm.active_list_lock);
	while (!list_empty(&request->ring->active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&request->ring->active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = &obj_priv->base;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this seqno.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			goto out;

#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else {
			/* Take a reference on the object so it won't be
			 * freed while the spinlock is held.  The list
			 * protection for this spinlock is safe when breaking
			 * the lock like this since the next thing we do
			 * is just get the head of the list again.
			 */
			drm_gem_object_reference(obj);
			i915_gem_object_move_to_inactive(obj);
			spin_unlock(&dev_priv->mm.active_list_lock);
			drm_gem_object_unreference(obj);
			spin_lock(&dev_priv->mm.active_list_lock);
		}
	}
out:
	spin_unlock(&dev_priv->mm.active_list_lock);
}

/**
 * Returns true if seq1 is later than seq2.
 */
bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
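
/*
 * Worked example (a reading aid, not driver code): the signed
 * interpretation of the subtraction makes the comparison robust
 * across 32-bit wraparound.  With seq1 = 0x00000002 and
 * seq2 = 0xfffffffe, seq1 - seq2 = 4, so (int32_t)4 >= 0 and seq1 is
 * correctly treated as later even though it is numerically smaller.
 * The comparison only breaks down once two live seqnos are more than
 * 2^31 apart.
 */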

uint32_t
i915_get_gem_seqno(struct drm_device *dev,
		   struct intel_ring_buffer *ring)
{
	return ring->get_gem_seqno(dev, ring);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
static void
i915_gem_retire_requests_ring(struct drm_device *dev,
			      struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!ring->status_page.page_addr
	    || list_empty(&ring->request_list))
		return;

	seqno = i915_get_gem_seqno(dev, ring);

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    atomic_read(&dev_priv->mm.wedged)) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			list_del(&request->client_list);
			kfree(request);
		} else
			break;
	}

	if (unlikely(dev_priv->trace_irq_seqno &&
		     i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
		ring->user_irq_put(dev, ring);
		dev_priv->trace_irq_seqno = 0;
	}
}

void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
		struct drm_i915_gem_object *obj_priv, *tmp;

		/* We must be careful that during unbind() we do not
		 * accidentally infinitely recurse into retire requests.
		 * Currently:
		 *   retire -> free -> unbind -> wait -> retire_ring
		 */
		list_for_each_entry_safe(obj_priv, tmp,
					 &dev_priv->mm.deferred_free_list,
					 list)
			i915_gem_free_object_tail(&obj_priv->base);
	}

	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
	if (HAS_BSD(dev))
		i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
}

void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.suspended &&
	    (!list_empty(&dev_priv->render_ring.request_list) ||
	     (HAS_BSD(dev) &&
	      !list_empty(&dev_priv->bsd_ring.request_list))))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}

int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
		     int interruptible, struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (atomic_read(&dev_priv->mm.wedged))
		return -EIO;

	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
		if (HAS_PCH_SPLIT(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

		trace_i915_gem_request_wait_begin(dev, seqno);

		ring->waiting_gem_seqno = seqno;
		ring->user_irq_get(dev, ring);
		if (interruptible)
			ret = wait_event_interruptible(ring->irq_queue,
				i915_seqno_passed(
					ring->get_gem_seqno(dev, ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));
		else
			wait_event(ring->irq_queue,
				i915_seqno_passed(
					ring->get_gem_seqno(dev, ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));

		ring->user_irq_put(dev, ring);
		ring->waiting_gem_seqno = 0;

		trace_i915_gem_request_wait_end(dev, seqno);
	}
	if (atomic_read(&dev_priv->mm.wedged))
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests_ring(dev, ring);

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno,
		  struct intel_ring_buffer *ring)
{
	return i915_do_wait_request(dev, seqno, 1, ring);
}

static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);
	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
				    invalidate_domains,
				    flush_domains);

	if (HAS_BSD(dev))
		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
					 invalidate_domains,
					 flush_domains);
}

static void
i915_gem_flush_ring(struct drm_device *dev,
		    uint32_t invalidate_domains,
		    uint32_t flush_domains,
		    struct intel_ring_buffer *ring)
{
	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);
	ring->flush(dev, ring,
		    invalidate_domains,
		    flush_domains);
}
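
/*
 * Reading aid (not driver code): invalidate_domains names the caches
 * that must be invalidated before following commands read through
 * them, while flush_domains names the caches whose dirty data must be
 * written back.  A CPU-domain flush goes through the chipset rather
 * than a ring command, which is why it is special-cased above before
 * the per-ring flush is emitted.
 */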

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev,
				obj_priv->last_rendering_seqno, obj_priv->ring);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret = 0;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* blow away mappings if mapped through GTT */
	i915_gem_release_mmap(obj);

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it.  This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret == -ERESTARTSYS)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */

	BUG_ON(obj_priv->active);

	/* release the fence reg _after_ flushing */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	i915_gem_object_put_pages(obj);
	BUG_ON(obj_priv->pages_refcount);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	spin_lock(&dev_priv->mm.active_list_lock);
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (i915_gem_object_is_purgeable(obj_priv))
		i915_gem_object_truncate(obj);

	trace_i915_gem_object_unbind(obj);

	return ret;
}

static struct drm_gem_object *
i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *best = NULL;
	struct drm_gem_object *first = NULL;

	/* Try to find the smallest clean object */
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		struct drm_gem_object *obj = &obj_priv->base;
		if (obj->size >= min_size) {
			if ((!obj_priv->dirty ||
			     i915_gem_object_is_purgeable(obj_priv)) &&
			    (!best || obj->size < best->size)) {
				best = obj;
				if (best->size == min_size)
					return best;
			}
			if (!first)
				first = obj;
		}
	}

	return best ? best : first;
}

static int
i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool lists_empty;
	uint32_t seqno1, seqno2;
	int ret;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev) ||
			list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty)
		return 0;

	/* Flush everything onto the inactive list. */
	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
				  &dev_priv->render_ring);
	if (seqno1 == 0)
		return -ENOMEM;
	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);

	if (HAS_BSD(dev)) {
		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
					  &dev_priv->bsd_ring);
		if (seqno2 == 0)
			return -ENOMEM;

		ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
		if (ret)
			return ret;
	}

	return ret;
}

static int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	bool lists_empty;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev)
			|| list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty)
		return -ENOSPC;

	/* Flush everything (on to the inactive lists) and evict */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	ret = i915_gem_evict_from_inactive_list(dev);
	if (ret)
		return ret;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev)
			|| list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);
	BUG_ON(!lists_empty);

	return 0;
}

static int
i915_gem_evict_something(struct drm_device *dev, int min_size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
	for (;;) {
		i915_gem_retire_requests(dev);

		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		obj = i915_gem_find_inactive_object(dev, min_size);
		if (obj) {
			struct drm_i915_gem_object *obj_priv;

#if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
			obj_priv = to_intel_bo(obj);
			BUG_ON(obj_priv->pin_count != 0);
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			return i915_gem_object_unbind(obj);
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for the next to finish and hopefully leave us
		 * a buffer to evict.
		 */
		if (!list_empty(&render_ring->request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&render_ring->request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev,
					request->seqno, request->ring);
			if (ret)
				return ret;

			continue;
		}

		if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&bsd_ring->request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev,
					request->seqno, request->ring);
			if (ret)
				return ret;

			continue;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			struct drm_i915_gem_object *obj_priv;

			/* Find an object that we can immediately reuse */
			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
				obj = &obj_priv->base;
				if (obj->size >= min_size)
					break;

				obj = NULL;
			}

			if (obj != NULL) {
				uint32_t seqno;

				i915_gem_flush_ring(dev,
						    obj->write_domain,
						    obj->write_domain,
						    obj_priv->ring);
				seqno = i915_add_request(dev, NULL,
						obj->write_domain,
						obj_priv->ring);
				if (seqno == 0)
					return -ENOMEM;
				continue;
			}
		}

		/* If we didn't do any of the above, there's no single buffer
		 * large enough to swap out for the new one, so just evict
		 * everything and start again.  (This should be rare.)
		 */
		if (!list_empty(&dev_priv->mm.inactive_list))
			return i915_gem_evict_from_inactive_list(dev);
		else
			return i915_gem_evict_everything(dev);
	}
}
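
/*
 * Reading aid (not driver code): the eviction loop above tries, in
 * order, (1) a clean inactive buffer of sufficient size, (2) waiting
 * on the oldest outstanding request per ring, (3) flushing a suitably
 * large buffer off the flushing list, and only then (4) falls back to
 * evicting the whole inactive list or everything.
 */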

int
i915_gem_object_get_pages(struct drm_gem_object *obj,
			  gfp_t gfpmask)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;

	BUG_ON(obj_priv->pages_refcount
	       == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);

	if (obj_priv->pages_refcount++ != 0)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->pages != NULL);
	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
	if (obj_priv->pages == NULL) {
		obj_priv->pages_refcount--;
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER |
					   __GFP_COLD |
					   __GFP_RECLAIMABLE |
					   gfpmask);
		if (IS_ERR(page))
			goto err_pages;

		obj_priv->pages[i] = page;
	}

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	while (i--)
		page_cache_release(obj_priv->pages[i]);

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
	obj_priv->pages_refcount--;
	return PTR_ERR(page);
}

static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
		SANDYBRIDGE_FENCE_PITCH_SHIFT;

	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
}

static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}
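
/*
 * Worked example for the i965 layout above (illustrative numbers, not
 * driver code): an X-tiled object at gtt_offset = 0x00100000 with
 * size 0x00100000 and stride 512 gives
 *
 *	upper 32 bits: (0x00100000 + 0x00100000 - 4096) & 0xfffff000
 *	               = 0x001ff000	(last page of the fenced range)
 *	lower 32 bits: 0x00100000 | ((512/128 - 1) << pitch shift)
 *	               | I965_FENCE_REG_VALID
 *
 * i.e. the 64-bit register encodes the first and last page of the
 * range plus pitch, tiling mode and a valid bit.
 */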

static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t fence_reg, val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
		     __func__, obj_priv->gtt_offset, obj->size);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
	else
		WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (regnum < 8)
		fence_reg = FENCE_REG_830_0 + (regnum * 4);
	else
		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
	I915_WRITE(fence_reg, val);
}
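
/*
 * Worked example (illustrative, not driver code): an X-tiled surface
 * with a 2048-byte stride uses tile_width = 512, so
 * pitch_val = ffs(2048/512) - 1 = ffs(4) - 1 = 2, i.e. the register
 * stores log2 of the pitch in tile widths -- which is why the stride
 * must be a power-of-two multiple of the tile width.
 */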

static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;
	uint32_t fence_size_bits;

	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		return;
	}

	pitch_val = obj_priv->stride / 128;
	pitch_val = ffs(pitch_val) - 1;
	WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
	WARN_ON(fence_size_bits & ~0x00000f00);
	val |= fence_size_bits;
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}

static int i915_find_fence_reg(struct drm_device *dev)
{
	struct drm_i915_fence_reg *reg = NULL;
	struct drm_i915_gem_object *obj_priv = NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int i, avail, ret;

	/* First try to find a free reg */
	avail = 0;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return i;

		obj_priv = to_intel_bo(reg->obj);
		if (!obj_priv->pin_count)
			avail++;
	}

	if (avail == 0)
		return -ENOSPC;

	/* None available, try to steal one or wait for a user to finish */
	i = I915_FENCE_REG_NONE;
	list_for_each_entry(reg, &dev_priv->mm.fence_list,
			    lru_list) {
		obj = reg->obj;
		obj_priv = to_intel_bo(obj);

		if (obj_priv->pin_count)
			continue;

		/* found one! */
		i = obj_priv->fence_reg;
		break;
	}

	BUG_ON(i == I915_FENCE_REG_NONE);

	/* We only have a reference on obj from the active list. put_fence_reg
	 * might drop that one, causing a use-after-free in it. So hold a
	 * private reference to obj like the other callers of put_fence_reg
	 * (set_tiling ioctl) do. */
	drm_gem_object_reference(obj);
	ret = i915_gem_object_put_fence_reg(obj);
	drm_gem_object_unreference(obj);
	if (ret != 0)
		return ret;

	return i;
}

/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg = NULL;
	int ret;

	/* Just update our place in the LRU if our fence is getting used. */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj_priv->fence_reg];
		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
		return 0;
	}

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	ret = i915_find_fence_reg(dev);
	if (ret < 0)
		return ret;

	obj_priv->fence_reg = ret;
	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
	list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);

	reg->obj = obj;

	if (IS_GEN6(dev))
		sandybridge_write_fence_reg(reg);
	else if (IS_I965G(dev))
		i965_write_fence_reg(reg);
	else if (IS_I9XX(dev))
		i915_write_fence_reg(reg);
	else
		i830_write_fence_reg(reg);

	trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
					obj_priv->tiling_mode);

	return 0;
}

/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg =
		&dev_priv->fence_regs[obj_priv->fence_reg];

	if (IS_GEN6(dev)) {
		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
			     (obj_priv->fence_reg * 8), 0);
	} else if (IS_I965G(dev)) {
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
	} else {
		uint32_t fence_reg;

		if (obj_priv->fence_reg < 8)
			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
		else
			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
						       8) * 4;

		I915_WRITE(fence_reg, 0);
	}

	reg->obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	list_del_init(&reg->lru_list);
}

/**
 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
 * to the buffer to finish, and then resets the fence register.
 * @obj: tiled object holding a fence register.
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	/* If we've changed tiling, GTT-mappings of the object
	 * need to re-fault to ensure that the correct fence register
	 * setup is in place.
	 */
	i915_gem_release_mmap(obj);

	/* On the i915, GPU access to tiled buffers is via a fence,
	 * therefore we must wait for any outstanding access to complete
	 * before clearing the fence.
	 */
	if (!IS_I965G(dev)) {
		int ret;

		ret = i915_gem_object_flush_gpu_write_domain(obj);
		if (ret != 0)
			return ret;

		ret = i915_gem_object_wait_rendering(obj);
		if (ret != 0)
			return ret;
	}

	i915_gem_object_flush_gtt_write_domain(obj);
	i915_gem_clear_fence_reg(obj);

	return 0;
}
2620
2621/**
Eric Anholt673a3942008-07-30 12:06:12 -07002622 * Finds free space in the GTT aperture and binds the object there.
2623 */
2624static int
2625i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2626{
2627 struct drm_device *dev = obj->dev;
2628 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01002629 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002630 struct drm_mm_node *free_space;
Chris Wilson4bdadb92010-01-27 13:36:32 +00002631 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Chris Wilson07f73f62009-09-14 16:50:30 +01002632 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002633
Chris Wilsonbb6baf72009-09-22 14:24:13 +01002634 if (obj_priv->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002635 DRM_ERROR("Attempting to bind a purgeable object\n");
2636 return -EINVAL;
2637 }
2638
Eric Anholt673a3942008-07-30 12:06:12 -07002639 if (alignment == 0)
Jesse Barnes0f973f22009-01-26 17:10:45 -08002640 alignment = i915_gem_get_gtt_alignment(obj);
Daniel Vetter8d7773a2009-03-29 14:09:41 +02002641 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002642 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2643 return -EINVAL;
2644 }
2645
Chris Wilson654fc602010-05-27 13:18:21 +01002646 /* If the object is bigger than the entire aperture, reject it early
2647 * before evicting everything in a vain attempt to find space.
2648 */
2649 if (obj->size > dev->gtt_total) {
2650 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2651 return -E2BIG;
2652 }
2653
Eric Anholt673a3942008-07-30 12:06:12 -07002654 search_free:
2655 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2656 obj->size, alignment, 0);
2657 if (free_space != NULL) {
2658 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2659 alignment);
Daniel Vetterdb3307a2010-07-02 15:02:12 +01002660 if (obj_priv->gtt_space != NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002661 obj_priv->gtt_offset = obj_priv->gtt_space->start;
Eric Anholt673a3942008-07-30 12:06:12 -07002662 }
2663 if (obj_priv->gtt_space == NULL) {
2664		/* We couldn't find free space, so evict something and retry;
2665		 * if even eviction can't make room, we're out of memory.
2666		 */
2667#if WATCH_LRU
2668 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2669#endif
Chris Wilson07f73f62009-09-14 16:50:30 +01002670 ret = i915_gem_evict_something(dev, obj->size);
Chris Wilson97311292009-09-21 00:22:34 +01002671 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002672 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002673
Eric Anholt673a3942008-07-30 12:06:12 -07002674 goto search_free;
2675 }
2676
2677#if WATCH_BUF
Krzysztof Halasacfd43c02009-06-20 00:31:28 +02002678 DRM_INFO("Binding object of size %zd at 0x%08x\n",
Eric Anholt673a3942008-07-30 12:06:12 -07002679 obj->size, obj_priv->gtt_offset);
2680#endif
Chris Wilson4bdadb92010-01-27 13:36:32 +00002681 ret = i915_gem_object_get_pages(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002682 if (ret) {
2683 drm_mm_put_block(obj_priv->gtt_space);
2684 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002685
2686 if (ret == -ENOMEM) {
2687 /* first try to clear up some space from the GTT */
2688 ret = i915_gem_evict_something(dev, obj->size);
2689 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002690 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002691 if (gfpmask) {
2692 gfpmask = 0;
2693 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002694 }
2695
2696 return ret;
2697 }
2698
2699 goto search_free;
2700 }
2701
Eric Anholt673a3942008-07-30 12:06:12 -07002702 return ret;
2703 }
2704
Eric Anholt673a3942008-07-30 12:06:12 -07002705 /* Create an AGP memory structure pointing at our pages, and bind it
2706 * into the GTT.
2707 */
2708 obj_priv->agp_mem = drm_agp_bind_pages(dev,
Eric Anholt856fa192009-03-19 14:10:50 -07002709 obj_priv->pages,
Chris Wilson07f73f62009-09-14 16:50:30 +01002710 obj->size >> PAGE_SHIFT,
Keith Packardba1eb1d2008-10-14 19:55:10 -07002711 obj_priv->gtt_offset,
2712 obj_priv->agp_type);
Eric Anholt673a3942008-07-30 12:06:12 -07002713 if (obj_priv->agp_mem == NULL) {
Eric Anholt856fa192009-03-19 14:10:50 -07002714 i915_gem_object_put_pages(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002715 drm_mm_put_block(obj_priv->gtt_space);
2716 obj_priv->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002717
2718 ret = i915_gem_evict_something(dev, obj->size);
Chris Wilson97311292009-09-21 00:22:34 +01002719 if (ret)
Chris Wilson07f73f62009-09-14 16:50:30 +01002720 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002721
2722 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002723 }
2724 atomic_inc(&dev->gtt_count);
2725 atomic_add(obj->size, &dev->gtt_memory);
2726
2727 /* Assert that the object is not currently in any GPU domain. As it
2728 * wasn't in the GTT, there shouldn't be any way it could have been in
2729	 * a GPU cache.
2730 */
Chris Wilson21d509e2009-06-06 09:46:02 +01002731 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2732 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002733
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002734 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2735
Eric Anholt673a3942008-07-30 12:06:12 -07002736 return 0;
2737}
2738
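/*
 * Editorial note on the retry flow above: every failure funnels back to
 * the search_free label, first by evicting from the GTT, then, if the
 * page allocation itself failed, by clearing the __GFP_NORETRY and
 * __GFP_NOWARN flags so the allocator may reclaim more aggressively
 * before we finally give up.
 */
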
2739void
2740i915_gem_clflush_object(struct drm_gem_object *obj)
2741{
Daniel Vetter23010e42010-03-08 13:35:02 +01002742 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002743
2744 /* If we don't have a page list set up, then we're not pinned
2745 * to GPU, and we can ignore the cache flush because it'll happen
2746 * again at bind time.
2747 */
Eric Anholt856fa192009-03-19 14:10:50 -07002748 if (obj_priv->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002749 return;
2750
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002751 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002752
Eric Anholt856fa192009-03-19 14:10:50 -07002753 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002754}
2755
Eric Anholte47c68e2008-11-14 13:35:19 -08002756/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002757static int
Eric Anholte47c68e2008-11-14 13:35:19 -08002758i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2759{
2760 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002761 uint32_t old_write_domain;
Zou Nan hai852835f2010-05-21 09:08:56 +08002762 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002763
2764 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002765 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002766
2767 /* Queue the GPU write cache flushing we need. */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002768 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002769 i915_gem_flush(dev, 0, obj->write_domain);
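	/* i915_add_request() returns the new request's seqno, or 0 if
	 * allocating the request failed, hence the inverted-looking test.
	 */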
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002770 if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
2771 return -ENOMEM;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002772
2773 trace_i915_gem_object_change_domain(obj,
2774 obj->read_domains,
2775 old_write_domain);
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002776 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002777}
2778
2779/** Flushes the GTT write domain for the object if it's dirty. */
2780static void
2781i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2782{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002783 uint32_t old_write_domain;
2784
Eric Anholte47c68e2008-11-14 13:35:19 -08002785 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2786 return;
2787
2788 /* No actual flushing is required for the GTT write domain. Writes
2789 * to it immediately go to main memory as far as we know, so there's
2790 * no chipset flush. It also doesn't land in render cache.
2791 */
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002792 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002793 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002794
2795 trace_i915_gem_object_change_domain(obj,
2796 obj->read_domains,
2797 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002798}
2799
2800/** Flushes the CPU write domain for the object if it's dirty. */
2801static void
2802i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2803{
2804 struct drm_device *dev = obj->dev;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002805 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002806
2807 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2808 return;
2809
2810 i915_gem_clflush_object(obj);
2811 drm_agp_chipset_flush(dev);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002812 old_write_domain = obj->write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002813 obj->write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002814
2815 trace_i915_gem_object_change_domain(obj,
2816 obj->read_domains,
2817 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002818}
2819
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002820int
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05002821i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2822{
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002823 int ret = 0;
2824
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05002825 switch (obj->write_domain) {
2826 case I915_GEM_DOMAIN_GTT:
2827 i915_gem_object_flush_gtt_write_domain(obj);
2828 break;
2829 case I915_GEM_DOMAIN_CPU:
2830 i915_gem_object_flush_cpu_write_domain(obj);
2831 break;
2832 default:
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002833 ret = i915_gem_object_flush_gpu_write_domain(obj);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05002834 break;
2835 }
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002836
2837 return ret;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05002838}
2839
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002840/**
2841 * Moves a single object to the GTT read, and possibly write domain.
2842 *
2843 * This function returns when the move is complete, including waiting on
2844 * flushes to occur.
2845 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002846int
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002847i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2848{
Daniel Vetter23010e42010-03-08 13:35:02 +01002849 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002850 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002851 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002852
Eric Anholt02354392008-11-26 13:58:13 -08002853 /* Not valid to be called on unbound objects. */
2854 if (obj_priv->gtt_space == NULL)
2855 return -EINVAL;
2856
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002857 ret = i915_gem_object_flush_gpu_write_domain(obj);
2858 if (ret != 0)
2859 return ret;
2860
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002861 /* Wait on any GPU rendering and flushing to occur. */
Eric Anholte47c68e2008-11-14 13:35:19 -08002862 ret = i915_gem_object_wait_rendering(obj);
2863 if (ret != 0)
2864 return ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002865
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002866 old_write_domain = obj->write_domain;
2867 old_read_domains = obj->read_domains;
2868
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002869 /* If we're writing through the GTT domain, then CPU and GPU caches
2870 * will need to be invalidated at next use.
2871 */
2872 if (write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002873 obj->read_domains &= I915_GEM_DOMAIN_GTT;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002874
Eric Anholte47c68e2008-11-14 13:35:19 -08002875 i915_gem_object_flush_cpu_write_domain(obj);
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002876
2877 /* It should now be out of any other write domains, and we can update
2878 * the domain values for our changes.
2879 */
2880 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2881 obj->read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002882 if (write) {
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002883 obj->write_domain = I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002884 obj_priv->dirty = 1;
2885 }
2886
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002887 trace_i915_gem_object_change_domain(obj,
2888 old_read_domains,
2889 old_write_domain);
2890
Eric Anholte47c68e2008-11-14 13:35:19 -08002891 return 0;
2892}
2893
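/*
 * Usage sketch (illustrative only, caller assumed): a pwrite-style path
 * that writes through the GTT would move the object into the GTT domain
 * first, then copy user data through the aperture mapping:
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 *	if (ret != 0)
 *		return ret;
 *	... write through dev_priv->mm.gtt_mapping ...
 */
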
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002894/*
2895 * Prepare buffer for display plane. Use uninterruptible for possible flush
2896 * wait, as during modesetting we're not supposed to be interrupted.
2897 */
2898int
2899i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2900{
2901 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01002902 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002903 uint32_t old_write_domain, old_read_domains;
2904 int ret;
2905
2906 /* Not valid to be called on unbound objects. */
2907 if (obj_priv->gtt_space == NULL)
2908 return -EINVAL;
2909
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002910 ret = i915_gem_object_flush_gpu_write_domain(obj);
2911 if (ret)
2912 return ret;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002913
2914 /* Wait on any GPU rendering and flushing to occur. */
2915 if (obj_priv->active) {
2916#if WATCH_BUF
2917 DRM_INFO("%s: object %p wait for seqno %08x\n",
2918 __func__, obj, obj_priv->last_rendering_seqno);
2919#endif
Zou Nan hai852835f2010-05-21 09:08:56 +08002920 ret = i915_do_wait_request(dev,
2921 obj_priv->last_rendering_seqno,
2922 0,
2923 obj_priv->ring);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002924 if (ret != 0)
2925 return ret;
2926 }
2927
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002928 i915_gem_object_flush_cpu_write_domain(obj);
2929
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002930 old_write_domain = obj->write_domain;
2931 old_read_domains = obj->read_domains;
2932
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002933 /* It should now be out of any other write domains, and we can update
2934 * the domain values for our changes.
2935 */
2936 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002937 obj->read_domains = I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002938 obj->write_domain = I915_GEM_DOMAIN_GTT;
2939 obj_priv->dirty = 1;
2940
2941 trace_i915_gem_object_change_domain(obj,
2942 old_read_domains,
2943 old_write_domain);
2944
2945 return 0;
2946}
2947
Eric Anholte47c68e2008-11-14 13:35:19 -08002948/**
2949 * Moves a single object to the CPU read, and possibly write domain.
2950 *
2951 * This function returns when the move is complete, including waiting on
2952 * flushes to occur.
2953 */
2954static int
2955i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2956{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002957 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002958 int ret;
2959
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002960 ret = i915_gem_object_flush_gpu_write_domain(obj);
2961 if (ret)
2962 return ret;
2963
Eric Anholte47c68e2008-11-14 13:35:19 -08002964 /* Wait on any GPU rendering and flushing to occur. */
2965 ret = i915_gem_object_wait_rendering(obj);
2966 if (ret != 0)
2967 return ret;
2968
2969 i915_gem_object_flush_gtt_write_domain(obj);
2970
2971 /* If we have a partially-valid cache of the object in the CPU,
2972 * finish invalidating it and free the per-page flags.
2973 */
2974 i915_gem_object_set_to_full_cpu_read_domain(obj);
2975
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002976 old_write_domain = obj->write_domain;
2977 old_read_domains = obj->read_domains;
2978
Eric Anholte47c68e2008-11-14 13:35:19 -08002979 /* Flush the CPU cache if it's still invalid. */
2980 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2981 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002982
2983 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2984 }
2985
2986 /* It should now be out of any other write domains, and we can update
2987 * the domain values for our changes.
2988 */
2989 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2990
2991 /* If we're writing through the CPU, then the GPU read domains will
2992 * need to be invalidated at next use.
2993 */
2994 if (write) {
2995 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2996 obj->write_domain = I915_GEM_DOMAIN_CPU;
2997 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002998
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002999 trace_i915_gem_object_change_domain(obj,
3000 old_read_domains,
3001 old_write_domain);
3002
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003003 return 0;
3004}
3005
Eric Anholt673a3942008-07-30 12:06:12 -07003006/*
3007 * Set the next domain for the specified object. This
3007 * may not actually perform the necessary flushing/invalidating though,
3009 * as that may want to be batched with other set_domain operations
3010 *
3011 * This is (we hope) the only really tricky part of gem. The goal
3012 * is fairly simple -- track which caches hold bits of the object
3013 * and make sure they remain coherent. A few concrete examples may
3014 * help to explain how it works. For shorthand, we use the notation
3015 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
3016 * a pair of read and write domain masks.
3017 *
3018 * Case 1: the batch buffer
3019 *
3020 * 1. Allocated
3021 * 2. Written by CPU
3022 * 3. Mapped to GTT
3023 * 4. Read by GPU
3024 * 5. Unmapped from GTT
3025 * 6. Freed
3026 *
3027 * Let's take these a step at a time
3028 *
3029 * 1. Allocated
3030 * Pages allocated from the kernel may still have
3031 * cache contents, so we set them to (CPU, CPU) always.
3032 * 2. Written by CPU (using pwrite)
3033 * The pwrite function calls set_domain (CPU, CPU) and
3034 * this function does nothing (as nothing changes)
3035 * 3. Mapped to GTT
3036 * This function asserts that the object is not
3037 * currently in any GPU-based read or write domains
3038 * 4. Read by GPU
3039 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
3040 * As write_domain is zero, this function adds in the
3041 * current read domains (CPU+COMMAND, 0).
3042 * flush_domains is set to CPU.
3043 * invalidate_domains is set to COMMAND
3044 * clflush is run to get data out of the CPU caches
3045 * then i915_dev_set_domain calls i915_gem_flush to
3046 * emit an MI_FLUSH and drm_agp_chipset_flush
3047 * 5. Unmapped from GTT
3048 * i915_gem_object_unbind calls set_domain (CPU, CPU)
3049 * flush_domains and invalidate_domains end up both zero
3050 * so no flushing/invalidating happens
3051 * 6. Freed
3052 * yay, done
3053 *
3054 * Case 2: The shared render buffer
3055 *
3056 * 1. Allocated
3057 * 2. Mapped to GTT
3058 * 3. Read/written by GPU
3059 * 4. set_domain to (CPU,CPU)
3060 * 5. Read/written by CPU
3061 * 6. Read/written by GPU
3062 *
3063 * 1. Allocated
3064 * Same as last example, (CPU, CPU)
3065 * 2. Mapped to GTT
3066 * Nothing changes (assertions find that it is not in the GPU)
3067 * 3. Read/written by GPU
3068 * execbuffer calls set_domain (RENDER, RENDER)
3069 * flush_domains gets CPU
3070 * invalidate_domains gets GPU
3071 * clflush (obj)
3072 * MI_FLUSH and drm_agp_chipset_flush
3073 * 4. set_domain (CPU, CPU)
3074 * flush_domains gets GPU
3075 * invalidate_domains gets CPU
3076 * wait_rendering (obj) to make sure all drawing is complete.
3077 * This will include an MI_FLUSH to get the data from GPU
3078 * to memory
3079 * clflush (obj) to invalidate the CPU cache
3080 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3081 * 5. Read/written by CPU
3082 * cache lines are loaded and dirtied
3083 * 6. Read/written by GPU
3084 * Same as last GPU access
3085 *
3086 * Case 3: The constant buffer
3087 *
3088 * 1. Allocated
3089 * 2. Written by CPU
3090 * 3. Read by GPU
3091 * 4. Updated (written) by CPU again
3092 * 5. Read by GPU
3093 *
3094 * 1. Allocated
3095 * (CPU, CPU)
3096 * 2. Written by CPU
3097 * (CPU, CPU)
3098 * 3. Read by GPU
3099 * (CPU+RENDER, 0)
3100 * flush_domains = CPU
3101 * invalidate_domains = RENDER
3102 * clflush (obj)
3103 * MI_FLUSH
3104 * drm_agp_chipset_flush
3105 * 4. Updated (written) by CPU again
3106 * (CPU, CPU)
3107 * flush_domains = 0 (no previous write domain)
3108 * invalidate_domains = 0 (no new read domains)
3109 * 5. Read by GPU
3110 * (CPU+RENDER, 0)
3111 * flush_domains = CPU
3112 * invalidate_domains = RENDER
3113 * clflush (obj)
3114 * MI_FLUSH
3115 * drm_agp_chipset_flush
3116 */
Keith Packardc0d90822008-11-20 23:11:08 -08003117static void
Eric Anholt8b0e3782009-02-19 14:40:50 -08003118i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003119{
3120 struct drm_device *dev = obj->dev;
Daniel Vetter23010e42010-03-08 13:35:02 +01003121 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003122 uint32_t invalidate_domains = 0;
3123 uint32_t flush_domains = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003124 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003125
Eric Anholt8b0e3782009-02-19 14:40:50 -08003126 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
3127 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
Eric Anholt673a3942008-07-30 12:06:12 -07003128
Jesse Barnes652c3932009-08-17 13:31:43 -07003129 intel_mark_busy(dev, obj);
3130
Eric Anholt673a3942008-07-30 12:06:12 -07003131#if WATCH_BUF
3132 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
3133 __func__, obj,
Eric Anholt8b0e3782009-02-19 14:40:50 -08003134 obj->read_domains, obj->pending_read_domains,
3135 obj->write_domain, obj->pending_write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003136#endif
3137 /*
3138 * If the object isn't moving to a new write domain,
3139 * let the object stay in multiple read domains
3140 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003141 if (obj->pending_write_domain == 0)
3142 obj->pending_read_domains |= obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003143 else
3144 obj_priv->dirty = 1;
3145
3146 /*
3147 * Flush the current write domain if
3148 * the new read domains don't match. Invalidate
3149 * any read domains which differ from the old
3150 * write domain
3151 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003152 if (obj->write_domain &&
3153 obj->write_domain != obj->pending_read_domains) {
Eric Anholt673a3942008-07-30 12:06:12 -07003154 flush_domains |= obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003155 invalidate_domains |=
3156 obj->pending_read_domains & ~obj->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -07003157 }
3158 /*
3159 * Invalidate any read caches which may have
3160 * stale data. That is, any new read domains.
3161 */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003162 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003163 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3164#if WATCH_BUF
3165 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3166 __func__, flush_domains, invalidate_domains);
3167#endif
Eric Anholt673a3942008-07-30 12:06:12 -07003168 i915_gem_clflush_object(obj);
3169 }
3170
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003171 old_read_domains = obj->read_domains;
3172
Eric Anholtefbeed92009-02-19 14:54:51 -08003173 /* The actual obj->write_domain will be updated with
3174 * pending_write_domain after we emit the accumulated flush for all
3175 * of our domain changes in execbuffers (which clears objects'
3176 * write_domains). So if we have a current write domain that we
3177 * aren't changing, set pending_write_domain to that.
3178 */
3179 if (flush_domains == 0 && obj->pending_write_domain == 0)
3180 obj->pending_write_domain = obj->write_domain;
Eric Anholt8b0e3782009-02-19 14:40:50 -08003181 obj->read_domains = obj->pending_read_domains;
Eric Anholt673a3942008-07-30 12:06:12 -07003182
3183 dev->invalidate_domains |= invalidate_domains;
3184 dev->flush_domains |= flush_domains;
3185#if WATCH_BUF
3186 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3187 __func__,
3188 obj->read_domains, obj->write_domain,
3189 dev->invalidate_domains, dev->flush_domains);
3190#endif
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003191
3192 trace_i915_gem_object_change_domain(obj,
3193 old_read_domains,
3194 obj->write_domain);
Eric Anholt673a3942008-07-30 12:06:12 -07003195}
3196
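/*
 * Worked example for the function above (illustrative): an object last
 * written by the CPU that is now read by the render engine enters with
 * write_domain == CPU and pending_read_domains == RENDER, so
 *
 *	flush_domains      |= CPU	(old write domain differs)
 *	invalidate_domains |= RENDER	(new read domain was not current)
 *
 * which runs the clflush here and leads to the MI_FLUSH emitted from
 * execbuffer, exactly as in Case 1 of the comment above.
 */
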
3197/**
Eric Anholte47c68e2008-11-14 13:35:19 -08003198 * Moves the object from a partially valid CPU read domain to a fully valid one.
Eric Anholt673a3942008-07-30 12:06:12 -07003199 *
Eric Anholte47c68e2008-11-14 13:35:19 -08003200 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3201 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3202 */
3203static void
3204i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3205{
Daniel Vetter23010e42010-03-08 13:35:02 +01003206 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003207
3208 if (!obj_priv->page_cpu_valid)
3209 return;
3210
3211 /* If we're partially in the CPU read domain, finish moving it in.
3212 */
3213 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3214 int i;
3215
3216 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3217 if (obj_priv->page_cpu_valid[i])
3218 continue;
Eric Anholt856fa192009-03-19 14:10:50 -07003219 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08003220 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003221 }
3222
3223 /* Free the page_cpu_valid mappings which are now stale, whether
3224 * or not we've got I915_GEM_DOMAIN_CPU.
3225 */
Eric Anholt9a298b22009-03-24 12:23:04 -07003226 kfree(obj_priv->page_cpu_valid);
Eric Anholte47c68e2008-11-14 13:35:19 -08003227 obj_priv->page_cpu_valid = NULL;
3228}
3229
3230/**
3231 * Set the CPU read domain on a range of the object.
3232 *
3233 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3234 * not entirely valid. The page_cpu_valid member of the object flags which
3235 * pages have been flushed, and will be respected by
3236 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3237 * of the whole object.
3238 *
3239 * This function returns when the move is complete, including waiting on
3240 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07003241 */
3242static int
Eric Anholte47c68e2008-11-14 13:35:19 -08003243i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3244 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07003245{
Daniel Vetter23010e42010-03-08 13:35:02 +01003246 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003247 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003248 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003249
Eric Anholte47c68e2008-11-14 13:35:19 -08003250 if (offset == 0 && size == obj->size)
3251 return i915_gem_object_set_to_cpu_domain(obj, 0);
3252
Chris Wilson2dafb1e2010-06-07 14:03:05 +01003253 ret = i915_gem_object_flush_gpu_write_domain(obj);
3254 if (ret)
3255 return ret;
3256
Eric Anholte47c68e2008-11-14 13:35:19 -08003257 /* Wait on any GPU rendering and flushing to occur. */
3258 ret = i915_gem_object_wait_rendering(obj);
3259 if (ret != 0)
3260 return ret;
3261 i915_gem_object_flush_gtt_write_domain(obj);
3262
3263 /* If we're already fully in the CPU read domain, we're done. */
3264 if (obj_priv->page_cpu_valid == NULL &&
3265 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003266 return 0;
3267
Eric Anholte47c68e2008-11-14 13:35:19 -08003268 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3269 * newly adding I915_GEM_DOMAIN_CPU
3270 */
Eric Anholt673a3942008-07-30 12:06:12 -07003271 if (obj_priv->page_cpu_valid == NULL) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003272 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3273 GFP_KERNEL);
Eric Anholte47c68e2008-11-14 13:35:19 -08003274 if (obj_priv->page_cpu_valid == NULL)
3275 return -ENOMEM;
3276 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3277 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003278
3279 /* Flush the cache on any pages that are still invalid from the CPU's
3280 * perspective.
3281 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003282 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3283 i++) {
Eric Anholt673a3942008-07-30 12:06:12 -07003284 if (obj_priv->page_cpu_valid[i])
3285 continue;
3286
Eric Anholt856fa192009-03-19 14:10:50 -07003287 drm_clflush_pages(obj_priv->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003288
3289 obj_priv->page_cpu_valid[i] = 1;
3290 }
3291
Eric Anholte47c68e2008-11-14 13:35:19 -08003292 /* It should now be out of any other write domains, and we can update
3293 * the domain values for our changes.
3294 */
3295 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3296
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003297 old_read_domains = obj->read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003298 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3299
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003300 trace_i915_gem_object_change_domain(obj,
3301 old_read_domains,
3302 obj->write_domain);
3303
Eric Anholt673a3942008-07-30 12:06:12 -07003304 return 0;
3305}
3306
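/*
 * Usage sketch (illustrative only, caller assumed): a pread-style path
 * touching only part of the object can avoid flushing the whole thing:
 *
 *	ret = i915_gem_object_set_cpu_read_domain_range(obj, offset, size);
 *	if (ret != 0)
 *		return ret;
 *	... read the pages covering [offset, offset + size) ...
 */
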
3307/**
Eric Anholt673a3942008-07-30 12:06:12 -07003308 * Pin an object to the GTT and evaluate the relocations landing in it.
3309 */
3310static int
3311i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3312 struct drm_file *file_priv,
Jesse Barnes76446ca2009-12-17 22:05:42 -05003313 struct drm_i915_gem_exec_object2 *entry,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003314 struct drm_i915_gem_relocation_entry *relocs)
Eric Anholt673a3942008-07-30 12:06:12 -07003315{
3316 struct drm_device *dev = obj->dev;
Keith Packard0839ccb2008-10-30 19:38:48 -07003317 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vetter23010e42010-03-08 13:35:02 +01003318 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003319 int i, ret;
Keith Packard0839ccb2008-10-30 19:38:48 -07003320 void __iomem *reloc_page;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003321 bool need_fence;
3322
3323 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3324 obj_priv->tiling_mode != I915_TILING_NONE;
3325
3326 /* Check fence reg constraints and rebind if necessary */
Chris Wilson808b24d62010-05-27 13:18:15 +01003327 if (need_fence &&
3328 !i915_gem_object_fence_offset_ok(obj,
3329 obj_priv->tiling_mode)) {
3330 ret = i915_gem_object_unbind(obj);
3331 if (ret)
3332 return ret;
3333 }
Eric Anholt673a3942008-07-30 12:06:12 -07003334
3335 /* Choose the GTT offset for our buffer and put it there. */
3336 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3337 if (ret)
3338 return ret;
3339
Jesse Barnes76446ca2009-12-17 22:05:42 -05003340 /*
3341 * Pre-965 chips need a fence register set up in order to
3342 * properly handle blits to/from tiled surfaces.
3343 */
3344 if (need_fence) {
3345 ret = i915_gem_object_get_fence_reg(obj);
3346 if (ret != 0) {
Jesse Barnes76446ca2009-12-17 22:05:42 -05003347 i915_gem_object_unpin(obj);
3348 return ret;
3349 }
3350 }
3351
Eric Anholt673a3942008-07-30 12:06:12 -07003352 entry->offset = obj_priv->gtt_offset;
3353
Eric Anholt673a3942008-07-30 12:06:12 -07003354 /* Apply the relocations, using the GTT aperture to avoid cache
3355 * flushing requirements.
3356 */
3357 for (i = 0; i < entry->relocation_count; i++) {
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003358		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003359 struct drm_gem_object *target_obj;
3360 struct drm_i915_gem_object *target_obj_priv;
Eric Anholt3043c602008-10-02 12:24:47 -07003361 uint32_t reloc_val, reloc_offset;
3362 uint32_t __iomem *reloc_entry;
Eric Anholt673a3942008-07-30 12:06:12 -07003363
Eric Anholt673a3942008-07-30 12:06:12 -07003364 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003365 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003366 if (target_obj == NULL) {
3367 i915_gem_object_unpin(obj);
3368 return -EBADF;
3369 }
Daniel Vetter23010e42010-03-08 13:35:02 +01003370 target_obj_priv = to_intel_bo(target_obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003371
Chris Wilson8542a0b2009-09-09 21:15:15 +01003372#if WATCH_RELOC
3373 DRM_INFO("%s: obj %p offset %08x target %d "
3374 "read %08x write %08x gtt %08x "
3375 "presumed %08x delta %08x\n",
3376 __func__,
3377 obj,
3378 (int) reloc->offset,
3379 (int) reloc->target_handle,
3380 (int) reloc->read_domains,
3381 (int) reloc->write_domain,
3382 (int) target_obj_priv->gtt_offset,
3383 (int) reloc->presumed_offset,
3384 reloc->delta);
3385#endif
3386
Eric Anholt673a3942008-07-30 12:06:12 -07003387 /* The target buffer should have appeared before us in the
3388 * exec_object list, so it should have a GTT space bound by now.
3389 */
3390 if (target_obj_priv->gtt_space == NULL) {
3391 DRM_ERROR("No GTT space found for object %d\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003392 reloc->target_handle);
Eric Anholt673a3942008-07-30 12:06:12 -07003393 drm_gem_object_unreference(target_obj);
3394 i915_gem_object_unpin(obj);
3395 return -EINVAL;
3396 }
3397
Chris Wilson8542a0b2009-09-09 21:15:15 +01003398 /* Validate that the target is in a valid r/w GPU domain */
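		/* write_domain & (write_domain - 1) is nonzero iff more
		 * than one write domain bit is set.
		 */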
Daniel Vetter16edd552010-02-19 11:52:02 +01003399 if (reloc->write_domain & (reloc->write_domain - 1)) {
3400 DRM_ERROR("reloc with multiple write domains: "
3401 "obj %p target %d offset %d "
3402 "read %08x write %08x",
3403 obj, reloc->target_handle,
3404 (int) reloc->offset,
3405 reloc->read_domains,
3406 reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
3407			return -EINVAL;
3408 }
Chris Wilson8542a0b2009-09-09 21:15:15 +01003409 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3410 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3411 DRM_ERROR("reloc with read/write CPU domains: "
3412 "obj %p target %d offset %d "
3413 "read %08x write %08x",
3414 obj, reloc->target_handle,
3415 (int) reloc->offset,
3416 reloc->read_domains,
3417 reloc->write_domain);
3418 drm_gem_object_unreference(target_obj);
3419 i915_gem_object_unpin(obj);
3420 return -EINVAL;
3421 }
3422 if (reloc->write_domain && target_obj->pending_write_domain &&
3423 reloc->write_domain != target_obj->pending_write_domain) {
3424 DRM_ERROR("Write domain conflict: "
3425 "obj %p target %d offset %d "
3426 "new %08x old %08x\n",
3427 obj, reloc->target_handle,
3428 (int) reloc->offset,
3429 reloc->write_domain,
3430 target_obj->pending_write_domain);
3431 drm_gem_object_unreference(target_obj);
3432 i915_gem_object_unpin(obj);
3433 return -EINVAL;
3434 }
3435
3436 target_obj->pending_read_domains |= reloc->read_domains;
3437 target_obj->pending_write_domain |= reloc->write_domain;
3438
3439 /* If the relocation already has the right value in it, no
3440 * more work needs to be done.
3441 */
3442 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3443 drm_gem_object_unreference(target_obj);
3444 continue;
3445 }
3446
3447 /* Check that the relocation address is valid... */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003448 if (reloc->offset > obj->size - 4) {
Eric Anholt673a3942008-07-30 12:06:12 -07003449 DRM_ERROR("Relocation beyond object bounds: "
3450 "obj %p target %d offset %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003451 obj, reloc->target_handle,
3452 (int) reloc->offset, (int) obj->size);
Eric Anholt673a3942008-07-30 12:06:12 -07003453 drm_gem_object_unreference(target_obj);
3454 i915_gem_object_unpin(obj);
3455 return -EINVAL;
3456 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003457 if (reloc->offset & 3) {
Eric Anholt673a3942008-07-30 12:06:12 -07003458 DRM_ERROR("Relocation not 4-byte aligned: "
3459 "obj %p target %d offset %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003460 obj, reloc->target_handle,
3461 (int) reloc->offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003462 drm_gem_object_unreference(target_obj);
3463 i915_gem_object_unpin(obj);
3464 return -EINVAL;
3465 }
3466
Chris Wilson8542a0b2009-09-09 21:15:15 +01003467 /* and points to somewhere within the target object. */
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003468 if (reloc->delta >= target_obj->size) {
3469 DRM_ERROR("Relocation beyond target object bounds: "
3470 "obj %p target %d delta %d size %d.\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003471 obj, reloc->target_handle,
Chris Wilsoncd0b9fb2009-09-15 23:23:18 +01003472 (int) reloc->delta, (int) target_obj->size);
Chris Wilson491152b2009-02-11 14:26:32 +00003473 drm_gem_object_unreference(target_obj);
3474 i915_gem_object_unpin(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003475 return -EINVAL;
3476 }
3477
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003478 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3479 if (ret != 0) {
3480 drm_gem_object_unreference(target_obj);
3481 i915_gem_object_unpin(obj);
3482			return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003483 }
3484
3485 /* Map the page containing the relocation we're going to
3486 * perform.
3487 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003488 reloc_offset = obj_priv->gtt_offset + reloc->offset;
Keith Packard0839ccb2008-10-30 19:38:48 -07003489 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3490 (reloc_offset &
3491 ~(PAGE_SIZE - 1)));
Eric Anholt3043c602008-10-02 12:24:47 -07003492 reloc_entry = (uint32_t __iomem *)(reloc_page +
Keith Packard0839ccb2008-10-30 19:38:48 -07003493 (reloc_offset & (PAGE_SIZE - 1)));
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003494 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
Eric Anholt673a3942008-07-30 12:06:12 -07003495
3496#if WATCH_BUF
3497 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003498 obj, (unsigned int) reloc->offset,
Eric Anholt673a3942008-07-30 12:06:12 -07003499 readl(reloc_entry), reloc_val);
3500#endif
3501 writel(reloc_val, reloc_entry);
Keith Packard0839ccb2008-10-30 19:38:48 -07003502 io_mapping_unmap_atomic(reloc_page);
Eric Anholt673a3942008-07-30 12:06:12 -07003503
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003504 /* The updated presumed offset for this entry will be
3505 * copied back out to the user.
Eric Anholt673a3942008-07-30 12:06:12 -07003506 */
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003507 reloc->presumed_offset = target_obj_priv->gtt_offset;
Eric Anholt673a3942008-07-30 12:06:12 -07003508
3509 drm_gem_object_unreference(target_obj);
3510 }
3511
Eric Anholt673a3942008-07-30 12:06:12 -07003512#if WATCH_BUF
3513 if (0)
3514 i915_gem_dump_object(obj, 128, __func__, ~0);
3515#endif
3516 return 0;
3517}
3518
Eric Anholt673a3942008-07-30 12:06:12 -07003519/* Throttle our rendering by waiting until the ring has completed our requests
3520 * emitted over 20 msec ago.
3521 *
Eric Anholtb9624422009-06-03 07:27:35 +00003522 * Note that if we were to use the current jiffies each time around the loop,
3523 * we wouldn't escape the function with any frames outstanding if the time to
3524 * render a frame was over 20ms.
3525 *
Eric Anholt673a3942008-07-30 12:06:12 -07003526 * This should get us reasonable parallelism between CPU and GPU but also
3527 * relatively low latency when blocking on a particular request to finish.
3528 */
3529static int
3530i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3531{
3532 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3533 int ret = 0;
Eric Anholtb9624422009-06-03 07:27:35 +00003534 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Eric Anholt673a3942008-07-30 12:06:12 -07003535
3536 mutex_lock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003537 while (!list_empty(&i915_file_priv->mm.request_list)) {
3538 struct drm_i915_gem_request *request;
3539
3540 request = list_first_entry(&i915_file_priv->mm.request_list,
3541 struct drm_i915_gem_request,
3542 client_list);
3543
3544 if (time_after_eq(request->emitted_jiffies, recent_enough))
3545 break;
3546
Zou Nan hai852835f2010-05-21 09:08:56 +08003547 ret = i915_wait_request(dev, request->seqno, request->ring);
Eric Anholtb9624422009-06-03 07:27:35 +00003548 if (ret != 0)
3549 break;
3550 }
Eric Anholt673a3942008-07-30 12:06:12 -07003551 mutex_unlock(&dev->struct_mutex);
Eric Anholtb9624422009-06-03 07:27:35 +00003552
Eric Anholt673a3942008-07-30 12:06:12 -07003553 return ret;
3554}
3555
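/*
 * Illustrative arithmetic for the 20ms window above: with HZ == 250,
 * msecs_to_jiffies(20) is 5 jiffies, so only requests emitted more than
 * 5 ticks before now are waited upon; anything newer lets the client
 * keep queueing work.
 */
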
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003556static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003557i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003558 uint32_t buffer_count,
3559 struct drm_i915_gem_relocation_entry **relocs)
3560{
3561 uint32_t reloc_count = 0, reloc_index = 0, i;
3562 int ret;
3563
3564 *relocs = NULL;
3565 for (i = 0; i < buffer_count; i++) {
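		/* Unsigned wraparound check: if adding this buffer's
		 * relocation count overflows u32, the sum comes out
		 * smaller than the running total.
		 */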
3566 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3567 return -EINVAL;
3568 reloc_count += exec_list[i].relocation_count;
3569 }
3570
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003571 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
Jesse Barnes76446ca2009-12-17 22:05:42 -05003572 if (*relocs == NULL) {
3573 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003574 return -ENOMEM;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003575 }
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003576
3577 for (i = 0; i < buffer_count; i++) {
3578 struct drm_i915_gem_relocation_entry __user *user_relocs;
3579
3580 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3581
3582 ret = copy_from_user(&(*relocs)[reloc_index],
3583 user_relocs,
3584 exec_list[i].relocation_count *
3585 sizeof(**relocs));
3586 if (ret != 0) {
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003587 drm_free_large(*relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003588 *relocs = NULL;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003589 return -EFAULT;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003590 }
3591
3592 reloc_index += exec_list[i].relocation_count;
3593 }
3594
Florian Mickler2bc43b52009-04-06 22:55:41 +02003595 return 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003596}
3597
3598static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003599i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003600 uint32_t buffer_count,
3601 struct drm_i915_gem_relocation_entry *relocs)
3602{
3603 uint32_t reloc_count = 0, i;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003604 int ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003605
Chris Wilson93533c22010-01-31 10:40:48 +00003606 if (relocs == NULL)
3607 return 0;
3608
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003609 for (i = 0; i < buffer_count; i++) {
3610 struct drm_i915_gem_relocation_entry __user *user_relocs;
Florian Mickler2bc43b52009-04-06 22:55:41 +02003611 int unwritten;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003612
3613 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3614
Florian Mickler2bc43b52009-04-06 22:55:41 +02003615 unwritten = copy_to_user(user_relocs,
3616 &relocs[reloc_count],
3617 exec_list[i].relocation_count *
3618 sizeof(*relocs));
3619
3620 if (unwritten) {
3621 ret = -EFAULT;
3622 goto err;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003623 }
3624
3625 reloc_count += exec_list[i].relocation_count;
3626 }
3627
Florian Mickler2bc43b52009-04-06 22:55:41 +02003628err:
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07003629 drm_free_large(relocs);
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003630
3631 return ret;
3632}
3633
Chris Wilson83d60792009-06-06 09:45:57 +01003634static int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003635i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
Chris Wilson83d60792009-06-06 09:45:57 +01003636 uint64_t exec_offset)
3637{
3638 uint32_t exec_start, exec_len;
3639
3640 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3641 exec_len = (uint32_t) exec->batch_len;
3642
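	/* Both the batch start and length must be 8-byte aligned;
	 * OR-ing them lets a single mask test cover both.
	 */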
3643 if ((exec_start | exec_len) & 0x7)
3644 return -EINVAL;
3645
3646 if (!exec_start)
3647 return -EINVAL;
3648
3649 return 0;
3650}
3651
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003652static int
3653i915_gem_wait_for_pending_flip(struct drm_device *dev,
3654 struct drm_gem_object **object_list,
3655 int count)
3656{
3657 drm_i915_private_t *dev_priv = dev->dev_private;
3658 struct drm_i915_gem_object *obj_priv;
3659 DEFINE_WAIT(wait);
3660 int i, ret = 0;
3661
3662 for (;;) {
3663 prepare_to_wait(&dev_priv->pending_flip_queue,
3664 &wait, TASK_INTERRUPTIBLE);
3665 for (i = 0; i < count; i++) {
Daniel Vetter23010e42010-03-08 13:35:02 +01003666 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003667 if (atomic_read(&obj_priv->pending_flip) > 0)
3668 break;
3669 }
3670 if (i == count)
3671 break;
3672
3673 if (!signal_pending(current)) {
3674 mutex_unlock(&dev->struct_mutex);
3675 schedule();
3676 mutex_lock(&dev->struct_mutex);
3677 continue;
3678 }
3679 ret = -ERESTARTSYS;
3680 break;
3681 }
3682 finish_wait(&dev_priv->pending_flip_queue, &wait);
3683
3684 return ret;
3685}
3686
Eric Anholt673a3942008-07-30 12:06:12 -07003688int
Jesse Barnes76446ca2009-12-17 22:05:42 -05003689i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3690 struct drm_file *file_priv,
3691 struct drm_i915_gem_execbuffer2 *args,
3692 struct drm_i915_gem_exec_object2 *exec_list)
Eric Anholt673a3942008-07-30 12:06:12 -07003693{
3694 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003695 struct drm_gem_object **object_list = NULL;
3696 struct drm_gem_object *batch_obj;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003697 struct drm_i915_gem_object *obj_priv;
Eric Anholt201361a2009-03-11 12:30:04 -07003698 struct drm_clip_rect *cliprects = NULL;
Chris Wilson93533c22010-01-31 10:40:48 +00003699 struct drm_i915_gem_relocation_entry *relocs = NULL;
Jesse Barnes76446ca2009-12-17 22:05:42 -05003700 int ret = 0, ret2, i, pinned = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003701 uint64_t exec_offset;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003702 uint32_t seqno, flush_domains, reloc_index;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003703 int pin_tries, flips;
Eric Anholt673a3942008-07-30 12:06:12 -07003704
Zou Nan hai852835f2010-05-21 09:08:56 +08003705 struct intel_ring_buffer *ring = NULL;
3706
Eric Anholt673a3942008-07-30 12:06:12 -07003707#if WATCH_EXEC
3708 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3709 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3710#endif
Zou Nan haid1b851f2010-05-21 09:08:57 +08003711 if (args->flags & I915_EXEC_BSD) {
3712 if (!HAS_BSD(dev)) {
3713 DRM_ERROR("execbuf with wrong flag\n");
3714 return -EINVAL;
3715 }
3716 ring = &dev_priv->bsd_ring;
3717 } else {
3718 ring = &dev_priv->render_ring;
3719 }
3720
Eric Anholt4f481ed2008-09-10 14:22:49 -07003722 if (args->buffer_count < 1) {
3723 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3724 return -EINVAL;
3725 }
Eric Anholtc8e0f932009-11-22 03:49:37 +01003726 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
Jesse Barnes76446ca2009-12-17 22:05:42 -05003727 if (object_list == NULL) {
3728 DRM_ERROR("Failed to allocate object list for %d buffers\n",
Eric Anholt673a3942008-07-30 12:06:12 -07003729 args->buffer_count);
3730 ret = -ENOMEM;
3731 goto pre_mutex_err;
3732 }
Eric Anholt673a3942008-07-30 12:06:12 -07003733
Eric Anholt201361a2009-03-11 12:30:04 -07003734 if (args->num_cliprects != 0) {
Eric Anholt9a298b22009-03-24 12:23:04 -07003735 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3736 GFP_KERNEL);
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003737 if (cliprects == NULL) {
3738 ret = -ENOMEM;
Eric Anholt201361a2009-03-11 12:30:04 -07003739 goto pre_mutex_err;
Owain Ainswortha40e8d32010-02-09 14:25:55 +00003740 }
Eric Anholt201361a2009-03-11 12:30:04 -07003741
3742 ret = copy_from_user(cliprects,
3743 (struct drm_clip_rect __user *)
3744 (uintptr_t) args->cliprects_ptr,
3745 sizeof(*cliprects) * args->num_cliprects);
3746 if (ret != 0) {
3747 DRM_ERROR("copy %d cliprects failed: %d\n",
3748 args->num_cliprects, ret);
			ret = -EFAULT;
3749			goto pre_mutex_err;
3750 }
3751 }
3752
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003753 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3754 &relocs);
3755 if (ret != 0)
3756 goto pre_mutex_err;
3757
Eric Anholt673a3942008-07-30 12:06:12 -07003758 mutex_lock(&dev->struct_mutex);
3759
3760 i915_verify_inactive(dev, __FILE__, __LINE__);
3761
Ben Gamariba1234d2009-09-14 17:48:47 -04003762 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003763 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003764 ret = -EIO;
3765 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003766 }
3767
3768 if (dev_priv->mm.suspended) {
Eric Anholt673a3942008-07-30 12:06:12 -07003769 mutex_unlock(&dev->struct_mutex);
Chris Wilsona198bc82009-02-06 16:55:20 +00003770 ret = -EBUSY;
3771 goto pre_mutex_err;
Eric Anholt673a3942008-07-30 12:06:12 -07003772 }
3773
Keith Packardac94a962008-11-20 23:30:27 -08003774 /* Look up object handles */
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003775 flips = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07003776 for (i = 0; i < args->buffer_count; i++) {
3777 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3778 exec_list[i].handle);
3779 if (object_list[i] == NULL) {
3780 DRM_ERROR("Invalid object handle %d at index %d\n",
3781 exec_list[i].handle, i);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003782 /* prevent error path from reading uninitialized data */
3783 args->buffer_count = i + 1;
Eric Anholt673a3942008-07-30 12:06:12 -07003784 ret = -EBADF;
3785 goto err;
3786 }
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003787
Daniel Vetter23010e42010-03-08 13:35:02 +01003788 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003789 if (obj_priv->in_execbuffer) {
3790 DRM_ERROR("Object %p appears more than once in object list\n",
3791 object_list[i]);
Chris Wilson0ce907f2010-01-23 20:26:35 +00003792 /* prevent error path from reading uninitialized data */
3793 args->buffer_count = i + 1;
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05003794 ret = -EBADF;
3795 goto err;
3796 }
3797 obj_priv->in_execbuffer = true;
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003798 flips += atomic_read(&obj_priv->pending_flip);
3799 }
3800
3801 if (flips > 0) {
3802 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3803 args->buffer_count);
3804 if (ret)
3805 goto err;
Keith Packardac94a962008-11-20 23:30:27 -08003806 }
Eric Anholt673a3942008-07-30 12:06:12 -07003807
Keith Packardac94a962008-11-20 23:30:27 -08003808 /* Pin and relocate */
3809 for (pin_tries = 0; ; pin_tries++) {
3810 ret = 0;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003811 reloc_index = 0;
3812
Keith Packardac94a962008-11-20 23:30:27 -08003813 for (i = 0; i < args->buffer_count; i++) {
3814 object_list[i]->pending_read_domains = 0;
3815 object_list[i]->pending_write_domain = 0;
3816 ret = i915_gem_object_pin_and_relocate(object_list[i],
3817 file_priv,
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003818 &exec_list[i],
3819 &relocs[reloc_index]);
Keith Packardac94a962008-11-20 23:30:27 -08003820 if (ret)
3821 break;
3822 pinned = i + 1;
Eric Anholt40a5f0d2009-03-12 11:23:52 -07003823 reloc_index += exec_list[i].relocation_count;
Keith Packardac94a962008-11-20 23:30:27 -08003824 }
3825 /* success */
3826 if (ret == 0)
3827 break;
3828
3829 /* error other than GTT full, or we've already tried again */
Chris Wilson2939e1f2009-06-06 09:46:03 +01003830 if (ret != -ENOSPC || pin_tries >= 1) {
Chris Wilson07f73f62009-09-14 16:50:30 +01003831 if (ret != -ERESTARTSYS) {
3832 unsigned long long total_size = 0;
Chris Wilson3d1cc472010-05-27 13:18:19 +01003833 int num_fences = 0;
3834 for (i = 0; i < args->buffer_count; i++) {
Chris Wilson43b27f42010-07-02 08:57:15 +01003835 obj_priv = to_intel_bo(object_list[i]);
Chris Wilson3d1cc472010-05-27 13:18:19 +01003836
Chris Wilson07f73f62009-09-14 16:50:30 +01003837 total_size += object_list[i]->size;
Chris Wilson3d1cc472010-05-27 13:18:19 +01003838 num_fences +=
3839 exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
3840 obj_priv->tiling_mode != I915_TILING_NONE;
3841 }
3842 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
Chris Wilson07f73f62009-09-14 16:50:30 +01003843 pinned+1, args->buffer_count,
Chris Wilson3d1cc472010-05-27 13:18:19 +01003844 total_size, num_fences,
3845 ret);
Chris Wilson07f73f62009-09-14 16:50:30 +01003846 DRM_ERROR("%d objects [%d pinned], "
3847 "%d object bytes [%d pinned], "
3848 "%d/%d gtt bytes\n",
3849 atomic_read(&dev->object_count),
3850 atomic_read(&dev->pin_count),
3851 atomic_read(&dev->object_memory),
3852 atomic_read(&dev->pin_memory),
3853 atomic_read(&dev->gtt_memory),
3854 dev->gtt_total);
3855 }
Eric Anholt673a3942008-07-30 12:06:12 -07003856 goto err;
3857 }
Keith Packardac94a962008-11-20 23:30:27 -08003858
3859 /* unpin all of our buffers */
3860 for (i = 0; i < pinned; i++)
3861 i915_gem_object_unpin(object_list[i]);
Eric Anholtb1177632008-12-10 10:09:41 -08003862 pinned = 0;
Keith Packardac94a962008-11-20 23:30:27 -08003863
3864 /* evict everyone we can from the aperture */
3865 ret = i915_gem_evict_everything(dev);
Chris Wilson07f73f62009-09-14 16:50:30 +01003866 if (ret && ret != -ENOSPC)
Keith Packardac94a962008-11-20 23:30:27 -08003867 goto err;
Eric Anholt673a3942008-07-30 12:06:12 -07003868 }
3869
3870 /* Set the pending read domains for the batch buffer to COMMAND */
3871 batch_obj = object_list[args->buffer_count-1];
Chris Wilson5f26a2c2009-06-06 09:45:58 +01003872 if (batch_obj->pending_write_domain) {
3873 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3874 ret = -EINVAL;
3875 goto err;
3876 }
3877 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
Eric Anholt673a3942008-07-30 12:06:12 -07003878
Chris Wilson83d60792009-06-06 09:45:57 +01003879 /* Sanity check the batch buffer, prior to moving objects */
3880 exec_offset = exec_list[args->buffer_count - 1].offset;
3881	ret = i915_gem_check_execbuffer(args, exec_offset);
3882 if (ret != 0) {
3883 DRM_ERROR("execbuf with invalid offset/length\n");
3884 goto err;
3885 }
3886
Eric Anholt673a3942008-07-30 12:06:12 -07003887 i915_verify_inactive(dev, __FILE__, __LINE__);
3888
Keith Packard646f0f62008-11-20 23:23:03 -08003889 /* Zero the global flush/invalidate flags. These
3890 * will be modified as new domains are computed
3891 * for each object
3892 */
3893 dev->invalidate_domains = 0;
3894 dev->flush_domains = 0;
3895
Eric Anholt673a3942008-07-30 12:06:12 -07003896 for (i = 0; i < args->buffer_count; i++) {
3897 struct drm_gem_object *obj = object_list[i];
Eric Anholt673a3942008-07-30 12:06:12 -07003898
Keith Packard646f0f62008-11-20 23:23:03 -08003899 /* Compute new gpu domains and update invalidate/flush */
Eric Anholt8b0e3782009-02-19 14:40:50 -08003900 i915_gem_object_set_to_gpu_domain(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003901 }
3902
3903 i915_verify_inactive(dev, __FILE__, __LINE__);
3904
Keith Packard646f0f62008-11-20 23:23:03 -08003905 if (dev->invalidate_domains | dev->flush_domains) {
3906#if WATCH_EXEC
3907 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3908 __func__,
3909 dev->invalidate_domains,
3910 dev->flush_domains);
3911#endif
3912 i915_gem_flush(dev,
3913 dev->invalidate_domains,
3914 dev->flush_domains);
Zou Nan hai852835f2010-05-21 09:08:56 +08003915 if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
Eric Anholtb9624422009-06-03 07:27:35 +00003916 (void)i915_add_request(dev, file_priv,
Zou Nan hai852835f2010-05-21 09:08:56 +08003917 dev->flush_domains,
3918 &dev_priv->render_ring);
3919
Zou Nan haid1b851f2010-05-21 09:08:57 +08003920 if (HAS_BSD(dev))
3921 (void)i915_add_request(dev, file_priv,
3922 dev->flush_domains,
3923 &dev_priv->bsd_ring);
Zou Nan hai852835f2010-05-21 09:08:56 +08003924 }
Keith Packard646f0f62008-11-20 23:23:03 -08003925 }
Eric Anholt673a3942008-07-30 12:06:12 -07003926
Eric Anholtefbeed92009-02-19 14:54:51 -08003927 for (i = 0; i < args->buffer_count; i++) {
3928 struct drm_gem_object *obj = object_list[i];
Daniel Vetter23010e42010-03-08 13:35:02 +01003929 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003930 uint32_t old_write_domain = obj->write_domain;
Eric Anholtefbeed92009-02-19 14:54:51 -08003931
3932 obj->write_domain = obj->pending_write_domain;
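		/* Objects with an outstanding GPU write domain stay on
		 * gpu_write_list so a later flush can retire them;
		 * everything else drops off the list.
		 */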
Daniel Vetter99fcb762010-02-07 16:20:18 +01003933 if (obj->write_domain)
3934 list_move_tail(&obj_priv->gpu_write_list,
3935 &dev_priv->mm.gpu_write_list);
3936 else
3937 list_del_init(&obj_priv->gpu_write_list);
3938
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003939 trace_i915_gem_object_change_domain(obj,
3940 obj->read_domains,
3941 old_write_domain);
Eric Anholtefbeed92009-02-19 14:54:51 -08003942 }
3943
Eric Anholt673a3942008-07-30 12:06:12 -07003944 i915_verify_inactive(dev, __FILE__, __LINE__);
3945
3946#if WATCH_COHERENCY
3947 for (i = 0; i < args->buffer_count; i++) {
3948 i915_gem_object_check_coherency(object_list[i],
3949 exec_list[i].handle);
3950 }
3951#endif
3952
Eric Anholt673a3942008-07-30 12:06:12 -07003953#if WATCH_EXEC
Ben Gamari6911a9b2009-04-02 11:24:54 -07003954 i915_gem_dump_object(batch_obj,
Eric Anholt673a3942008-07-30 12:06:12 -07003955 args->batch_len,
3956 __func__,
3957 ~0);
3958#endif
3959
Eric Anholt673a3942008-07-30 12:06:12 -07003960 /* Exec the batchbuffer */
Zou Nan hai852835f2010-05-21 09:08:56 +08003961 ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3962 cliprects, exec_offset);
Eric Anholt673a3942008-07-30 12:06:12 -07003963 if (ret) {
3964 DRM_ERROR("dispatch failed %d\n", ret);
3965 goto err;
3966 }
3967
3968 /*
3969 * Ensure that the commands in the batch buffer are
3970 * finished before the interrupt fires
3971 */
Zou Nan hai852835f2010-05-21 09:08:56 +08003972 flush_domains = i915_retire_commands(dev, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07003973
3974 i915_verify_inactive(dev, __FILE__, __LINE__);
3975
3976 /*
3977 * Get a seqno representing the execution of the current buffer,
3978 * which we can wait on. We would like to mitigate these interrupts,
3979 * likely by only creating seqnos occasionally (so that we have
3980 * *some* interrupts representing completion of buffers that we can
3981 * wait on when trying to clear up gtt space).
3982 */
Zou Nan hai852835f2010-05-21 09:08:56 +08003983 seqno = i915_add_request(dev, file_priv, flush_domains, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07003984 BUG_ON(seqno == 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003985 for (i = 0; i < args->buffer_count; i++) {
3986 struct drm_gem_object *obj = object_list[i];
Zou Nan hai852835f2010-05-21 09:08:56 +08003987 obj_priv = to_intel_bo(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07003988
Zou Nan hai852835f2010-05-21 09:08:56 +08003989 i915_gem_object_move_to_active(obj, seqno, ring);
Eric Anholt673a3942008-07-30 12:06:12 -07003990#if WATCH_LRU
3991 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3992#endif
3993 }
3994#if WATCH_LRU
3995 i915_dump_lru(dev, __func__);
3996#endif
3997
3998 i915_verify_inactive(dev, __FILE__, __LINE__);
3999
Eric Anholt673a3942008-07-30 12:06:12 -07004000err:
Julia Lawallaad87df2008-12-21 16:28:47 +01004001 for (i = 0; i < pinned; i++)
4002 i915_gem_object_unpin(object_list[i]);
Eric Anholt673a3942008-07-30 12:06:12 -07004003
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05004004 for (i = 0; i < args->buffer_count; i++) {
4005 if (object_list[i]) {
Daniel Vetter23010e42010-03-08 13:35:02 +01004006 obj_priv = to_intel_bo(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05004007 obj_priv->in_execbuffer = false;
4008 }
Julia Lawallaad87df2008-12-21 16:28:47 +01004009 drm_gem_object_unreference(object_list[i]);
Kristian Høgsbergb70d11d2009-03-03 14:45:57 -05004010 }
Julia Lawallaad87df2008-12-21 16:28:47 +01004011
Eric Anholt673a3942008-07-30 12:06:12 -07004012 mutex_unlock(&dev->struct_mutex);
4013
Chris Wilson93533c22010-01-31 10:40:48 +00004014pre_mutex_err:
Eric Anholt40a5f0d2009-03-12 11:23:52 -07004015 /* Copy the updated relocations out regardless of current error
4016 * state. Failure to update the relocs would mean that the next
4017 * time userland calls execbuf, it would do so with presumed offset
4018 * state that didn't match the actual object state.
4019 */
4020 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
4021 relocs);
4022 if (ret2 != 0) {
4023 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
4024
4025 if (ret == 0)
4026 ret = ret2;
4027 }
4028
Jesse Barnes8e7d2b22009-05-08 16:13:25 -07004029 drm_free_large(object_list);
Eric Anholt9a298b22009-03-24 12:23:04 -07004030 kfree(cliprects);
Eric Anholt673a3942008-07-30 12:06:12 -07004031
4032 return ret;
4033}
4034
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_gem_exec_object __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (!IS_I965G(dev))
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;

	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

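/*
 * Main execbuffer entry point: validates the request, hands the exec
 * object list to i915_gem_do_execbuffer(), and copies the relocated
 * buffer offsets back out on success.
 *
 * A minimal sketch of a userspace caller via libdrm (illustrative only;
 * building the exec object array and batch is assumed, not shown):
 *
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)exec_objects,
 *		.buffer_count = count,
 *		.batch_len = batch_len,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */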
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_gem_exec_object2 __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_gem_exec_object2 __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}

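/*
 * Pin an object into the GTT at the requested alignment.  If it is
 * already bound at an unsuitable offset it is unbound and rebound; the
 * first pin also pulls an idle object off the inactive list so the
 * eviction code will not touch it.  Callers must hold struct_mutex and
 * balance every pin with i915_gem_object_unpin().
 */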
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (obj_priv->gtt_space != NULL) {
		if (alignment == 0)
			alignment = i915_gem_get_gtt_alignment(obj);
		if (obj_priv->gtt_offset & (alignment - 1)) {
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret)
			return ret;
	}

	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}

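/*
 * Drop one pin reference.  When the last pin goes away the object is
 * returned to the inactive list (if idle and not pending a GPU flush)
 * and the global pin accounting is updated to match.
 */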
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

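/*
 * Userspace entry point for pinning, reached through
 * DRM_IOCTL_I915_GEM_PIN.  A minimal sketch of a libdrm caller
 * (illustrative only; the handle is assumed to name a valid object):
 *
 *	struct drm_i915_gem_pin pin = {
 *		.handle = handle,
 *		.alignment = 4096,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin) == 0)
 *		gtt_offset = pin.offset;
 */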
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = to_intel_bo(obj);

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Pin before taking the userspace reference, so that a failed pin
	 * does not leave a stale user_pin_count behind.
	 */
	if (obj_priv->user_pin_count == 0) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}
	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = to_intel_bo(obj);
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

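/*
 * Report whether an object is still in use by the GPU, reached through
 * DRM_IOCTL_I915_GEM_BUSY.  A minimal sketch of a libdrm caller
 * (illustrative only; the reuse helper is hypothetical):
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	if (!busy.busy)
 *		bo_cache_reuse(handle);
 */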
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	/* Update the active list for the hardware's current position.
	 * Otherwise this only updates on a delayed timer or when irqs
	 * are actually unmasked, and our working set ends up being
	 * larger than required.
	 */
	i915_gem_retire_requests(dev);

	obj_priv = to_intel_bo(obj);
	/* Don't count being on the flushing list against the object being
	 * done.  Otherwise, a buffer left on the flushing list but not getting
	 * flushed (because nobody's flushing that domain) won't ever return
	 * unbusy and get reused by libdrm's bo cache.  The other expected
	 * consumer of this interface, OpenGL's occlusion queries, also specs
	 * that the objects get unbusy "eventually" without any interference.
	 */
	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

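/*
 * Let userspace mark an object's backing pages as expendable under
 * memory pressure, reached through DRM_IOCTL_I915_GEM_MADVISE.  A
 * minimal sketch of a libdrm caller (illustrative only; the cache
 * helper is hypothetical):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		bo_cache_evict(handle);
 */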
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
			  args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	obj_priv = to_intel_bo(obj);

	if (obj_priv->pin_count) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);

		DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
		return -EINVAL;
	}

	if (obj_priv->madv != __I915_MADV_PURGED)
		obj_priv->madv = args->madv;

	/* if the object is no longer bound, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj_priv) &&
	    obj_priv->gtt_space == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj_priv->madv != __I915_MADV_PURGED;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

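/*
 * Allocate a GEM object of the requested size, starting life in the CPU
 * domain with default fencing and madvise state.  This is how, for
 * example, i915_gem_init_pipe_control() below obtains its seqno page:
 *
 *	obj = i915_gem_alloc_object(dev, 4096);
 */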
struct drm_gem_object *i915_gem_alloc_object(struct drm_device *dev,
					     size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->agp_type = AGP_USER_MEMORY;
	obj->base.driver_private = NULL;
	obj->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj->list);
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;

	trace_i915_gem_object_create(&obj->base);

	return &obj->base;
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	/* Objects must be created through i915_gem_alloc_object(); this
	 * generic constructor must never run, so trap any caller.
	 */
	BUG();

	return 0;
}

static void i915_gem_free_object_tail(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	ret = i915_gem_object_unbind(obj);
	if (ret == -ERESTARTSYS) {
		list_move(&obj_priv->list,
			  &dev_priv->mm.deferred_free_list);
		return;
	}

	if (obj_priv->mmap_offset)
		i915_gem_free_mmap_offset(obj);

	drm_gem_object_release(obj);

	kfree(obj_priv->page_cpu_valid);
	kfree(obj_priv->bit_17);
	kfree(obj_priv);
}

void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	trace_i915_gem_object_destroy(obj);

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_free_object_tail(obj);
}

/** Unbinds all inactive objects. */
static int
i915_gem_evict_from_inactive_list(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	while (!list_empty(&dev_priv->mm.inactive_list)) {
		struct drm_gem_object *obj;
		int ret;

		obj = &list_first_entry(&dev_priv->mm.inactive_list,
					struct drm_i915_gem_object,
					list)->base;

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

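/*
 * Quiesce the GPU for suspend or module unload: wait for both rings to
 * drain, evict everything under UMS, stop the hangcheck timer, and tear
 * down the ring buffers before cancelling the retire work handler.
 */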
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended ||
	    (dev_priv->render_ring.gem_object == NULL) ||
	    (HAS_BSD(dev) &&
	     dev_priv->bsd_ring.gem_object == NULL)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_gem_evict_from_inactive_list(dev);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer(&dev_priv->hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
static int
i915_gem_init_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret)
		goto err_unref;

	dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
	dev_priv->seqno_page = kmap(obj_priv->pages[0]);
	if (dev_priv->seqno_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	dev_priv->seqno_obj = obj;
	memset(dev_priv->seqno_page, 0, PAGE_SIZE);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

static void
i915_gem_cleanup_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = dev_priv->seqno_obj;
	obj_priv = to_intel_bo(obj);
	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->seqno_obj = NULL;

	dev_priv->seqno_page = NULL;
}

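/*
 * Bring up the command rings: the render ring always, the BSD ring only
 * on hardware that has one, plus the PIPE_CONTROL scratch page on 965+.
 * Each step unwinds the earlier ones on failure.
 */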
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	dev_priv->render_ring = render_ring;

	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->render_ring.status_page.page_addr
			= dev_priv->status_page_dmah->vaddr;
		memset(dev_priv->render_ring.status_page.page_addr,
		       0, PAGE_SIZE);
	}

	if (HAS_PIPE_CONTROL(dev)) {
		ret = i915_gem_init_pipe_control(dev);
		if (ret)
			return ret;
	}

	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
	if (ret)
		goto cleanup_pipe_control;

	if (HAS_BSD(dev)) {
		dev_priv->bsd_ring = bsd_ring;
		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
		if (ret)
			goto cleanup_render_ring;
	}

	return 0;

cleanup_render_ring:
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
cleanup_pipe_control:
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
	return ret;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	if (HAS_BSD(dev))
		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
}

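/*
 * UMS VT-switch entry point: re-enable GEM once the console hands the
 * hardware back to us.  The BUG_ONs below assert that i915_gem_idle()
 * really did empty the active, flushing, inactive and request lists on
 * the way out.
 */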
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	spin_lock(&dev_priv->mm.active_list_lock);
	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

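/*
 * One-time GEM setup at driver load: initialise the memory-management
 * lists, the retire work handler and the shrinker registration, apply
 * the GEN3 ARB_STATE workaround, and size and clear the fence register
 * file for the detected hardware generation.
 */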
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock_init(&dev_priv->mm.active_list_lock);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
	if (HAS_BSD(dev)) {
		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
	}
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	spin_lock(&shrink_list_lock);
	list_add(&dev_priv->mm.shrink_list, &shrink_list);
	spin_unlock(&shrink_list_lock);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	if (IS_I965G(dev)) {
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
	} else {
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
	}
	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
int i915_gem_init_phys_object(struct drm_device *dev,
			      int id, int size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, 0);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

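/*
 * Undo i915_gem_attach_phys_object(): copy the contents of the
 * contiguous DMA pages back into the object's shmem pages, flush the
 * caches, and drop the object's link to the phys slot.
 */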
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = to_intel_bo(obj);
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

	i915_gem_object_put_pages(obj);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}

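/*
 * Bind an object to one of the physically contiguous slots, copying its
 * current contents in.  A sketch of how the modesetting code would use
 * this for a cursor on hardware that needs physical addresses
 * (illustrative only; the pipe-based slot id is an assumption):
 *
 *	ret = i915_gem_attach_phys_object(dev, obj,
 *					  I915_GEM_PHYS_CURSOR_0 + pipe);
 */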
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = to_intel_bo(obj);

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	i915_gem_object_put_pages(obj);

	return 0;
out:
	return ret;
}

static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list))
		list_del_init(i915_file_priv->mm.request_list.next);
	mutex_unlock(&dev->struct_mutex);
}

static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->render_ring.active_list);
	if (HAS_BSD(dev))
		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);

	return !lists_empty;
}

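/*
 * Memory-pressure callback wired up through register_shrinker() below.
 * Per the shrinker contract, nr_to_scan == 0 is only a query: return a
 * count of freeable objects (scaled by sysctl_vfs_cache_pressure).
 * Otherwise evict up to nr_to_scan inactive objects, preferring
 * purgeable ones, and return -1 if no device lock could be taken
 * without risking deadlock.
 */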
static int
i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

rescan:
	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);
		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				i915_gem_object_unbind(&obj_priv->base);
				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (nr_to_scan > 0) {
				i915_gem_object_unbind(&obj_priv->base);
				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;
	}

	if (nr_to_scan) {
		int active = 0;

		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This has a dramatic impact to reduce the number of
		 * OOM-killer events whilst running the GPU aggressively.
		 */
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (!mutex_trylock(&dev->struct_mutex))
				continue;

			spin_unlock(&shrink_list_lock);

			if (i915_gpu_is_active(dev)) {
				i915_gpu_idle(dev);
				active++;
			}

			spin_lock(&shrink_list_lock);
			mutex_unlock(&dev->struct_mutex);
		}

		if (active)
			goto rescan;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;
	else if (cnt > 0)
		return (cnt / 100) * sysctl_vfs_cache_pressure;
	else
		return 0;
}

static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};

__init void
i915_gem_shrinker_init(void)
{
	register_shrinker(&shrinker);
}

__exit void
i915_gem_shrinker_exit(void)
{
	unregister_shrinker(&shrinker);
}